/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

#ifdef CONFIG_64BIT
#define smmu_writeq	writeq_relaxed
#else
#define smmu_writeq(reg64, addr)				\
	do {							\
		u64 __val = (reg64);				\
		void __iomem *__addr = (addr);			\
		writel_relaxed(__val >> 32, __addr + 4);	\
		writel_relaxed(__val, __addr);			\
	} while (0)
#endif
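
/*
 * Illustrative note (not part of the original source): on 64-bit kernels
 * smmu_writeq() is plain writeq_relaxed(); the 32-bit fallback above
 * issues two writel_relaxed() calls, upper word first, so there is no
 * atomicity guarantee across the two halves. Typical use in this file:
 *
 *	smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
 */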

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

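/*
 * Illustrative sketch (not part of the original source): a stream-match
 * entry is programmed by composing the ID/mask fields and setting the
 * valid bit, exactly as arm_smmu_master_configure_smrs() does below:
 *
 *	u32 reg = SMR_VALID | (id << SMR_ID_SHIFT) | (mask << SMR_MASK_SHIFT);
 *	writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(idx));
 */
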
#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR_LO		0x50
#define ARM_SMMU_CB_PAR_HI		0x54
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
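
/*
 * Note (illustrative, not part of the original source): both parameters
 * are read-only at runtime (S_IRUGO). For a built-in driver they are set
 * on the kernel command line using the usual modulename.param syntax,
 * e.g.:
 *
 *	arm-smmu.force_stage=2 arm-smmu.disable_bypass=1
 */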

enum arm_smmu_arch_version {
	ARM_SMMU_V1 = 1,
	ARM_SMMU_V2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)

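/*
 * Note (added commentary): TLB tags are derived directly from the context
 * bank index, so no two live context banks ever share an ASID or VMID.
 * The "+ 1" keeps allocated VMIDs away from zero (presumably reserving
 * VMID 0 for traffic that bypasses translation).
 */
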
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
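
/*
 * Example device-tree fragment (illustrative only) enabling the single
 * option currently defined in arm_smmu_options[]:
 *
 *	smmu {
 *		compatible = "arm,smmu-v1";
 *		...
 *		calxeda,smmu-secure-config-access;
 *	};
 */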

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}
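
/*
 * Note (added commentary): the masters rb-tree is keyed on the raw
 * device_node pointer value, so the lookup above and the insertion in
 * insert_smmu_master() agree on an ordering even though that ordering
 * carries no meaning beyond identity.
 */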

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}
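
/*
 * Illustrative note (not part of the original source): the allocator
 * above is lock-free; it simply retries find_next_zero_bit() until
 * test_and_set_bit() wins any race. Callers use it for context banks
 * and SMRs, e.g.:
 *
 *	int cbndx = __arm_smmu_alloc_bitmap(smmu->context_map, start,
 *					    smmu->num_context_banks);
 */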

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
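
/*
 * Note (added commentary): the loop above polls for up to
 * TLB_LOOP_TIMEOUT (1000000) iterations with udelay(1) between reads,
 * i.e. roughly one second of wall-clock time, before declaring the SMMU
 * wedged.
 */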

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
			iova &= ~0xfffUL;	/* page-align; the low bits carry the ASID */
			iova |= ARM_SMMU_CB_ASID(cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
#ifdef CONFIG_64BIT
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
#endif
		}
#ifdef CONFIG_64BIT
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			writeq_relaxed(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
#endif
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
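
/*
 * Sketch of the calling pattern (added commentary, not code in this
 * file): the io-pgtable code invokes these callbacks with the domain as
 * cookie whenever mappings change. An unmap, for example, ends up calling
 * tlb_add_flush(iova, size, granule, leaf, cookie) for each region and
 * then tlb_sync(cookie) to drain the queued invalidations.
 */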

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == ARM_SMMU_V1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
818
819static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +0100820 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100821{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100822 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +0000823 unsigned long ias, oas;
824 struct io_pgtable_ops *pgtbl_ops;
825 struct io_pgtable_cfg pgtbl_cfg;
826 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +0100827 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +0100828 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100829
Will Deacon518f7132014-11-14 17:17:54 +0000830 mutex_lock(&smmu_domain->init_mutex);
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100831 if (smmu_domain->smmu)
832 goto out_unlock;
833
Will Deaconc752ce42014-06-25 22:46:31 +0100834 /*
835 * Mapping the requested stage onto what we support is surprisingly
836 * complicated, mainly because the spec allows S1+S2 SMMUs without
837 * support for nested translation. That means we end up with the
838 * following table:
839 *
840 * Requested Supported Actual
841 * S1 N S1
842 * S1 S1+S2 S1
843 * S1 S2 S2
844 * S1 S1 S1
845 * N N N
846 * N S1+S2 S2
847 * N S2 S2
848 * N S1 S1
849 *
850 * Note that you can't actually request stage-2 mappings.
851 */
852 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
853 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
854 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
855 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
856
857 switch (smmu_domain->stage) {
858 case ARM_SMMU_DOMAIN_S1:
859 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
860 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +0000861 ias = smmu->va_size;
862 oas = smmu->ipa_size;
863 if (IS_ENABLED(CONFIG_64BIT))
864 fmt = ARM_64_LPAE_S1;
865 else
866 fmt = ARM_32_LPAE_S1;
Will Deaconc752ce42014-06-25 22:46:31 +0100867 break;
868 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +0100869 /*
870 * We will likely want to change this if/when KVM gets
871 * involved.
872 */
Will Deaconc752ce42014-06-25 22:46:31 +0100873 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +0100874 cfg->cbar = CBAR_TYPE_S2_TRANS;
875 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +0000876 ias = smmu->ipa_size;
877 oas = smmu->pa_size;
878 if (IS_ENABLED(CONFIG_64BIT))
879 fmt = ARM_64_LPAE_S2;
880 else
881 fmt = ARM_32_LPAE_S2;
Will Deaconc752ce42014-06-25 22:46:31 +0100882 break;
883 default:
884 ret = -EINVAL;
885 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100886 }
887
888 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
889 smmu->num_context_banks);
890 if (IS_ERR_VALUE(ret))
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100891 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100892
Will Deacon44680ee2014-06-25 11:29:12 +0100893 cfg->cbndx = ret;
Robin Murphy09360402014-08-28 17:51:59 +0100894 if (smmu->version == ARM_SMMU_V1) {
Will Deacon44680ee2014-06-25 11:29:12 +0100895 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
896 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100897 } else {
Will Deacon44680ee2014-06-25 11:29:12 +0100898 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100899 }
900
Will Deacon518f7132014-11-14 17:17:54 +0000901 pgtbl_cfg = (struct io_pgtable_cfg) {
902 .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
903 .ias = ias,
904 .oas = oas,
905 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +0100906 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +0000907 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100908
Will Deacon518f7132014-11-14 17:17:54 +0000909 smmu_domain->smmu = smmu;
910 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
911 if (!pgtbl_ops) {
912 ret = -ENOMEM;
913 goto out_clear_smmu;
914 }
915
916 /* Update our support page sizes to reflect the page table format */
917 arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
918
919 /* Initialise the context bank with our page table cfg */
920 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
921
922 /*
923 * Request context fault interrupt. Do this last to avoid the
924 * handler seeing a half-initialised domain state.
925 */
Will Deacon44680ee2014-06-25 11:29:12 +0100926 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Will Deacon45ae7cf2013-06-24 18:31:25 +0100927 ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
928 "arm-smmu-context-fault", domain);
929 if (IS_ERR_VALUE(ret)) {
930 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
Will Deacon44680ee2014-06-25 11:29:12 +0100931 cfg->irptndx, irq);
932 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100933 }
934
Will Deacon518f7132014-11-14 17:17:54 +0000935 mutex_unlock(&smmu_domain->init_mutex);
936
937 /* Publish page table ops for map/unmap */
938 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100939 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100940
Will Deacon518f7132014-11-14 17:17:54 +0000941out_clear_smmu:
942 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100943out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +0000944 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100945 return ret;
946}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU.
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
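
/*
 * Sketch of how these entry points are reached via the generic IOMMU API
 * (illustrative only; assumes a device sitting behind this SMMU):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	iommu_unmap(domain, iova, SZ_4K);
 *
 * iommu_map()/iommu_unmap() dispatch to arm_smmu_map()/arm_smmu_unmap()
 * through arm_smmu_ops.
 */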
1233
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001234static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1235 dma_addr_t iova)
1236{
Joerg Roedel1d672632015-03-26 13:43:10 +01001237 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001238 struct arm_smmu_device *smmu = smmu_domain->smmu;
1239 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1240 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1241 struct device *dev = smmu->dev;
1242 void __iomem *cb_base;
1243 u32 tmp;
1244 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01001245 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001246
1247 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1248
Robin Murphy661d9622015-05-27 17:09:34 +01001249 /* ATS1 registers can only be written atomically */
1250 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001251 if (smmu->version == ARM_SMMU_V2)
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +01001252 smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
Robin Murphy661d9622015-05-27 17:09:34 +01001253 else
Robin Murphy661d9622015-05-27 17:09:34 +01001254 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001255
1256 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1257 !(tmp & ATSR_ACTIVE), 5, 50)) {
1258 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001259 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001260 &iova);
1261 return ops->iova_to_phys(ops, iova);
1262 }
1263
1264 phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
1265 phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
1266
1267 if (phys & CB_PAR_F) {
1268 dev_err(dev, "translation fault!\n");
1269 dev_err(dev, "PAR = 0x%llx\n", phys);
1270 return 0;
1271 }
1272
1273 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1274}
1275
Will Deacon45ae7cf2013-06-24 18:31:25 +01001276static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001277 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001278{
Will Deacon518f7132014-11-14 17:17:54 +00001279 phys_addr_t ret;
1280 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001281 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001282	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001283
Will Deacon518f7132014-11-14 17:17:54 +00001284 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001285 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001286
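	/*
	 * Use the hardware translation registers where the SMMU
	 * implements them for stage 1 domains; everything else gets a
	 * software walk of the page tables.
	 */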
Will Deacon518f7132014-11-14 17:17:54 +00001287 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001288 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1289 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001290 ret = arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001291 } else {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001292 ret = ops->iova_to_phys(ops, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001293 }
1294
Will Deacon518f7132014-11-14 17:17:54 +00001295 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001296
Will Deacon518f7132014-11-14 17:17:54 +00001297 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001298}
1299
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001300static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001301{
Will Deacond0948942014-06-24 17:30:10 +01001302 switch (cap) {
1303 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001304 /*
1305 * Return true here as the SMMU can always send out coherent
1306 * requests.
1307 */
1308 return true;
Will Deacond0948942014-06-24 17:30:10 +01001309 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001310 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001311 case IOMMU_CAP_NOEXEC:
1312 return true;
Will Deacond0948942014-06-24 17:30:10 +01001313 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001314 return false;
Will Deacond0948942014-06-24 17:30:10 +01001315 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001316}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001317
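/*
 * pci_for_each_dma_alias() callback: each alias overwrites *data, so the
 * caller is left holding the RID of the last alias visited.
 */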
Will Deacona9a1b0b2014-05-01 18:05:08 +01001318static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1319{
1320 *((u16 *)data) = alias;
1321 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001322}
1323
Will Deacon8f68f8e2014-07-15 11:27:08 +01001324static void __arm_smmu_release_pci_iommudata(void *data)
1325{
1326 kfree(data);
1327}
1328
Joerg Roedelaf659932015-10-21 23:51:41 +02001329static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1330 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001331{
Will Deacon03edb222015-01-19 14:27:33 +00001332 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02001333 u16 sid;
1334 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001335
Will Deacon03edb222015-01-19 14:27:33 +00001336 cfg = iommu_group_get_iommudata(group);
1337 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001338 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001339 if (!cfg)
1340 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001341
Will Deacon03edb222015-01-19 14:27:33 +00001342 iommu_group_set_iommudata(group, cfg,
1343 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001344 }
1345
Joerg Roedelaf659932015-10-21 23:51:41 +02001346 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
1347 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001348
Will Deacon03edb222015-01-19 14:27:33 +00001349 /*
1350 * Assume Stream ID == Requester ID for now.
1351 * We need a way to describe the ID mappings in FDT.
1352 */
1353 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1354 for (i = 0; i < cfg->num_streamids; ++i)
1355 if (cfg->streamids[i] == sid)
1356 break;
1357
1358 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1359 if (i == cfg->num_streamids)
1360 cfg->streamids[cfg->num_streamids++] = sid;
1361
1362 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001363}
1364
Joerg Roedelaf659932015-10-21 23:51:41 +02001365static int arm_smmu_init_platform_device(struct device *dev,
1366 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00001367{
Will Deacon03edb222015-01-19 14:27:33 +00001368 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001369 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00001370
1371 if (!smmu)
1372 return -ENODEV;
1373
1374 master = find_smmu_master(smmu, dev->of_node);
1375 if (!master)
1376 return -ENODEV;
1377
Will Deacon03edb222015-01-19 14:27:33 +00001378 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001379
1380 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001381}
1382
1383static int arm_smmu_add_device(struct device *dev)
1384{
Joerg Roedelaf659932015-10-21 23:51:41 +02001385 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00001386
Joerg Roedelaf659932015-10-21 23:51:41 +02001387 group = iommu_group_get_for_dev(dev);
1388 if (IS_ERR(group))
1389 return PTR_ERR(group);
1390
Peng Fan9a4a9d82015-11-20 16:56:18 +08001391 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001392 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001393}
1394
Will Deacon45ae7cf2013-06-24 18:31:25 +01001395static void arm_smmu_remove_device(struct device *dev)
1396{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001397 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001398}
1399
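/*
 * Called back from iommu_group_get_for_dev(): pick the core's default
 * grouping for the bus, then hang our per-master configuration off it.
 */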
Joerg Roedelaf659932015-10-21 23:51:41 +02001400static struct iommu_group *arm_smmu_device_group(struct device *dev)
1401{
1402 struct iommu_group *group;
1403 int ret;
1404
1405 if (dev_is_pci(dev))
1406 group = pci_device_group(dev);
1407 else
1408 group = generic_device_group(dev);
1409
1410 if (IS_ERR(group))
1411 return group;
1412
1413 if (dev_is_pci(dev))
1414 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1415 else
1416 ret = arm_smmu_init_platform_device(dev, group);
1417
1418 if (ret) {
1419 iommu_group_put(group);
1420 group = ERR_PTR(ret);
1421 }
1422
1423 return group;
1424}
1425
Will Deaconc752ce42014-06-25 22:46:31 +01001426static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1427 enum iommu_attr attr, void *data)
1428{
Joerg Roedel1d672632015-03-26 13:43:10 +01001429 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001430
1431 switch (attr) {
1432 case DOMAIN_ATTR_NESTING:
1433 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1434 return 0;
1435 default:
1436 return -ENODEV;
1437 }
1438}
1439
1440static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1441 enum iommu_attr attr, void *data)
1442{
Will Deacon518f7132014-11-14 17:17:54 +00001443 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001444 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001445
Will Deacon518f7132014-11-14 17:17:54 +00001446 mutex_lock(&smmu_domain->init_mutex);
1447
Will Deaconc752ce42014-06-25 22:46:31 +01001448 switch (attr) {
1449 case DOMAIN_ATTR_NESTING:
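		/*
		 * The stage is baked into the page-table format when the
		 * domain is initialised at attach time, so it cannot be
		 * changed once the domain is live on an SMMU.
		 */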
Will Deacon518f7132014-11-14 17:17:54 +00001450 if (smmu_domain->smmu) {
1451 ret = -EPERM;
1452 goto out_unlock;
1453 }
1454
Will Deaconc752ce42014-06-25 22:46:31 +01001455 if (*(int *)data)
1456 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1457 else
1458 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1459
Will Deacon518f7132014-11-14 17:17:54 +00001460 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001461 default:
Will Deacon518f7132014-11-14 17:17:54 +00001462 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01001463 }
Will Deacon518f7132014-11-14 17:17:54 +00001464
1465out_unlock:
1466 mutex_unlock(&smmu_domain->init_mutex);
1467 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001468}
1469
Will Deacon518f7132014-11-14 17:17:54 +00001470static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001471 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001472 .domain_alloc = arm_smmu_domain_alloc,
1473 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001474 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001475 .map = arm_smmu_map,
1476 .unmap = arm_smmu_unmap,
Joerg Roedel76771c92014-12-02 13:07:13 +01001477 .map_sg = default_iommu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01001478 .iova_to_phys = arm_smmu_iova_to_phys,
1479 .add_device = arm_smmu_add_device,
1480 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001481 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001482 .domain_get_attr = arm_smmu_domain_get_attr,
1483 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00001484 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001485};
1486
1487static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1488{
1489 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001490 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001491 int i = 0;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001492 u32 reg;
1493
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001494	/* Clear the global FSR */
1495 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1496 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001497
Robin Murphy25a1c962016-02-10 14:25:33 +00001498 /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
1499 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001500 for (i = 0; i < smmu->num_mapping_groups; ++i) {
Olav Haugan3c8766d2014-08-22 17:12:32 -07001501 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy25a1c962016-02-10 14:25:33 +00001502 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001503 }
1504
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001505 /* Make sure all context banks are disabled and clear CB_FSR */
1506 for (i = 0; i < smmu->num_context_banks; ++i) {
1507 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
1508 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1509 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
1510 }
Will Deacon1463fe42013-07-31 19:21:27 +01001511
Will Deacon45ae7cf2013-06-24 18:31:25 +01001512 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001513 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1514 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1515
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001516 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001517
Will Deacon45ae7cf2013-06-24 18:31:25 +01001518 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001519 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001520
1521 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001522 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001523
Robin Murphy25a1c962016-02-10 14:25:33 +00001524 /* Enable client access, handling unmatched streams as appropriate */
1525 reg &= ~sCR0_CLIENTPD;
1526 if (disable_bypass)
1527 reg |= sCR0_USFCFG;
1528 else
1529 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001530
1531 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001532 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001533
1534 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001535 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001536
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001537 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1538 reg |= sCR0_VMID16EN;
1539
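	/*
	 * Drain the TLB invalidations issued above, then use a
	 * non-relaxed write so all earlier setup is observable before
	 * the SMMU is enabled.
	 */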
Will Deacon45ae7cf2013-06-24 18:31:25 +01001540 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00001541 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001542 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001543}
1544
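/* Decode the 3-bit address-size fields of ID2 (IAS/OAS/UBS) into bits */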
1545static int arm_smmu_id_size_to_bits(int size)
1546{
1547 switch (size) {
1548 case 0:
1549 return 32;
1550 case 1:
1551 return 36;
1552 case 2:
1553 return 40;
1554 case 3:
1555 return 42;
1556 case 4:
1557 return 44;
1558 case 5:
1559 default:
1560 return 48;
1561 }
1562}
1563
1564static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1565{
1566 unsigned long size;
1567 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1568 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01001569 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001570
1571 dev_notice(smmu->dev, "probing hardware configuration...\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001572 dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
1573
1574 /* ID0 */
1575 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001576
1577 /* Restrict available stages based on module parameter */
1578 if (force_stage == 1)
1579 id &= ~(ID0_S2TS | ID0_NTS);
1580 else if (force_stage == 2)
1581 id &= ~(ID0_S1TS | ID0_NTS);
1582
Will Deacon45ae7cf2013-06-24 18:31:25 +01001583 if (id & ID0_S1TS) {
1584 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1585 dev_notice(smmu->dev, "\tstage 1 translation\n");
1586 }
1587
1588 if (id & ID0_S2TS) {
1589 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1590 dev_notice(smmu->dev, "\tstage 2 translation\n");
1591 }
1592
1593 if (id & ID0_NTS) {
1594 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1595 dev_notice(smmu->dev, "\tnested translation\n");
1596 }
1597
1598 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001599 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001600 dev_err(smmu->dev, "\tno translation support!\n");
1601 return -ENODEV;
1602 }
1603
Will Deacond38f0ff2015-06-29 17:47:42 +01001604	if ((id & ID0_S1TS) && ((smmu->version == ARM_SMMU_V1) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001605 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1606 dev_notice(smmu->dev, "\taddress translation ops\n");
1607 }
1608
Robin Murphybae2c2d2015-07-29 19:46:05 +01001609 /*
1610 * In order for DMA API calls to work properly, we must defer to what
1611 * the DT says about coherency, regardless of what the hardware claims.
1612 * Fortunately, this also opens up a workaround for systems where the
1613 * ID register value has ended up configured incorrectly.
1614 */
1615 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
1616 cttw_reg = !!(id & ID0_CTTW);
1617 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001618 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01001619 if (cttw_dt || cttw_reg)
1620 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1621 cttw_dt ? "" : "non-");
1622 if (cttw_dt != cttw_reg)
1623 dev_notice(smmu->dev,
1624 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001625
1626 if (id & ID0_SMS) {
1627 u32 smr, sid, mask;
1628
1629 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1630 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
1631 ID0_NUMSMRG_MASK;
1632 if (smmu->num_mapping_groups == 0) {
1633 dev_err(smmu->dev,
1634 "stream-matching supported, but no SMRs present!\n");
1635 return -ENODEV;
1636 }
1637
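		/*
		 * Discover how many SMR mask/ID bits are implemented by
		 * writing all-ones and seeing what sticks on readback.
		 */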
1638 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
1639 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
1640 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1641 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1642
1643 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
1644 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
1645 if ((mask & sid) != sid) {
1646 dev_err(smmu->dev,
1647 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
1648 mask, sid);
1649 return -ENODEV;
1650 }
1651
1652 dev_notice(smmu->dev,
1653 "\tstream matching with %u register groups, mask 0x%x",
1654 smmu->num_mapping_groups, mask);
Olav Haugan3c8766d2014-08-22 17:12:32 -07001655 } else {
1656 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
1657 ID0_NUMSIDB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001658 }
1659
1660 /* ID1 */
1661 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
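	/* IDR1.PAGESIZE: register pages are either 64KB or 4KB apart */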
Will Deaconc757e852014-07-30 11:33:25 +01001662 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001663
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001664 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001665 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01001666 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001667 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001668 dev_warn(smmu->dev,
1669 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1670 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001671
Will Deacon518f7132014-11-14 17:17:54 +00001672 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001673 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1674 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1675 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1676 return -ENODEV;
1677 }
1678 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1679 smmu->num_context_banks, smmu->num_s2_context_banks);
1680
1681 /* ID2 */
1682 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1683 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001684 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001685
Will Deacon518f7132014-11-14 17:17:54 +00001686 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001687 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001688 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001689
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001690 if (id & ID2_VMID16)
1691 smmu->features |= ARM_SMMU_FEAT_VMID16;
1692
Robin Murphyf1d84542015-03-04 16:41:05 +00001693 /*
1694 * What the page table walker can address actually depends on which
1695 * descriptor format is in use, but since a) we don't know that yet,
1696 * and b) it can vary per context bank, this will have to do...
1697 */
1698 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1699 dev_warn(smmu->dev,
1700 "failed to set DMA mask for table walker\n");
1701
Robin Murphy09360402014-08-28 17:51:59 +01001702 if (smmu->version == ARM_SMMU_V1) {
Will Deacon518f7132014-11-14 17:17:54 +00001703 smmu->va_size = smmu->ipa_size;
1704 size = SZ_4K | SZ_2M | SZ_1G;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001705 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001706 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00001707 smmu->va_size = arm_smmu_id_size_to_bits(size);
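		/* unsigned long IOVAs limit a 32-bit kernel to a 32-bit VA space */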
1708#ifndef CONFIG_64BIT
1709 smmu->va_size = min(32UL, smmu->va_size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001710#endif
Will Deacon518f7132014-11-14 17:17:54 +00001711 size = 0;
1712 if (id & ID2_PTFS_4K)
1713 size |= SZ_4K | SZ_2M | SZ_1G;
1714 if (id & ID2_PTFS_16K)
1715 size |= SZ_16K | SZ_32M;
1716 if (id & ID2_PTFS_64K)
1717 size |= SZ_64K | SZ_512M;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001718 }
1719
Will Deacon518f7132014-11-14 17:17:54 +00001720 arm_smmu_ops.pgsize_bitmap &= size;
1721 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
1722
Will Deacon28d60072014-09-01 16:24:48 +01001723 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1724 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001725 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001726
1727 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1728 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001729 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001730
Will Deacon45ae7cf2013-06-24 18:31:25 +01001731 return 0;
1732}
1733
Joerg Roedel09b52692014-10-02 12:24:45 +02001734static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy09360402014-08-28 17:51:59 +01001735 { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
1736 { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
1737 { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
Robin Murphyd3aba042014-08-28 17:52:00 +01001738 { .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
Robin Murphy09360402014-08-28 17:51:59 +01001739 { .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
1740 { },
1741};
1742MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1743
Will Deacon45ae7cf2013-06-24 18:31:25 +01001744static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1745{
Robin Murphy09360402014-08-28 17:51:59 +01001746 const struct of_device_id *of_id;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001747 struct resource *res;
1748 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001749 struct device *dev = &pdev->dev;
1750 struct rb_node *node;
1751 struct of_phandle_args masterspec;
1752 int num_irqs, i, err;
1753
1754 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1755 if (!smmu) {
1756 dev_err(dev, "failed to allocate arm_smmu_device\n");
1757 return -ENOMEM;
1758 }
1759 smmu->dev = dev;
1760
Robin Murphy09360402014-08-28 17:51:59 +01001761 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
1762 smmu->version = (enum arm_smmu_arch_version)of_id->data;
1763
Will Deacon45ae7cf2013-06-24 18:31:25 +01001764 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01001765 smmu->base = devm_ioremap_resource(dev, res);
1766 if (IS_ERR(smmu->base))
1767 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001768 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001769
1770 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1771 &smmu->num_global_irqs)) {
1772 dev_err(dev, "missing #global-interrupts property\n");
1773 return -ENODEV;
1774 }
1775
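	/*
	 * The first #global-interrupts entries are the global fault
	 * interrupts; anything after those belongs to a context bank.
	 */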
1776 num_irqs = 0;
1777 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
1778 num_irqs++;
1779 if (num_irqs > smmu->num_global_irqs)
1780 smmu->num_context_irqs++;
1781 }
1782
Andreas Herrmann44a08de2013-10-01 13:39:07 +01001783 if (!smmu->num_context_irqs) {
1784 dev_err(dev, "found %d interrupts but expected at least %d\n",
1785 num_irqs, smmu->num_global_irqs + 1);
1786 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001787 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001788
1789 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
1790 GFP_KERNEL);
1791 if (!smmu->irqs) {
1792 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
1793 return -ENOMEM;
1794 }
1795
1796 for (i = 0; i < num_irqs; ++i) {
1797 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07001798
Will Deacon45ae7cf2013-06-24 18:31:25 +01001799 if (irq < 0) {
1800 dev_err(dev, "failed to get irq index %d\n", i);
1801 return -ENODEV;
1802 }
1803 smmu->irqs[i] = irq;
1804 }
1805
Olav Haugan3c8766d2014-08-22 17:12:32 -07001806 err = arm_smmu_device_cfg_probe(smmu);
1807 if (err)
1808 return err;
1809
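	/*
	 * Illustrative fragment of the "mmu-masters" binding parsed
	 * below (phandles and stream IDs are made-up examples):
	 *
	 *	mmu-masters = <&dma0 0xd01d 0xd01e>,
	 *		      <&dma1 0xd11c>;
	 *
	 * Each entry pairs a master's phandle with #stream-id-cells
	 * worth of stream IDs.
	 */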
Will Deacon45ae7cf2013-06-24 18:31:25 +01001810 i = 0;
1811 smmu->masters = RB_ROOT;
1812 while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
1813 "#stream-id-cells", i,
1814 &masterspec)) {
1815 err = register_smmu_master(smmu, dev, &masterspec);
1816 if (err) {
1817 dev_err(dev, "failed to add master %s\n",
1818 masterspec.np->name);
1819 goto out_put_masters;
1820 }
1821
1822 i++;
1823 }
1824 dev_notice(dev, "registered %d master devices\n", i);
1825
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001826 parse_driver_options(smmu);
1827
Robin Murphy09360402014-08-28 17:51:59 +01001828 if (smmu->version > ARM_SMMU_V1 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01001829 smmu->num_context_banks != smmu->num_context_irqs) {
1830 dev_err(dev,
1831 "found only %d context interrupt(s) but %d required\n",
1832 smmu->num_context_irqs, smmu->num_context_banks);
Wei Yongjun89a23cde2013-11-15 09:42:30 +00001833 err = -ENODEV;
Will Deacon44680ee2014-06-25 11:29:12 +01001834 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001835 }
1836
Will Deacon45ae7cf2013-06-24 18:31:25 +01001837 for (i = 0; i < smmu->num_global_irqs; ++i) {
1838 err = request_irq(smmu->irqs[i],
1839 arm_smmu_global_fault,
1840 IRQF_SHARED,
1841 "arm-smmu global fault",
1842 smmu);
1843 if (err) {
1844 dev_err(dev, "failed to request global IRQ %d (%u)\n",
1845 i, smmu->irqs[i]);
1846 goto out_free_irqs;
1847 }
1848 }
1849
1850 INIT_LIST_HEAD(&smmu->list);
1851 spin_lock(&arm_smmu_devices_lock);
1852 list_add(&smmu->list, &arm_smmu_devices);
1853 spin_unlock(&arm_smmu_devices_lock);
Will Deaconfd90cec2013-08-21 13:56:34 +01001854
1855 arm_smmu_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001856 return 0;
1857
1858out_free_irqs:
1859 while (i--)
1860 free_irq(smmu->irqs[i], smmu);
1861
Will Deacon45ae7cf2013-06-24 18:31:25 +01001862out_put_masters:
1863 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07001864 struct arm_smmu_master *master
1865 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001866 of_node_put(master->of_node);
1867 }
1868
1869 return err;
1870}
1871
1872static int arm_smmu_device_remove(struct platform_device *pdev)
1873{
1874 int i;
1875 struct device *dev = &pdev->dev;
1876 struct arm_smmu_device *curr, *smmu = NULL;
1877 struct rb_node *node;
1878
1879 spin_lock(&arm_smmu_devices_lock);
1880 list_for_each_entry(curr, &arm_smmu_devices, list) {
1881 if (curr->dev == dev) {
1882 smmu = curr;
1883 list_del(&smmu->list);
1884 break;
1885 }
1886 }
1887 spin_unlock(&arm_smmu_devices_lock);
1888
1889 if (!smmu)
1890 return -ENODEV;
1891
Will Deacon45ae7cf2013-06-24 18:31:25 +01001892 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07001893 struct arm_smmu_master *master
1894 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001895 of_node_put(master->of_node);
1896 }
1897
Will Deaconecfadb62013-07-31 19:21:28 +01001898 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Will Deacon45ae7cf2013-06-24 18:31:25 +01001899 dev_err(dev, "removing device with active domains!\n");
1900
1901 for (i = 0; i < smmu->num_global_irqs; ++i)
1902 free_irq(smmu->irqs[i], smmu);
1903
1904 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07001905 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001906 return 0;
1907}
1908
Will Deacon45ae7cf2013-06-24 18:31:25 +01001909static struct platform_driver arm_smmu_driver = {
1910 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001911 .name = "arm-smmu",
1912 .of_match_table = of_match_ptr(arm_smmu_of_match),
1913 },
1914 .probe = arm_smmu_device_dt_probe,
1915 .remove = arm_smmu_device_remove,
1916};
1917
1918static int __init arm_smmu_init(void)
1919{
Thierry Reding0e7d37a2014-11-07 15:26:18 +00001920 struct device_node *np;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001921 int ret;
1922
Thierry Reding0e7d37a2014-11-07 15:26:18 +00001923 /*
1924 * Play nice with systems that don't have an ARM SMMU by checking that
1925 * an ARM SMMU exists in the system before proceeding with the driver
1926 * and IOMMU bus operation registration.
1927 */
1928 np = of_find_matching_node(NULL, arm_smmu_of_match);
1929 if (!np)
1930 return 0;
1931
1932 of_node_put(np);
1933
Will Deacon45ae7cf2013-06-24 18:31:25 +01001934 ret = platform_driver_register(&arm_smmu_driver);
1935 if (ret)
1936 return ret;
1937
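	/*
	 * Hook our ops up to every bus type we may find masters on,
	 * unless another IOMMU driver has already claimed the bus.
	 */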
1938 /* Oh, for a proper bus abstraction */
Dan Carpenter6614ee72013-08-21 09:34:20 +01001939 if (!iommu_present(&platform_bus_type))
Will Deacon45ae7cf2013-06-24 18:31:25 +01001940 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1941
Will Deacond123cf82014-02-04 22:17:53 +00001942#ifdef CONFIG_ARM_AMBA
Dan Carpenter6614ee72013-08-21 09:34:20 +01001943 if (!iommu_present(&amba_bustype))
Will Deacon45ae7cf2013-06-24 18:31:25 +01001944 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
Will Deacond123cf82014-02-04 22:17:53 +00001945#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01001946
Will Deacona9a1b0b2014-05-01 18:05:08 +01001947#ifdef CONFIG_PCI
1948 if (!iommu_present(&pci_bus_type))
1949 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
1950#endif
1951
Will Deacon45ae7cf2013-06-24 18:31:25 +01001952 return 0;
1953}
1954
1955static void __exit arm_smmu_exit(void)
1956{
1957	platform_driver_unregister(&arm_smmu_driver);
1958}
1959
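/* Register early so the SMMU is up before its masters start to probe */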
Andreas Herrmannb1950b22013-10-01 13:39:05 +01001960subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001961module_exit(arm_smmu_exit);
1962
1963MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
1964MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
1965MODULE_LICENSE("GPL v2");