/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

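/*
 * 64-bit registers are written atomically with writeq_relaxed() on
 * 64-bit builds.  The 32-bit fallback below issues two 32-bit writes,
 * upper half first, so it is not atomic; callers that need a single
 * 64-bit update (e.g. the ATS1* registers) only get one on 64-bit
 * hosts.
 */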
#ifdef CONFIG_64BIT
#define smmu_writeq		writeq_relaxed
#else
#define smmu_writeq(reg64, addr)				\
	do {							\
		u64 __val = (reg64);				\
		void __iomem *__addr = (addr);			\
		writel_relaxed(__val >> 32, __addr + 4);	\
		writel_relaxed(__val, __addr);			\
	} while (0)
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))
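
/*
 * The SMMU register space is split in two: the global register space
 * (GR0/GR1) occupies the bottom half and the translation context banks
 * the top half, each bank spanning one or more translation-unit pages
 * of (1 << pgshift) bytes.
 */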

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR_LO		0x50
#define ARM_SMMU_CB_PAR_HI		0x54
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1 = 1,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
};
#define INVALID_IRPTNDX			0xff

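/*
 * ASIDs and VMIDs are derived from the context bank index, offset by a
 * per-SMMU base so that they stay unique system-wide.  The base is only
 * non-zero on Cavium implementations, which require distinct ID ranges
 * across SMMU instances.
 */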
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

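/*
 * Implementation quirks are enabled via boolean DT properties listed in
 * arm_smmu_options[], e.g. (illustrative fragment):
 *
 *	smmu {
 *		compatible = "arm,smmu-v1";
 *		...
 *		calxeda,smmu-secure-config-access;
 *	};
 */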
static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

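/*
 * Masters are described by the legacy "mmu-masters" DT binding: each
 * entry pairs a master device phandle with that device's stream IDs,
 * e.g. (illustrative fragment, IDs made up):
 *
 *	smmu {
 *		...
 *		mmu-masters = <&dma0 0xd01d 0xd01e>,
 *			      <&pcie0 0x200>;
 *	};
 */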
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

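/*
 * Lockless bitmap allocator: find_next_zero_bit() is only a hint, so
 * the atomic test_and_set_bit() re-checks the slot and the loop retries
 * if another allocator won the race.
 */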
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

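/*
 * Per-page invalidation.  Stage 1 on SMMUv1 (or any 32-bit build)
 * writes VA[31:12] with the ASID in the bottom byte of TLBIVA(L);
 * 64-bit SMMUv2 writes VA >> 12 with the ASID in bits [63:48].
 * Stage 2 uses TLBIIPAS2(L) on 64-bit SMMUv2 and otherwise falls back
 * to invalidating the whole VMID.
 */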
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
			/* VA[31:12] in the upper bits, ASID in the bottom byte */
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
#ifdef CONFIG_64BIT
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
#endif
		}
#ifdef CONFIG_64BIT
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			writeq_relaxed(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
#endif
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

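/*
 * Context fault handling: read and clear the FSR, report the fault via
 * report_iommu_fault(), then retry the stalled transaction if a handler
 * claimed it or terminate it otherwise.
 */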
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == ARM_SMMU_V1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 * S1               N                S1
	 * S1               S1+S2            S1
	 * S1               S2               S2
	 * S1               S1               S1
	 * N                N                N
	 * N                S1+S2            S2
	 * N                S2               S2
	 * N                S1               S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S1;
		else
			fmt = ARM_32_LPAE_S1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S2;
		else
			fmt = ARM_32_LPAE_S2;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version == ARM_SMMU_V1) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

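/*
 * Stream mapping: each of a master's stream IDs is either programmed
 * into a Stream Match Register (stream-matching SMMUs) or used directly
 * to index the S2CRs (stream-indexing SMMUs).  The corresponding S2CR
 * then routes matching transactions to the domain's context bank.
 */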
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU.
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

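/*
 * Hardware address translation: write the page-aligned IOVA to ATS1PR
 * and poll ATSR until the walk completes, falling back to a software
 * page table walk on timeout.  The result is read back from PAR, which
 * flags faults via CB_PAR_F.
 */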
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;

	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

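/*
 * The hardware ATOS path is only usable for stage-1 domains; anything
 * else (or hardware without the feature) takes the software walk.
 */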
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

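/*
 * pci_for_each_dma_alias() callback: since returning 0 continues the
 * walk, *data ends up holding the last alias reported, i.e. the
 * requester ID emitted upstream of any aliasing bridges.
 */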
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

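/*
 * PCI devices sharing a group also share one master cfg, kept as the
 * group's iommudata; each device just contributes its stream ID,
 * de-duplicated because duplicate SIDs would create SMR conflicts.
 */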
static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

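/*
 * Group construction for the IOMMU core: PCI devices group by DMA
 * alias, everything else gets a group of its own, and the appropriate
 * init hook then hangs the master cfg off the resulting group.
 */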
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

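/*
 * Ops table handed to the IOMMU core. As a rough, illustrative sketch
 * of how a consumer ends up in these callbacks via the generic API:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (!iommu_attach_device(dom, dev))
 *		iommu_map(dom, iova, paddr, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE);
 *
 * which dispatches to arm_smmu_domain_alloc(), arm_smmu_attach_dev()
 * and arm_smmu_map() respectively.
 */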
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

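/*
 * Put the SMMU into a sane initial state: clear the global FSR, reset
 * every SMR/S2CR pair, disable all context banks and clear their FSRs,
 * then invalidate the TLBs before enabling the new global
 * configuration. The final sCR0 write is preceded by a TLB sync so the
 * invalidation is guaranteed to have completed.
 */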
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg;

	/* Clear the global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

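/* Decode the address-size encoding used by the SMMU ID registers */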
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version == ARM_SMMU_V1) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

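		/*
		 * Discover which SMR fields are implemented by writing
		 * all-ones to the ID and mask fields of SMR0 and reading
		 * the value back: unimplemented bits read as zero.
		 */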
		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x\n",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version == ARM_SMMU_V1) {
		smmu->va_size = smmu->ipa_size;
		size = SZ_4K | SZ_2M | SZ_1G;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
#ifndef CONFIG_64BIT
		smmu->va_size = min(32UL, smmu->va_size);
#endif
		size = 0;
		if (id & ID2_PTFS_4K)
			size |= SZ_4K | SZ_2M | SZ_1G;
		if (id & ID2_PTFS_16K)
			size |= SZ_16K | SZ_32M;
		if (id & ID2_PTFS_64K)
			size |= SZ_64K | SZ_512M;
	}

	arm_smmu_ops.pgsize_bitmap &= size;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-500", .data = &smmu_generic_v2 },
	{ .compatible = "cavium,smmu-v2", .data = &smmu_generic_v2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

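/*
 * An illustrative device-tree node for this binding, adapted from
 * Documentation/devicetree/bindings/iommu/arm,smmu.txt, showing the
 * properties the probe routine below consumes (addresses and stream
 * IDs are examples only):
 *
 *	smmu {
 *		compatible = "arm,smmu-v1";
 *		reg = <0xba5e0000 0x10000>;
 *		#global-interrupts = <2>;
 *		interrupts = <0 32 4>, <0 33 4>,	// global faults
 *			     <0 34 4>, <0 35 4>;	// context banks
 *		mmu-masters = <&dma0 0xd01d 0xd01e>;
 *	};
 */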
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	parse_driver_options(smmu);

	if (smmu->version > ARM_SMMU_V1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (of_device_is_compatible(dev->of_node, "cavium,smmu-v2")) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");