/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))
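
/*
 * Note: GR0 sits at the very bottom of the SMMU register map and GR1 one
 * "page" above it, where the page size (1 << pgshift) is discovered from
 * the hardware ID registers rather than being the CPU page size.
 */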

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff
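
/*
 * Conceptually, an SMR matches an incoming StreamID against 'id' while
 * ignoring any bits set in 'mask', roughly:
 *
 *	match = !((sid ^ id) & ~mask);
 *
 * This driver always programs a mask of zero, i.e. exact matches only.
 */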

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

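/*
 * For example, the registers of context bank n therefore live at:
 *
 *	smmu->base + (smmu->size >> 1) + n * (1 << smmu->pgshift)
 *
 * i.e. the context banks occupy the upper half of the SMMU address space.
 */
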
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

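/* Values written to CB_RESUME to retry or abort a stalled transaction */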
#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

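/* The ASID occupies bits [63:48] of the 64-bit TTBRn registers */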
#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

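/*
 * FSR_IGN covers fault types the context fault handler flags as unexpected;
 * FSR_FAULT is the full set of fault syndrome bits this driver recognises.
 */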
#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

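/*
 * ASIDs and VMIDs are derived directly from the context bank index.
 * cavium_id_base is zero on most implementations, but Cavium ThunderX
 * SMMUs share a single ASID/VMID space across instances, so each SMMU
 * offsets its IDs by a globally-allocated base to keep them unique.
 */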
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

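/*
 * Allocate a free index from 'map' without taking a lock: if another CPU
 * claims the same bit between the search and the atomic test_and_set, we
 * simply search again.
 */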
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			/* Mask off the page offset before merging in the ASID */
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

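/*
 * Callbacks for the io-pgtable code: roughly speaking, tlb_add_flush is
 * invoked as entries are unmapped (without waiting for completion),
 * tlb_sync publishes any outstanding invalidations, and tlb_flush_all
 * drops the whole context by ASID/VMID when that is cheaper than
 * invalidating by address.
 */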
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

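/*
 * Program a freshly-allocated context bank: CBA2R (register width and
 * 16-bit VMID), CBAR (translation type and interrupt index), the TTBRs
 * and TTBCR from the io-pgtable configuration, the MAIRs for stage 1,
 * and finally SCTLR to enable translation and fault reporting.
 */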
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 *	Requested	Supported	Actual
	 *	S1		N		S1
	 *	S1		S1+S2		S1
	 *	S1		S2		S2
	 *	S1		S1		S1
	 *	N		N		N
	 *	N		S1+S2		S2
	 *	N		S2		S2
	 *	N		S1		S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU.
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

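	/* Point each of the master's StreamIDs at this domain's context bank */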
Will Deacona9a1b0b2014-05-01 18:05:08 +01001153 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001154 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001155
Will Deacona9a1b0b2014-05-01 18:05:08 +01001156 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphyd3461802016-01-26 18:06:34 +00001157 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
Will Deacon44680ee2014-06-25 11:29:12 +01001158 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001159 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1160 }
1161
1162 return 0;
1163}
1164
1165static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001166 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001167{
Will Deacon43b412b2014-07-15 11:22:24 +01001168 int i;
Will Deacon44680ee2014-06-25 11:29:12 +01001169 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon43b412b2014-07-15 11:22:24 +01001170 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001171
Will Deacon8f68f8e2014-07-15 11:27:08 +01001172 /* An IOMMU group is torn down by the first device to be removed */
1173 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1174 return;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001175
1176 /*
1177 * We *must* clear the S2CR first, because freeing the SMR means
1178 * that it can be re-allocated immediately.
1179 */
Will Deacon43b412b2014-07-15 11:22:24 +01001180 for (i = 0; i < cfg->num_streamids; ++i) {
1181 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphy25a1c962016-02-10 14:25:33 +00001182 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon43b412b2014-07-15 11:22:24 +01001183
Robin Murphy25a1c962016-02-10 14:25:33 +00001184 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
Will Deacon43b412b2014-07-15 11:22:24 +01001185 }
1186
Will Deacona9a1b0b2014-05-01 18:05:08 +01001187 arm_smmu_master_free_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001188}
1189
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001190static void arm_smmu_detach_dev(struct device *dev,
1191 struct arm_smmu_master_cfg *cfg)
1192{
1193 struct iommu_domain *domain = dev->archdata.iommu;
1194 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1195
1196 dev->archdata.iommu = NULL;
1197 arm_smmu_domain_remove_master(smmu_domain, cfg);
1198}
1199
Will Deacon45ae7cf2013-06-24 18:31:25 +01001200static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1201{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001202 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01001203 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001204 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001205 struct arm_smmu_master_cfg *cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001206
Will Deacon8f68f8e2014-07-15 11:27:08 +01001207 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01001208 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001209 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1210 return -ENXIO;
1211 }
1212
Will Deacon518f7132014-11-14 17:17:54 +00001213 /* Ensure that the domain is finalised */
1214 ret = arm_smmu_init_domain_context(domain, smmu);
1215 if (IS_ERR_VALUE(ret))
1216 return ret;
1217
Will Deacon45ae7cf2013-06-24 18:31:25 +01001218 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001219 * Sanity check the domain. We don't support domains across
1220 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001221 */
Will Deacon518f7132014-11-14 17:17:54 +00001222 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001223 dev_err(dev,
1224 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001225 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1226 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001227 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001228
1229 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01001230 cfg = find_smmu_master_cfg(dev);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001231 if (!cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001232 return -ENODEV;
1233
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001234 /* Detach the dev from its current domain */
1235 if (dev->archdata.iommu)
1236 arm_smmu_detach_dev(dev, cfg);
1237
Will Deacon844e35b2014-07-17 11:23:51 +01001238 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
1239 if (!ret)
1240 dev->archdata.iommu = domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001241 return ret;
1242}
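
/*
 * Illustrative consumer-side use of the attach path above (a hedged
 * sketch, not driver code; "dev" is assumed to be a master with an
 * mmu-masters entry for this SMMU):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device(domain, dev);
 */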
1243
Will Deacon45ae7cf2013-06-24 18:31:25 +01001244static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001245 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001246{
Will Deacon518f7132014-11-14 17:17:54 +00001247 int ret;
1248 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001249 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001250	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001251
Will Deacon518f7132014-11-14 17:17:54 +00001252 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001253 return -ENODEV;
1254
Will Deacon518f7132014-11-14 17:17:54 +00001255 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1256 ret = ops->map(ops, iova, paddr, size, prot);
1257 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1258 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001259}
1260
1261static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1262 size_t size)
1263{
Will Deacon518f7132014-11-14 17:17:54 +00001264 size_t ret;
1265 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001266 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001267	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001268
Will Deacon518f7132014-11-14 17:17:54 +00001269 if (!ops)
1270 return 0;
1271
1272 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1273 ret = ops->unmap(ops, iova, size);
1274 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1275 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001276}
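
/*
 * Example pairing for the two callbacks above, as reached through the
 * generic API (a sketch; iova, paddr and size must be aligned to an
 * entry in arm_smmu_ops.pgsize_bitmap or the core will reject them):
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE);
 *	...
 *	unmapped = iommu_unmap(domain, iova, SZ_4K);
 */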
1277
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001278static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1279 dma_addr_t iova)
1280{
Joerg Roedel1d672632015-03-26 13:43:10 +01001281 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001282 struct arm_smmu_device *smmu = smmu_domain->smmu;
1283 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 1284	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1285 struct device *dev = smmu->dev;
1286 void __iomem *cb_base;
1287 u32 tmp;
1288 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01001289 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001290
1291 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1292
Robin Murphy661d9622015-05-27 17:09:34 +01001293 /* ATS1 registers can only be written atomically */
1294 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001295 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001296 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1297 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001298 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001299
1300 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1301 !(tmp & ATSR_ACTIVE), 5, 50)) {
1302 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001303 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001304 &iova);
1305 return ops->iova_to_phys(ops, iova);
1306 }
1307
Robin Murphyf9a05f02016-04-13 18:13:01 +01001308 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001309 if (phys & CB_PAR_F) {
1310 dev_err(dev, "translation fault!\n");
1311 dev_err(dev, "PAR = 0x%llx\n", phys);
1312 return 0;
1313 }
1314
1315 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1316}
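
/*
 * Worked example for the PAR parsing above, assuming a 4K granule:
 * iova = 0x12345678 leaves page offset 0x678; if PAR[39:12] reads back
 * as 0x8badf, the function returns 0x8badf000 | 0x678 = 0x8badf678.
 */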
1317
Will Deacon45ae7cf2013-06-24 18:31:25 +01001318static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001319 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001320{
Will Deacon518f7132014-11-14 17:17:54 +00001321 phys_addr_t ret;
1322 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001323 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001324	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001325
Will Deacon518f7132014-11-14 17:17:54 +00001326 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001327 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001328
Will Deacon518f7132014-11-14 17:17:54 +00001329 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001330 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1331 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001332 ret = arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001333 } else {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001334 ret = ops->iova_to_phys(ops, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001335 }
1336
Will Deacon518f7132014-11-14 17:17:54 +00001337 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001338
Will Deacon518f7132014-11-14 17:17:54 +00001339 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001340}
1341
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001342static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001343{
Will Deacond0948942014-06-24 17:30:10 +01001344 switch (cap) {
1345 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001346 /*
1347 * Return true here as the SMMU can always send out coherent
1348 * requests.
1349 */
1350 return true;
Will Deacond0948942014-06-24 17:30:10 +01001351 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001352 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001353 case IOMMU_CAP_NOEXEC:
1354 return true;
Will Deacond0948942014-06-24 17:30:10 +01001355 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001356 return false;
Will Deacond0948942014-06-24 17:30:10 +01001357 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001358}
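
/*
 * Callers reach this via the generic API, e.g. a sketch of a VFIO-style
 * coherency check:
 *
 *	coherent = iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);
 */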
Will Deacon45ae7cf2013-06-24 18:31:25 +01001359
Will Deacona9a1b0b2014-05-01 18:05:08 +01001360static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1361{
1362 *((u16 *)data) = alias;
1363 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001364}
1365
Will Deacon8f68f8e2014-07-15 11:27:08 +01001366static void __arm_smmu_release_pci_iommudata(void *data)
1367{
1368 kfree(data);
1369}
1370
Joerg Roedelaf659932015-10-21 23:51:41 +02001371static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1372 struct iommu_group *group)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001373{
Will Deacon03edb222015-01-19 14:27:33 +00001374 struct arm_smmu_master_cfg *cfg;
Joerg Roedelaf659932015-10-21 23:51:41 +02001375 u16 sid;
1376 int i;
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001377
Will Deacon03edb222015-01-19 14:27:33 +00001378 cfg = iommu_group_get_iommudata(group);
1379 if (!cfg) {
Will Deacona9a1b0b2014-05-01 18:05:08 +01001380 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001381 if (!cfg)
1382 return -ENOMEM;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001383
Will Deacon03edb222015-01-19 14:27:33 +00001384 iommu_group_set_iommudata(group, cfg,
1385 __arm_smmu_release_pci_iommudata);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001386 }
1387
Joerg Roedelaf659932015-10-21 23:51:41 +02001388 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
1389 return -ENOSPC;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001390
Will Deacon03edb222015-01-19 14:27:33 +00001391 /*
1392 * Assume Stream ID == Requester ID for now.
1393 * We need a way to describe the ID mappings in FDT.
1394 */
1395 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1396 for (i = 0; i < cfg->num_streamids; ++i)
1397 if (cfg->streamids[i] == sid)
1398 break;
1399
1400 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1401 if (i == cfg->num_streamids)
1402 cfg->streamids[cfg->num_streamids++] = sid;
1403
1404 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001405}
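
/*
 * A hedged illustration of the "Stream ID == Requester ID" assumption
 * above: for a function at bus 5, slot 0, function 1 with no DMA
 * aliases, pci_for_each_dma_alias() reports
 * sid = PCI_DEVID(5, PCI_DEVFN(0, 1)) = 0x0501, and that is the value
 * the SMRs must match.
 */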
1406
Joerg Roedelaf659932015-10-21 23:51:41 +02001407static int arm_smmu_init_platform_device(struct device *dev,
1408 struct iommu_group *group)
Will Deacon03edb222015-01-19 14:27:33 +00001409{
Will Deacon03edb222015-01-19 14:27:33 +00001410 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
Joerg Roedelaf659932015-10-21 23:51:41 +02001411 struct arm_smmu_master *master;
Will Deacon03edb222015-01-19 14:27:33 +00001412
1413 if (!smmu)
1414 return -ENODEV;
1415
1416 master = find_smmu_master(smmu, dev->of_node);
1417 if (!master)
1418 return -ENODEV;
1419
Will Deacon03edb222015-01-19 14:27:33 +00001420 iommu_group_set_iommudata(group, &master->cfg, NULL);
Joerg Roedelaf659932015-10-21 23:51:41 +02001421
1422 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001423}
1424
1425static int arm_smmu_add_device(struct device *dev)
1426{
Joerg Roedelaf659932015-10-21 23:51:41 +02001427 struct iommu_group *group;
Will Deacon03edb222015-01-19 14:27:33 +00001428
Joerg Roedelaf659932015-10-21 23:51:41 +02001429 group = iommu_group_get_for_dev(dev);
1430 if (IS_ERR(group))
1431 return PTR_ERR(group);
1432
Peng Fan9a4a9d82015-11-20 16:56:18 +08001433 iommu_group_put(group);
Joerg Roedelaf659932015-10-21 23:51:41 +02001434 return 0;
Will Deacon03edb222015-01-19 14:27:33 +00001435}
1436
Will Deacon45ae7cf2013-06-24 18:31:25 +01001437static void arm_smmu_remove_device(struct device *dev)
1438{
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001439 iommu_group_remove_device(dev);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001440}
1441
Joerg Roedelaf659932015-10-21 23:51:41 +02001442static struct iommu_group *arm_smmu_device_group(struct device *dev)
1443{
1444 struct iommu_group *group;
1445 int ret;
1446
1447 if (dev_is_pci(dev))
1448 group = pci_device_group(dev);
1449 else
1450 group = generic_device_group(dev);
1451
1452 if (IS_ERR(group))
1453 return group;
1454
1455 if (dev_is_pci(dev))
1456 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1457 else
1458 ret = arm_smmu_init_platform_device(dev, group);
1459
1460 if (ret) {
1461 iommu_group_put(group);
1462 group = ERR_PTR(ret);
1463 }
1464
1465 return group;
1466}
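
/*
 * Note: pci_device_group() above is what makes DMA aliasing safe here;
 * e.g. two functions behind a PCIe-to-PCI bridge land in the same
 * iommu_group and therefore share one arm_smmu_master_cfg, with the
 * duplicate-SID filtering in arm_smmu_init_pci_device() keeping the
 * SMRs conflict-free.
 */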
1467
Will Deaconc752ce42014-06-25 22:46:31 +01001468static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1469 enum iommu_attr attr, void *data)
1470{
Joerg Roedel1d672632015-03-26 13:43:10 +01001471 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001472
1473 switch (attr) {
1474 case DOMAIN_ATTR_NESTING:
1475 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1476 return 0;
1477 default:
1478 return -ENODEV;
1479 }
1480}
1481
1482static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1483 enum iommu_attr attr, void *data)
1484{
Will Deacon518f7132014-11-14 17:17:54 +00001485 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001486 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001487
Will Deacon518f7132014-11-14 17:17:54 +00001488 mutex_lock(&smmu_domain->init_mutex);
1489
Will Deaconc752ce42014-06-25 22:46:31 +01001490 switch (attr) {
1491 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00001492 if (smmu_domain->smmu) {
1493 ret = -EPERM;
1494 goto out_unlock;
1495 }
1496
Will Deaconc752ce42014-06-25 22:46:31 +01001497 if (*(int *)data)
1498 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1499 else
1500 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1501
Will Deacon518f7132014-11-14 17:17:54 +00001502 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001503 default:
Will Deacon518f7132014-11-14 17:17:54 +00001504 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01001505 }
Will Deacon518f7132014-11-14 17:17:54 +00001506
1507out_unlock:
1508 mutex_unlock(&smmu_domain->init_mutex);
1509 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001510}
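
/*
 * Example (sketch): nesting must be requested while the domain is
 * still unattached, since attaching finalises the stage:
 *
 *	int nesting = 1;
 *
 *	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
 *	if (!ret)
 *		ret = iommu_attach_device(domain, dev);
 */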
1511
Will Deacon518f7132014-11-14 17:17:54 +00001512static struct iommu_ops arm_smmu_ops = {
Will Deaconc752ce42014-06-25 22:46:31 +01001513 .capable = arm_smmu_capable,
Joerg Roedel1d672632015-03-26 13:43:10 +01001514 .domain_alloc = arm_smmu_domain_alloc,
1515 .domain_free = arm_smmu_domain_free,
Will Deaconc752ce42014-06-25 22:46:31 +01001516 .attach_dev = arm_smmu_attach_dev,
Will Deaconc752ce42014-06-25 22:46:31 +01001517 .map = arm_smmu_map,
1518 .unmap = arm_smmu_unmap,
Joerg Roedel76771c92014-12-02 13:07:13 +01001519 .map_sg = default_iommu_map_sg,
Will Deaconc752ce42014-06-25 22:46:31 +01001520 .iova_to_phys = arm_smmu_iova_to_phys,
1521 .add_device = arm_smmu_add_device,
1522 .remove_device = arm_smmu_remove_device,
Joerg Roedelaf659932015-10-21 23:51:41 +02001523 .device_group = arm_smmu_device_group,
Will Deaconc752ce42014-06-25 22:46:31 +01001524 .domain_get_attr = arm_smmu_domain_get_attr,
1525 .domain_set_attr = arm_smmu_domain_set_attr,
Will Deacon518f7132014-11-14 17:17:54 +00001526 .pgsize_bitmap = -1UL, /* Restricted during device attach */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001527};
1528
1529static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1530{
1531 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001532 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001533 int i = 0;
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001534 u32 reg;
1535
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001536 /* clear global FSR */
1537 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1538 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001539
Robin Murphy25a1c962016-02-10 14:25:33 +00001540 /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
1541 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001542 for (i = 0; i < smmu->num_mapping_groups; ++i) {
Olav Haugan3c8766d2014-08-22 17:12:32 -07001543 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
Robin Murphy25a1c962016-02-10 14:25:33 +00001544 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
Will Deacon45ae7cf2013-06-24 18:31:25 +01001545 }
1546
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001547 /* Make sure all context banks are disabled and clear CB_FSR */
1548 for (i = 0; i < smmu->num_context_banks; ++i) {
1549 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
1550 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1551 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001552 /*
1553 * Disable MMU-500's not-particularly-beneficial next-page
1554 * prefetcher for the sake of errata #841119 and #826419.
1555 */
1556 if (smmu->model == ARM_MMU500) {
1557 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1558 reg &= ~ARM_MMU500_ACTLR_CPRE;
1559 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1560 }
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001561 }
Will Deacon1463fe42013-07-31 19:21:27 +01001562
Will Deacon45ae7cf2013-06-24 18:31:25 +01001563 /* Invalidate the TLB, just in case */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001564 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1565 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1566
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001567 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001568
Will Deacon45ae7cf2013-06-24 18:31:25 +01001569 /* Enable fault reporting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001570 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001571
1572 /* Disable TLB broadcasting. */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001573 reg |= (sCR0_VMIDPNE | sCR0_PTM);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001574
Robin Murphy25a1c962016-02-10 14:25:33 +00001575 /* Enable client access, handling unmatched streams as appropriate */
1576 reg &= ~sCR0_CLIENTPD;
1577 if (disable_bypass)
1578 reg |= sCR0_USFCFG;
1579 else
1580 reg &= ~sCR0_USFCFG;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001581
1582 /* Disable forced broadcasting */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001583 reg &= ~sCR0_FB;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001584
1585 /* Don't upgrade barriers */
Andreas Herrmann659db6f2013-10-01 13:39:09 +01001586 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001587
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001588 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1589 reg |= sCR0_VMID16EN;
1590
Will Deacon45ae7cf2013-06-24 18:31:25 +01001591 /* Push the button */
Will Deacon518f7132014-11-14 17:17:54 +00001592 __arm_smmu_tlb_sync(smmu);
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001593 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001594}
1595
1596static int arm_smmu_id_size_to_bits(int size)
1597{
1598 switch (size) {
1599 case 0:
1600 return 32;
1601 case 1:
1602 return 36;
1603 case 2:
1604 return 40;
1605 case 3:
1606 return 42;
1607 case 4:
1608 return 44;
1609 case 5:
1610 default:
1611 return 48;
1612 }
1613}
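
/*
 * An equivalent table-driven form of the switch above (illustrative
 * only):
 *
 *	static const u8 oas_bits[] = { 32, 36, 40, 42, 44, 48 };
 *
 *	return oas_bits[min(size, 5)];
 */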
1614
1615static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1616{
1617 unsigned long size;
1618 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1619 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01001620 bool cttw_dt, cttw_reg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001621
1622 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001623 dev_notice(smmu->dev, "SMMUv%d with:\n",
1624 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001625
1626 /* ID0 */
1627 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001628
1629 /* Restrict available stages based on module parameter */
1630 if (force_stage == 1)
1631 id &= ~(ID0_S2TS | ID0_NTS);
1632 else if (force_stage == 2)
1633 id &= ~(ID0_S1TS | ID0_NTS);
1634
Will Deacon45ae7cf2013-06-24 18:31:25 +01001635 if (id & ID0_S1TS) {
1636 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1637 dev_notice(smmu->dev, "\tstage 1 translation\n");
1638 }
1639
1640 if (id & ID0_S2TS) {
1641 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1642 dev_notice(smmu->dev, "\tstage 2 translation\n");
1643 }
1644
1645 if (id & ID0_NTS) {
1646 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1647 dev_notice(smmu->dev, "\tnested translation\n");
1648 }
1649
1650 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001651 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001652 dev_err(smmu->dev, "\tno translation support!\n");
1653 return -ENODEV;
1654 }
1655
Robin Murphyb7862e32016-04-13 18:13:03 +01001656 if ((id & ID0_S1TS) &&
1657 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001658 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1659 dev_notice(smmu->dev, "\taddress translation ops\n");
1660 }
1661
Robin Murphybae2c2d2015-07-29 19:46:05 +01001662 /*
1663 * In order for DMA API calls to work properly, we must defer to what
1664 * the DT says about coherency, regardless of what the hardware claims.
1665 * Fortunately, this also opens up a workaround for systems where the
1666 * ID register value has ended up configured incorrectly.
1667 */
1668 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
1669 cttw_reg = !!(id & ID0_CTTW);
1670 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001671 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01001672 if (cttw_dt || cttw_reg)
1673 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1674 cttw_dt ? "" : "non-");
1675 if (cttw_dt != cttw_reg)
1676 dev_notice(smmu->dev,
1677 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001678
1679 if (id & ID0_SMS) {
1680 u32 smr, sid, mask;
1681
1682 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1683 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
1684 ID0_NUMSMRG_MASK;
1685 if (smmu->num_mapping_groups == 0) {
1686 dev_err(smmu->dev,
1687 "stream-matching supported, but no SMRs present!\n");
1688 return -ENODEV;
1689 }
1690
1691 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
1692 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
1693 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1694 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1695
1696 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
1697 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
1698 if ((mask & sid) != sid) {
1699 dev_err(smmu->dev,
1700 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
1701 mask, sid);
1702 return -ENODEV;
1703 }
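
		/*
		 * Worked example: an implementation wiring only 10 mask
		 * bits reads back mask = 0x3ff here; if all 15 ID bits
		 * are wired (sid = 0x7fff), then mask & sid = 0x3ff and
		 * the check above correctly rejects the configuration.
		 */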
1704
1705 dev_notice(smmu->dev,
1706 "\tstream matching with %u register groups, mask 0x%x",
1707 smmu->num_mapping_groups, mask);
Olav Haugan3c8766d2014-08-22 17:12:32 -07001708 } else {
1709 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
1710 ID0_NUMSIDB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001711 }
1712
Robin Murphy7602b872016-04-28 17:12:09 +01001713 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1714 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1715 if (!(id & ID0_PTFS_NO_AARCH32S))
1716 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1717 }
1718
Will Deacon45ae7cf2013-06-24 18:31:25 +01001719 /* ID1 */
1720 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001721 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001722
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001723 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001724 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01001725 size *= 2 << smmu->pgshift;
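	/*
	 * Worked example: ID1.NUMPAGENDXB = 3 with 64K pages (pgshift = 16)
	 * gives size = (1 << 4) * (2 << 16) = 16 * 128K = 2MB of register
	 * space.
	 */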
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001726 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001727 dev_warn(smmu->dev,
1728 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1729 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001730
Will Deacon518f7132014-11-14 17:17:54 +00001731 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001732 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1733 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1734 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1735 return -ENODEV;
1736 }
1737 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1738 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001739 /*
1740 * Cavium CN88xx erratum #27704.
1741 * Ensure ASID and VMID allocation is unique across all SMMUs in
1742 * the system.
1743 */
1744 if (smmu->model == CAVIUM_SMMUV2) {
1745 smmu->cavium_id_base =
1746 atomic_add_return(smmu->num_context_banks,
1747 &cavium_smmu_context_count);
1748 smmu->cavium_id_base -= smmu->num_context_banks;
1749 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001750
1751 /* ID2 */
1752 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1753 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001754 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001755
Will Deacon518f7132014-11-14 17:17:54 +00001756 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001757 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001758 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001759
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001760 if (id & ID2_VMID16)
1761 smmu->features |= ARM_SMMU_FEAT_VMID16;
1762
Robin Murphyf1d84542015-03-04 16:41:05 +00001763 /*
1764 * What the page table walker can address actually depends on which
1765 * descriptor format is in use, but since a) we don't know that yet,
1766 * and b) it can vary per context bank, this will have to do...
1767 */
1768 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1769 dev_warn(smmu->dev,
1770 "failed to set DMA mask for table walker\n");
1771
Robin Murphyb7862e32016-04-13 18:13:03 +01001772 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001773 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001774 if (smmu->version == ARM_SMMU_V1_64K)
1775 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001776 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001777 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00001778 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001779 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001780 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001781 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001782 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001783 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001784 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001785 }
1786
Robin Murphy7602b872016-04-28 17:12:09 +01001787 /* Now we've corralled the various formats, what'll it do? */
1788 size = 0;
1789 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1790 size |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1791 if (smmu->features &
1792 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
1793 size |= SZ_4K | SZ_2M | SZ_1G;
1794 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1795 size |= SZ_16K | SZ_32M;
1796 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1797 size |= SZ_64K | SZ_512M;
1798
Will Deacon518f7132014-11-14 17:17:54 +00001799 arm_smmu_ops.pgsize_bitmap &= size;
1800 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
1801
Will Deacon28d60072014-09-01 16:24:48 +01001802 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1803 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001804 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001805
1806 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1807 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001808 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001809
Will Deacon45ae7cf2013-06-24 18:31:25 +01001810 return 0;
1811}
1812
Robin Murphy67b65a32016-04-13 18:12:57 +01001813struct arm_smmu_match_data {
1814 enum arm_smmu_arch_version version;
1815 enum arm_smmu_implementation model;
1816};
1817
1818#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
1819static struct arm_smmu_match_data name = { .version = ver, .model = imp }
1820
1821ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1822ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
Robin Murphyb7862e32016-04-13 18:13:03 +01001823ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001824ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
Robin Murphye086d912016-04-13 18:12:58 +01001825ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
Robin Murphy67b65a32016-04-13 18:12:57 +01001826
Joerg Roedel09b52692014-10-02 12:24:45 +02001827static const struct of_device_id arm_smmu_of_match[] = {
Robin Murphy67b65a32016-04-13 18:12:57 +01001828 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1829 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1830 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
Robin Murphyb7862e32016-04-13 18:13:03 +01001831 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
Robin Murphyf0cfffc2016-04-13 18:12:59 +01001832 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
Robin Murphye086d912016-04-13 18:12:58 +01001833 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
Robin Murphy09360402014-08-28 17:51:59 +01001834 { },
1835};
1836MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
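
/*
 * Example (sketch) of a device-tree node matched by the table above and
 * parsed by arm_smmu_device_dt_probe() below; addresses, interrupts and
 * stream IDs are made up:
 *
 *	smmu@2b400000 {
 *		compatible = "arm,mmu-500";
 *		reg = <0x2b400000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 73 4>, <0 74 4>, <0 75 4>;
 *		mmu-masters = <&dma0 0xd01d>;
 *	};
 *
 * where the &dma0 node carries #stream-id-cells = <1>.
 */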
1837
Will Deacon45ae7cf2013-06-24 18:31:25 +01001838static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1839{
Robin Murphy09360402014-08-28 17:51:59 +01001840 const struct of_device_id *of_id;
Robin Murphy67b65a32016-04-13 18:12:57 +01001841 const struct arm_smmu_match_data *data;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001842 struct resource *res;
1843 struct arm_smmu_device *smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001844 struct device *dev = &pdev->dev;
1845 struct rb_node *node;
1846 struct of_phandle_args masterspec;
1847 int num_irqs, i, err;
1848
1849 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1850 if (!smmu) {
1851 dev_err(dev, "failed to allocate arm_smmu_device\n");
1852 return -ENOMEM;
1853 }
1854 smmu->dev = dev;
1855
Robin Murphy09360402014-08-28 17:51:59 +01001856 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
Robin Murphy67b65a32016-04-13 18:12:57 +01001857 data = of_id->data;
1858 smmu->version = data->version;
1859 smmu->model = data->model;
Robin Murphy09360402014-08-28 17:51:59 +01001860
Will Deacon45ae7cf2013-06-24 18:31:25 +01001861 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Julia Lawall8a7f4312013-08-19 12:20:37 +01001862 smmu->base = devm_ioremap_resource(dev, res);
1863 if (IS_ERR(smmu->base))
1864 return PTR_ERR(smmu->base);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001865 smmu->size = resource_size(res);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001866
1867 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1868 &smmu->num_global_irqs)) {
1869 dev_err(dev, "missing #global-interrupts property\n");
1870 return -ENODEV;
1871 }
1872
1873 num_irqs = 0;
1874 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
1875 num_irqs++;
1876 if (num_irqs > smmu->num_global_irqs)
1877 smmu->num_context_irqs++;
1878 }
1879
Andreas Herrmann44a08de2013-10-01 13:39:07 +01001880 if (!smmu->num_context_irqs) {
1881 dev_err(dev, "found %d interrupts but expected at least %d\n",
1882 num_irqs, smmu->num_global_irqs + 1);
1883 return -ENODEV;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001884 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001885
1886 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
1887 GFP_KERNEL);
1888 if (!smmu->irqs) {
1889 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
1890 return -ENOMEM;
1891 }
1892
1893 for (i = 0; i < num_irqs; ++i) {
1894 int irq = platform_get_irq(pdev, i);
Mitchel Humpherys29073202014-07-08 09:52:18 -07001895
Will Deacon45ae7cf2013-06-24 18:31:25 +01001896 if (irq < 0) {
1897 dev_err(dev, "failed to get irq index %d\n", i);
1898 return -ENODEV;
1899 }
1900 smmu->irqs[i] = irq;
1901 }
1902
Olav Haugan3c8766d2014-08-22 17:12:32 -07001903 err = arm_smmu_device_cfg_probe(smmu);
1904 if (err)
1905 return err;
1906
Will Deacon45ae7cf2013-06-24 18:31:25 +01001907 i = 0;
1908 smmu->masters = RB_ROOT;
1909 while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
1910 "#stream-id-cells", i,
1911 &masterspec)) {
1912 err = register_smmu_master(smmu, dev, &masterspec);
1913 if (err) {
1914 dev_err(dev, "failed to add master %s\n",
1915 masterspec.np->name);
1916 goto out_put_masters;
1917 }
1918
1919 i++;
1920 }
1921 dev_notice(dev, "registered %d master devices\n", i);
1922
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +00001923 parse_driver_options(smmu);
1924
Robin Murphyb7862e32016-04-13 18:13:03 +01001925 if (smmu->version == ARM_SMMU_V2 &&
Will Deacon45ae7cf2013-06-24 18:31:25 +01001926 smmu->num_context_banks != smmu->num_context_irqs) {
1927 dev_err(dev,
1928 "found only %d context interrupt(s) but %d required\n",
1929 smmu->num_context_irqs, smmu->num_context_banks);
Wei Yongjun89a23cde2013-11-15 09:42:30 +00001930 err = -ENODEV;
Will Deacon44680ee2014-06-25 11:29:12 +01001931 goto out_put_masters;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001932 }
1933
Will Deacon45ae7cf2013-06-24 18:31:25 +01001934 for (i = 0; i < smmu->num_global_irqs; ++i) {
1935 err = request_irq(smmu->irqs[i],
1936 arm_smmu_global_fault,
1937 IRQF_SHARED,
1938 "arm-smmu global fault",
1939 smmu);
1940 if (err) {
1941 dev_err(dev, "failed to request global IRQ %d (%u)\n",
1942 i, smmu->irqs[i]);
1943 goto out_free_irqs;
1944 }
1945 }
1946
1947 INIT_LIST_HEAD(&smmu->list);
1948 spin_lock(&arm_smmu_devices_lock);
1949 list_add(&smmu->list, &arm_smmu_devices);
1950 spin_unlock(&arm_smmu_devices_lock);
Will Deaconfd90cec2013-08-21 13:56:34 +01001951
1952 arm_smmu_device_reset(smmu);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001953 return 0;
1954
1955out_free_irqs:
1956 while (i--)
1957 free_irq(smmu->irqs[i], smmu);
1958
Will Deacon45ae7cf2013-06-24 18:31:25 +01001959out_put_masters:
1960 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07001961 struct arm_smmu_master *master
1962 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001963 of_node_put(master->of_node);
1964 }
1965
1966 return err;
1967}
1968
1969static int arm_smmu_device_remove(struct platform_device *pdev)
1970{
1971 int i;
1972 struct device *dev = &pdev->dev;
1973 struct arm_smmu_device *curr, *smmu = NULL;
1974 struct rb_node *node;
1975
1976 spin_lock(&arm_smmu_devices_lock);
1977 list_for_each_entry(curr, &arm_smmu_devices, list) {
1978 if (curr->dev == dev) {
1979 smmu = curr;
1980 list_del(&smmu->list);
1981 break;
1982 }
1983 }
1984 spin_unlock(&arm_smmu_devices_lock);
1985
1986 if (!smmu)
1987 return -ENODEV;
1988
Will Deacon45ae7cf2013-06-24 18:31:25 +01001989 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
Mitchel Humpherys29073202014-07-08 09:52:18 -07001990 struct arm_smmu_master *master
1991 = container_of(node, struct arm_smmu_master, node);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001992 of_node_put(master->of_node);
1993 }
1994
Will Deaconecfadb62013-07-31 19:21:28 +01001995 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
Will Deacon45ae7cf2013-06-24 18:31:25 +01001996 dev_err(dev, "removing device with active domains!\n");
1997
1998 for (i = 0; i < smmu->num_global_irqs; ++i)
1999 free_irq(smmu->irqs[i], smmu);
2000
2001 /* Turn the thing off */
Mitchel Humpherys29073202014-07-08 09:52:18 -07002002 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002003 return 0;
2004}
2005
Will Deacon45ae7cf2013-06-24 18:31:25 +01002006static struct platform_driver arm_smmu_driver = {
2007 .driver = {
Will Deacon45ae7cf2013-06-24 18:31:25 +01002008 .name = "arm-smmu",
2009 .of_match_table = of_match_ptr(arm_smmu_of_match),
2010 },
2011 .probe = arm_smmu_device_dt_probe,
2012 .remove = arm_smmu_device_remove,
2013};
2014
2015static int __init arm_smmu_init(void)
2016{
Thierry Reding0e7d37a2014-11-07 15:26:18 +00002017 struct device_node *np;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002018 int ret;
2019
Thierry Reding0e7d37a2014-11-07 15:26:18 +00002020 /*
2021 * Play nice with systems that don't have an ARM SMMU by checking that
2022 * an ARM SMMU exists in the system before proceeding with the driver
2023 * and IOMMU bus operation registration.
2024 */
2025 np = of_find_matching_node(NULL, arm_smmu_of_match);
2026 if (!np)
2027 return 0;
2028
2029 of_node_put(np);
2030
Will Deacon45ae7cf2013-06-24 18:31:25 +01002031 ret = platform_driver_register(&arm_smmu_driver);
2032 if (ret)
2033 return ret;
2034
2035 /* Oh, for a proper bus abstraction */
Dan Carpenter6614ee72013-08-21 09:34:20 +01002036 if (!iommu_present(&platform_bus_type))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002037 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2038
Will Deacond123cf82014-02-04 22:17:53 +00002039#ifdef CONFIG_ARM_AMBA
Dan Carpenter6614ee72013-08-21 09:34:20 +01002040 if (!iommu_present(&amba_bustype))
Will Deacon45ae7cf2013-06-24 18:31:25 +01002041 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
Will Deacond123cf82014-02-04 22:17:53 +00002042#endif
Will Deacon45ae7cf2013-06-24 18:31:25 +01002043
Will Deacona9a1b0b2014-05-01 18:05:08 +01002044#ifdef CONFIG_PCI
2045 if (!iommu_present(&pci_bus_type))
2046 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2047#endif
2048
Will Deacon45ae7cf2013-06-24 18:31:25 +01002049 return 0;
2050}
2051
2052static void __exit arm_smmu_exit(void)
2053{
2054 return platform_driver_unregister(&arm_smmu_driver);
2055}
2056
Andreas Herrmannb1950b22013-10-01 13:39:05 +01002057subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002058module_exit(arm_smmu_exit);
2059
2060MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2061MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2062MODULE_LICENSE("GPL v2");