/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		MAX_PHANDLE_ARGS

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
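/*
 * Both of the above are standard module parameters, so for a built-in
 * driver they can also be set from the kernel command line, e.g.
 * "arm-smmu.force_stage=2".
 */
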
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

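/*
 * A single Stream Match Register allocation: the index of the hardware
 * SMR claimed, plus the stream ID/mask pair programmed into it.
 */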
struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

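/*
 * The context bank index doubles as the ASID/VMID. cavium_id_base offsets
 * these into a range unique to each SMMU on Cavium implementations, where
 * the ASID/VMID namespaces are shared across SMMUs; it is zero elsewhere,
 * leaving these as plain per-SMMU context-bank indices.
 */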
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

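/*
 * For PCI devices, the master described in the device tree is the host
 * bridge, so walk up to the root bus and use the bridge's parent OF node.
 */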
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

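/*
 * Registered masters are kept in an rb-tree per SMMU, keyed (and ordered)
 * by the address of their device_node.
 */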
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

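/*
 * Lock-free allocator: loop until we atomically claim a clear bit with
 * test_and_set_bit(), or the bitmap is exhausted.
 */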
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

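/*
 * Invalidate a range of TLB entries one granule at a time: by VA+ASID for
 * stage 1, by IPA for stage 2 on SMMUv2, falling back to invalidating the
 * entire VMID on SMMUv1.
 */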
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			/* Clear the page offset; the ASID lives in the low bits */
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

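/*
 * Context fault handler: read the fault status/syndrome, give
 * report_iommu_fault() a chance to handle it, then retry or terminate any
 * stalled transaction accordingly.
 */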
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

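/*
 * Program a context bank from the io-pgtable configuration: CBA2R/CBAR
 * first, then the TTBRs, TTBCR(2) and MAIRs, and finally SCTLR to enable
 * translation with fault reporting.
 */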
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

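/*
 * Claim an SMR for each of the master's stream IDs, and only program the
 * ID/mask pairs into the hardware once every allocation has succeeded.
 */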
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

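/*
 * Route the master's stream IDs (via their SMRs, when stream matching is
 * in use) to the domain's context bank by programming the S2CRs.
 */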
Will Deacon45ae7cf2013-06-24 18:31:25 +01001142static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001143 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001144{
1145 int i, ret;
Will Deacon44680ee2014-06-25 11:29:12 +01001146 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001147 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1148
Will Deacon8f68f8e2014-07-15 11:27:08 +01001149 /* Devices in an IOMMU group may already be configured */
Will Deacona9a1b0b2014-05-01 18:05:08 +01001150 ret = arm_smmu_master_configure_smrs(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001151 if (ret)
Will Deacon8f68f8e2014-07-15 11:27:08 +01001152 return ret == -EEXIST ? 0 : ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001153
Will Deaconcbf82772016-02-18 12:05:57 +00001154 /*
1155 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1156 * for all devices behind the SMMU.
1157 */
1158 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1159 return 0;
1160
Will Deacona9a1b0b2014-05-01 18:05:08 +01001161 for (i = 0; i < cfg->num_streamids; ++i) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001162 u32 idx, s2cr;
Mitchel Humpherys29073202014-07-08 09:52:18 -07001163
Will Deacona9a1b0b2014-05-01 18:05:08 +01001164 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
Robin Murphyd3461802016-01-26 18:06:34 +00001165 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}
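
/*
 * Illustrative sketch (not part of the driver): how a hypothetical client
 * would attach its device to an SMMU-backed domain through the generic
 * IOMMU API, which lands in arm_smmu_attach_dev() above. The function
 * name and the #ifdef guard are invented for the example.
 */
#ifdef ARM_SMMU_EXAMPLE_USAGE
static int example_attach(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		iommu_domain_free(domain);
	return ret;
}
#endif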

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
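
/*
 * Illustrative sketch (hypothetical caller): mapping a single page for
 * device DMA. iommu_map() dispatches to arm_smmu_map() above once the
 * domain is attached; the names below are invented for the example.
 */
#ifdef ARM_SMMU_EXAMPLE_USAGE
static int example_map_one_page(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr)
{
	return iommu_map(domain, iova, paddr, PAGE_SIZE,
			 IOMMU_READ | IOMMU_WRITE);
}
#endif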

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
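
/*
 * Illustrative sketch (hypothetical caller): the matching teardown for
 * the mapping example above. iommu_unmap() returns the number of bytes
 * that were actually unmapped.
 */
#ifdef ARM_SMMU_EXAMPLE_USAGE
static bool example_unmap_one_page(struct iommu_domain *domain,
				   unsigned long iova)
{
	return iommu_unmap(domain, iova, PAGE_SIZE) == PAGE_SIZE;
}
#endif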

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
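
/*
 * Illustrative sketch (hypothetical caller): sanity-checking a mapping by
 * translating the IOVA back. On TRANS_OPS-capable hardware with a stage-1
 * domain this exercises the hardware ATS path above; otherwise it falls
 * back to the software table walk.
 */
#ifdef ARM_SMMU_EXAMPLE_USAGE
static bool example_mapping_matches(struct iommu_domain *domain,
				    dma_addr_t iova, phys_addr_t expect)
{
	return iommu_iova_to_phys(domain, iova) == expect;
}
#endif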

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}
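
/*
 * Illustrative sketch (hypothetical caller): capabilities are queried per
 * bus via the core API, which dispatches to arm_smmu_capable() above.
 */
#ifdef ARM_SMMU_EXAMPLE_USAGE
static bool example_coherent_dma_possible(void)
{
	return iommu_capable(&platform_bus_type, IOMMU_CAP_CACHE_COHERENCY);
}
#endif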

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}
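
/*
 * Illustrative note (values made up): a PCI Requester ID packs bus[15:8],
 * device[7:3] and function[2:0], so a device at 02:01.3 yields Stream ID
 * 0x020b under the Stream ID == Requester ID assumption above.
 */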

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
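
/*
 * Illustrative sketch (hypothetical caller, e.g. a VFIO-style user):
 * nesting must be requested while the domain is still unattached, since
 * arm_smmu_domain_set_attr() above rejects the change once the domain has
 * been finalised against an SMMU.
 */
#ifdef ARM_SMMU_EXAMPLE_USAGE
static int example_request_nesting(struct iommu_domain *domain)
{
	int nesting = 1;

	return iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
}
#endif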

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg, major;

	/* Clear the global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, we must first clear the
	 * CACHE_LOCK bit of ACR; note that CACHE_LOCK is only present in
	 * MMU-500 r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
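
/*
 * Illustrative sketch: decoding one of the 3-bit size fields via the
 * helper above, mirroring the IAS/OAS reads in arm_smmu_device_cfg_probe()
 * below. ID2_OAS_SHIFT/ID2_OAS_MASK are assumed to be the register-field
 * macros defined earlier in this file.
 */
#ifdef ARM_SMMU_EXAMPLE_USAGE
static unsigned long example_decode_oas(u32 id2)
{
	return arm_smmu_id_size_to_bits((id2 >> ID2_OAS_SHIFT) &
					ID2_OAS_MASK);
}
#endif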

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check that the SMMU address space size matches the mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	size = 0;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		size |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		size |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		size |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		size |= SZ_64K | SZ_512M;

	arm_smmu_ops.pgsize_bitmap &= size;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
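
/*
 * Illustrative device-tree fragment (hypothetical board; addresses,
 * interrupts and Stream IDs are made up) matching the compatibles above
 * and the "#global-interrupts"/"mmu-masters" parsing below:
 *
 *	smmu@2b400000 {
 *		compatible = "arm,mmu-500", "arm,smmu-v2";
 *		reg = <0x2b400000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 229 4>, <0 230 4>;
 *		mmu-masters = <&dma0 0xd01>;
 *	};
 *
 * where the dma0 master node carries #stream-id-cells = <1>.
 */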

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");