/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))
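
/*
 * For example, with ARM_SMMU_OPT_SECURE_CFG_ACCESS set, a read of sGFSR
 * (offset 0x48) via ARM_SMMU_GR0_NS() lands on the alias nsGFSR at
 * 0x400 + 0x48 = 0x448, matching the offsets listed above.
 */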

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif
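
/*
 * Usage sketch (see the stage-2 TLB invalidation path below):
 *
 *	smmu_write_atomic_lq(iova, reg);
 *
 * compiles to a single writeq_relaxed() on 64-bit kernels, and to a
 * writel_relaxed() of the low word on 32-bit ones, which is all the
 * AArch32 formats need anyway.
 */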

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))
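
/*
 * Example layout (illustrative numbers only): for a 128KB register
 * region with 4KB SMMU pages (pgshift == 12), GR0 sits at offset 0x0,
 * GR1 at 0x1000, and context bank n at 0x10000 + n * 0x1000; i.e. the
 * context banks fill the upper half of the region, one page each.
 */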

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}
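
/*
 * s2cr_init_val is the reset state for a stream-to-context entry:
 * unallocated streams either bypass translation or fault, depending
 * on the "disable_bypass" parameter above.
 */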

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = __fwspec_cfg(fw)->smendx[i], i < fw->num_ids; ++i)
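
/*
 * Usage sketch: the comma expression loads smendx[i] into idx before the
 * bounds check, so the loop body sees a valid idx on every iteration:
 *
 *	for_each_cfg_sme(fwspec, i, idx)
 *		arm_smmu_write_sme(smmu, idx);
 */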

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
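
/*
 * cavium_id_base partitions the ASID/VMID space between SMMU instances:
 * it is zero everywhere except on Cavium CN88xx parts (erratum #27704),
 * where ASIDs and VMIDs must be unique across all SMMUs in the system,
 * hence the running cavium_smmu_context_count below.
 */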

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}
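
/*
 * Usage sketch: the test_and_set_bit() retry loop above makes allocation
 * safe against concurrent callers without holding a lock, e.g. when
 * claiming a context bank:
 *
 *	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
 *				      smmu->num_context_banks);
 */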

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

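		/*
		 * Invalidation register layout, as packed below: for the
		 * AArch32 formats, the page-aligned VA with the ASID in
		 * the low bits; for AArch64, the page number (iova >> 12)
		 * with the ASID in bits [63:48].
		 */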
		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
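		/*
		 * Worked example (hypothetical values): against an existing
		 * entry {id 0x400, mask 0x0ff}, a new {id 0x420, mask 0x00f}
		 * is entirely covered and reuses index i above, whereas a
		 * new {id 0x404, mask 0xf00} merely overlaps it (both match
		 * stream ID 0x404) and is rejected here.
		 */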
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
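		/*
		 * fwspec IDs carry an optional SMR mask in the upper
		 * halfword, i.e. ids[i] == (mask << 16) | sid, as packed
		 * by the generic "iommus" DT binding.
		 */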
Robin Murphy021bb842016-09-14 15:26:46 +01001132 u16 sid = fwspec->ids[i];
1133 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1134
Robin Murphy588888a2016-09-12 17:13:54 +01001135 if (idx != INVALID_SMENDX) {
1136 ret = -EEXIST;
1137 goto out_err;
1138 }
1139
Robin Murphy021bb842016-09-14 15:26:46 +01001140 ret = arm_smmu_find_sme(smmu, sid, mask);
Robin Murphy588888a2016-09-12 17:13:54 +01001141 if (ret < 0)
1142 goto out_err;
1143
1144 idx = ret;
1145 if (smrs && smmu->s2crs[idx].count == 0) {
Robin Murphy021bb842016-09-14 15:26:46 +01001146 smrs[idx].id = sid;
1147 smrs[idx].mask = mask;
Robin Murphy588888a2016-09-12 17:13:54 +01001148 smrs[idx].valid = true;
1149 }
1150 smmu->s2crs[idx].count++;
1151 cfg->smendx[i] = (s16)idx;
1152 }
1153
1154 group = iommu_group_get_for_dev(dev);
1155 if (!group)
1156 group = ERR_PTR(-ENOMEM);
1157 if (IS_ERR(group)) {
1158 ret = PTR_ERR(group);
1159 goto out_err;
1160 }
1161 iommu_group_put(group);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001162
Will Deacon45ae7cf2013-06-24 18:31:25 +01001163 /* It worked! Now, poke the actual hardware */
Robin Murphyadfec2e2016-09-12 17:13:55 +01001164 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001165 arm_smmu_write_sme(smmu, idx);
1166 smmu->s2crs[idx].group = group;
1167 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001168
Robin Murphy588888a2016-09-12 17:13:54 +01001169 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001170 return 0;
1171
Robin Murphy588888a2016-09-12 17:13:54 +01001172out_err:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001173 while (i--) {
Robin Murphy588888a2016-09-12 17:13:54 +01001174 arm_smmu_free_sme(smmu, cfg->smendx[i]);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001175 cfg->smendx[i] = INVALID_SMENDX;
1176 }
Robin Murphy588888a2016-09-12 17:13:54 +01001177 mutex_unlock(&smmu->stream_map_mutex);
1178 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001179}
1180
Robin Murphyadfec2e2016-09-12 17:13:55 +01001181static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001182{
Robin Murphyadfec2e2016-09-12 17:13:55 +01001183 struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1184 struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
Robin Murphyd3097e32016-09-12 17:13:53 +01001185 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001186
Robin Murphy588888a2016-09-12 17:13:54 +01001187 mutex_lock(&smmu->stream_map_mutex);
Robin Murphyadfec2e2016-09-12 17:13:55 +01001188 for_each_cfg_sme(fwspec, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001189 if (arm_smmu_free_sme(smmu, idx))
1190 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001191 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001192 }
Robin Murphy588888a2016-09-12 17:13:54 +01001193 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001194}
1195
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
	u8 cbndx = smmu_domain->cfg.cbndx;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}

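/*
 * Map, unmap and the software iova_to_phys path all defer to the
 * io-pgtable ops installed for the domain; the pgtbl_lock serialises
 * concurrent page table updates.
 */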
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

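/*
 * Translate an IOVA by asking the hardware: write the page-aligned address
 * to ATS1PR, poll ATSR until the walk completes, then read the result from
 * the PAR, falling back to a software table walk on timeout.
 */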
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

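/*
 * Look up the arm_smmu_device bound to a given DT node; used to resolve
 * the SMMU behind a generic "iommus" phandle.
 */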
static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						np, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

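/*
 * Per-device setup: find the owning SMMU (via the legacy "mmu-masters"
 * binding or the device's fwspec), validate each stream ID and SMR mask
 * against the hardware limits, then allocate stream map entries.
 */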
static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);
		fwspec = dev->iommu_fwspec;
		if (ret)
			goto out_free;
	} else if (fwspec) {
		smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
	} else {
		return -ENODEV;
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
		goto out_free;

	return 0;

out_free:
	if (fwspec)
		kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
	return ret;
}

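/*
 * Tear down in the reverse order of arm_smmu_add_device(): release the
 * SMEs, drop the device from its group and free the per-master state.
 */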
static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	arm_smmu_master_free_smes(fwspec);
	iommu_group_remove_device(dev);
	kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
}

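/*
 * If any of this master's stream map entries is already claimed by a
 * group (i.e. another master aliases the same SME), every entry must
 * agree on that group; otherwise fall back to the standard PCI or
 * platform-device grouping.
 */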
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct iommu_group *group = NULL;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return group;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

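/*
 * Translate a generic DT "iommus" specifier into a single fwspec ID. A
 * minimal sketch of the encoding, as implied by the code below: for a
 * hypothetical entry such as iommus = <&smmu 0x400 0x7f80>, args[0]
 * supplies the StreamID and the optional args[1] an SMR mask, packed into
 * the upper halfword of one u32.
 */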
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 fwid = 0;

	if (args->args_count > 0)
		fwid |= (u16)args->args[0];

	if (args->args_count > 1)
		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

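/*
 * Bring the SMMU to a known state: clear the global fault status, reset
 * all stream mapping groups and context banks, invalidate the TLBs, and
 * only then write the global configuration register.
 */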
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
	 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
	 * bit is only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

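/*
 * Decode the 3-bit address size fields of the ID registers:
 * 0 = 32 bits, 1 = 36, 2 = 40, 3 = 42, 4 = 44, 5 and up = 48.
 */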
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

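/*
 * Interrogate the ID registers to discover what this implementation
 * supports: translation stages, stream matching resources, context banks,
 * address sizes and page table formats.
 */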
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	/* Max. number of entries we have for stream matching/indexing */
	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		u32 smr;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/*
		 * SMR.ID bits may not be preserved if the corresponding MASK
		 * bits are set, so check each one separately. We can reject
		 * masters later if they try to claim IDs outside these masks.
		 */
		smr = smmu->streamid_mask << SMR_ID_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->streamid_mask = smr >> SMR_ID_SHIFT;

		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups, mask 0x%x\n",
			   size, smmu->smr_mask_mask);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;
	mutex_init(&smmu->stream_map_mutex);

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

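/*
 * Platform probe: settle which DT binding is in use, map the registers,
 * collect global and context interrupts, probe and reset the hardware,
 * and finally hook arm_smmu_ops up to the platform, AMBA and PCI buses.
 */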
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;
	bool legacy_binding;

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

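/*
 * Registration can be reached twice: via subsys_initcall() and via the
 * IOMMU_OF_DECLARE() early-init path below, so guard against registering
 * the platform driver more than once.
 */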
2040static int __init arm_smmu_init(void)
2041{
Robin Murphy021bb842016-09-14 15:26:46 +01002042 static bool registered;
2043 int ret = 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002044
Robin Murphy021bb842016-09-14 15:26:46 +01002045 if (!registered) {
2046 ret = platform_driver_register(&arm_smmu_driver);
2047 registered = !ret;
Wei Chen112c8982016-06-13 17:20:17 +08002048 }
Robin Murphy021bb842016-09-14 15:26:46 +01002049 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01002050}
2051
2052static void __exit arm_smmu_exit(void)
2053{
2054 return platform_driver_unregister(&arm_smmu_driver);
2055}
2056
Andreas Herrmannb1950b22013-10-01 13:39:05 +01002057subsys_initcall(arm_smmu_init);
Will Deacon45ae7cf2013-06-24 18:31:25 +01002058module_exit(arm_smmu_exit);
2059
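/*
 * Early firmware-driven init: make sure the driver is registered, then
 * create the platform device for this SMMU node so it can probe before
 * its masters need it.
 */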
static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");