/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */
28
29#define pr_fmt(fmt) "arm-smmu: " fmt
30
Robin Murphy1f3d5ca2016-09-12 17:13:49 +010031#include <linux/atomic.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010032#include <linux/delay.h>
Robin Murphy9adb9592016-01-26 18:06:36 +000033#include <linux/dma-iommu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010034#include <linux/dma-mapping.h>
35#include <linux/err.h>
36#include <linux/interrupt.h>
37#include <linux/io.h>
Robin Murphyf9a05f02016-04-13 18:13:01 +010038#include <linux/io-64-nonatomic-hi-lo.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010039#include <linux/iommu.h>
Mitchel Humpherys859a7322014-10-29 21:13:40 +000040#include <linux/iopoll.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010041#include <linux/module.h>
42#include <linux/of.h>
Robin Murphybae2c2d2015-07-29 19:46:05 +010043#include <linux/of_address.h>
Robin Murphyd6fc5d92016-09-12 17:13:52 +010044#include <linux/of_device.h>
Will Deacona9a1b0b2014-05-01 18:05:08 +010045#include <linux/pci.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010046#include <linux/platform_device.h>
47#include <linux/slab.h>
48#include <linux/spinlock.h>
49
50#include <linux/amba/bus.h>
51
Will Deacon518f7132014-11-14 17:17:54 +000052#include "io-pgtable.h"
Will Deacon45ae7cf2013-06-24 18:31:25 +010053
54/* Maximum number of stream IDs assigned to a single device */
Joerg Roedelcb6c27b2016-04-04 17:49:22 +020055#define MAX_MASTER_STREAMIDS 128
Will Deacon45ae7cf2013-06-24 18:31:25 +010056
57/* Maximum number of context banks per SMMU */
58#define ARM_SMMU_MAX_CBS 128
59
Will Deacon45ae7cf2013-06-24 18:31:25 +010060/* SMMU global address space */
61#define ARM_SMMU_GR0(smmu) ((smmu)->base)
Will Deaconc757e852014-07-30 11:33:25 +010062#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +010063
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +000064/*
65 * SMMU global address space with conditional offset to access secure
66 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
67 * nsGFSYNR0: 0x450)
68 */
69#define ARM_SMMU_GR0_NS(smmu) \
70 ((smmu)->base + \
71 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
72 ? 0x400 : 0))
73
Robin Murphyf9a05f02016-04-13 18:13:01 +010074/*
75 * Some 64-bit registers only make sense to write atomically, but in such
76 * cases all the data relevant to AArch32 formats lies within the lower word,
77 * therefore this actually makes more sense than it might first appear.
78 */
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010079#ifdef CONFIG_64BIT
Robin Murphyf9a05f02016-04-13 18:13:01 +010080#define smmu_write_atomic_lq writeq_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010081#else
Robin Murphyf9a05f02016-04-13 18:13:01 +010082#define smmu_write_atomic_lq writel_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010083#endif
84
Will Deacon45ae7cf2013-06-24 18:31:25 +010085/* Configuration registers */
86#define ARM_SMMU_GR0_sCR0 0x0
87#define sCR0_CLIENTPD (1 << 0)
88#define sCR0_GFRE (1 << 1)
89#define sCR0_GFIE (1 << 2)
90#define sCR0_GCFGFRE (1 << 4)
91#define sCR0_GCFGFIE (1 << 5)
92#define sCR0_USFCFG (1 << 10)
93#define sCR0_VMIDPNE (1 << 11)
94#define sCR0_PTM (1 << 12)
95#define sCR0_FB (1 << 13)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -080096#define sCR0_VMID16EN (1 << 31)
Will Deacon45ae7cf2013-06-24 18:31:25 +010097#define sCR0_BSU_SHIFT 14
98#define sCR0_BSU_MASK 0x3
99
Peng Fan3ca37122016-05-03 21:50:30 +0800100/* Auxiliary Configuration register */
101#define ARM_SMMU_GR0_sACR 0x10
102
Will Deacon45ae7cf2013-06-24 18:31:25 +0100103/* Identification registers */
104#define ARM_SMMU_GR0_ID0 0x20
105#define ARM_SMMU_GR0_ID1 0x24
106#define ARM_SMMU_GR0_ID2 0x28
107#define ARM_SMMU_GR0_ID3 0x2c
108#define ARM_SMMU_GR0_ID4 0x30
109#define ARM_SMMU_GR0_ID5 0x34
110#define ARM_SMMU_GR0_ID6 0x38
111#define ARM_SMMU_GR0_ID7 0x3c
112#define ARM_SMMU_GR0_sGFSR 0x48
113#define ARM_SMMU_GR0_sGFSYNR0 0x50
114#define ARM_SMMU_GR0_sGFSYNR1 0x54
115#define ARM_SMMU_GR0_sGFSYNR2 0x58
Will Deacon45ae7cf2013-06-24 18:31:25 +0100116
117#define ID0_S1TS (1 << 30)
118#define ID0_S2TS (1 << 29)
119#define ID0_NTS (1 << 28)
120#define ID0_SMS (1 << 27)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000121#define ID0_ATOSNS (1 << 26)
Robin Murphy7602b872016-04-28 17:12:09 +0100122#define ID0_PTFS_NO_AARCH32 (1 << 25)
123#define ID0_PTFS_NO_AARCH32S (1 << 24)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100124#define ID0_CTTW (1 << 14)
125#define ID0_NUMIRPT_SHIFT 16
126#define ID0_NUMIRPT_MASK 0xff
Olav Haugan3c8766d2014-08-22 17:12:32 -0700127#define ID0_NUMSIDB_SHIFT 9
128#define ID0_NUMSIDB_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100129#define ID0_NUMSMRG_SHIFT 0
130#define ID0_NUMSMRG_MASK 0xff
131
132#define ID1_PAGESIZE (1 << 31)
133#define ID1_NUMPAGENDXB_SHIFT 28
134#define ID1_NUMPAGENDXB_MASK 7
135#define ID1_NUMS2CB_SHIFT 16
136#define ID1_NUMS2CB_MASK 0xff
137#define ID1_NUMCB_SHIFT 0
138#define ID1_NUMCB_MASK 0xff
139
140#define ID2_OAS_SHIFT 4
141#define ID2_OAS_MASK 0xf
142#define ID2_IAS_SHIFT 0
143#define ID2_IAS_MASK 0xf
144#define ID2_UBS_SHIFT 8
145#define ID2_UBS_MASK 0xf
146#define ID2_PTFS_4K (1 << 12)
147#define ID2_PTFS_16K (1 << 13)
148#define ID2_PTFS_64K (1 << 14)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800149#define ID2_VMID16 (1 << 15)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100150
Peng Fan3ca37122016-05-03 21:50:30 +0800151#define ID7_MAJOR_SHIFT 4
152#define ID7_MAJOR_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100153
Will Deacon45ae7cf2013-06-24 18:31:25 +0100154/* Global TLB invalidation */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100155#define ARM_SMMU_GR0_TLBIVMID 0x64
156#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
157#define ARM_SMMU_GR0_TLBIALLH 0x6c
158#define ARM_SMMU_GR0_sTLBGSYNC 0x70
159#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
160#define sTLBGSTATUS_GSACTIVE (1 << 0)
161#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
162
163/* Stream mapping registers */
164#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
165#define SMR_VALID (1 << 31)
166#define SMR_MASK_SHIFT 16
Will Deacon45ae7cf2013-06-24 18:31:25 +0100167#define SMR_ID_SHIFT 0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100168
169#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
170#define S2CR_CBNDX_SHIFT 0
171#define S2CR_CBNDX_MASK 0xff
172#define S2CR_TYPE_SHIFT 16
173#define S2CR_TYPE_MASK 0x3
/*
 * S2CR.TYPE field values: what happens to a transaction whose stream
 * matches this entry (translate via a context bank, bypass translation,
 * or terminate with a fault).
 */
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};
Will Deacon45ae7cf2013-06-24 18:31:25 +0100179
Robin Murphyd3461802016-01-26 18:06:34 +0000180#define S2CR_PRIVCFG_SHIFT 24
Robin Murphy8e8b2032016-09-12 17:13:50 +0100181#define S2CR_PRIVCFG_MASK 0x3
/* S2CR.PRIVCFG field values: privilege attribute override for a stream. */
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};
Robin Murphyd3461802016-01-26 18:06:34 +0000188
Will Deacon45ae7cf2013-06-24 18:31:25 +0100189/* Context bank attribute registers */
190#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
191#define CBAR_VMID_SHIFT 0
192#define CBAR_VMID_MASK 0xff
Will Deacon57ca90f2014-02-06 14:59:05 +0000193#define CBAR_S1_BPSHCFG_SHIFT 8
194#define CBAR_S1_BPSHCFG_MASK 3
195#define CBAR_S1_BPSHCFG_NSH 3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100196#define CBAR_S1_MEMATTR_SHIFT 12
197#define CBAR_S1_MEMATTR_MASK 0xf
198#define CBAR_S1_MEMATTR_WB 0xf
199#define CBAR_TYPE_SHIFT 16
200#define CBAR_TYPE_MASK 0x3
201#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
202#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
203#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
204#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
205#define CBAR_IRPTNDX_SHIFT 24
206#define CBAR_IRPTNDX_MASK 0xff
207
208#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
209#define CBA2R_RW64_32BIT (0 << 0)
210#define CBA2R_RW64_64BIT (1 << 0)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800211#define CBA2R_VMID_SHIFT 16
212#define CBA2R_VMID_MASK 0xffff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100213
214/* Translation context bank */
215#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
Will Deaconc757e852014-07-30 11:33:25 +0100216#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +0100217
218#define ARM_SMMU_CB_SCTLR 0x0
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100219#define ARM_SMMU_CB_ACTLR 0x4
Will Deacon45ae7cf2013-06-24 18:31:25 +0100220#define ARM_SMMU_CB_RESUME 0x8
221#define ARM_SMMU_CB_TTBCR2 0x10
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100222#define ARM_SMMU_CB_TTBR0 0x20
223#define ARM_SMMU_CB_TTBR1 0x28
Will Deacon45ae7cf2013-06-24 18:31:25 +0100224#define ARM_SMMU_CB_TTBCR 0x30
Robin Murphy60705292016-08-11 17:44:06 +0100225#define ARM_SMMU_CB_CONTEXTIDR 0x34
Will Deacon45ae7cf2013-06-24 18:31:25 +0100226#define ARM_SMMU_CB_S1_MAIR0 0x38
Will Deacon518f7132014-11-14 17:17:54 +0000227#define ARM_SMMU_CB_S1_MAIR1 0x3c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100228#define ARM_SMMU_CB_PAR 0x50
Will Deacon45ae7cf2013-06-24 18:31:25 +0100229#define ARM_SMMU_CB_FSR 0x58
Robin Murphyf9a05f02016-04-13 18:13:01 +0100230#define ARM_SMMU_CB_FAR 0x60
Will Deacon45ae7cf2013-06-24 18:31:25 +0100231#define ARM_SMMU_CB_FSYNR0 0x68
Will Deacon518f7132014-11-14 17:17:54 +0000232#define ARM_SMMU_CB_S1_TLBIVA 0x600
Will Deacon1463fe42013-07-31 19:21:27 +0100233#define ARM_SMMU_CB_S1_TLBIASID 0x610
Will Deacon518f7132014-11-14 17:17:54 +0000234#define ARM_SMMU_CB_S1_TLBIVAL 0x620
235#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
236#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
Robin Murphy661d9622015-05-27 17:09:34 +0100237#define ARM_SMMU_CB_ATS1PR 0x800
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000238#define ARM_SMMU_CB_ATSR 0x8f0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100239
240#define SCTLR_S1_ASIDPNE (1 << 12)
241#define SCTLR_CFCFG (1 << 7)
242#define SCTLR_CFIE (1 << 6)
243#define SCTLR_CFRE (1 << 5)
244#define SCTLR_E (1 << 4)
245#define SCTLR_AFE (1 << 2)
246#define SCTLR_TRE (1 << 1)
247#define SCTLR_M (1 << 0)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100248
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100249#define ARM_MMU500_ACTLR_CPRE (1 << 1)
250
Peng Fan3ca37122016-05-03 21:50:30 +0800251#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
252
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000253#define CB_PAR_F (1 << 0)
254
255#define ATSR_ACTIVE (1 << 0)
256
Will Deacon45ae7cf2013-06-24 18:31:25 +0100257#define RESUME_RETRY (0 << 0)
258#define RESUME_TERMINATE (1 << 0)
259
Will Deacon45ae7cf2013-06-24 18:31:25 +0100260#define TTBCR2_SEP_SHIFT 15
Will Deacon5dc56162015-05-08 17:44:22 +0100261#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100262
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100263#define TTBRn_ASID_SHIFT 48
Will Deacon45ae7cf2013-06-24 18:31:25 +0100264
265#define FSR_MULTI (1 << 31)
266#define FSR_SS (1 << 30)
267#define FSR_UUT (1 << 8)
268#define FSR_ASF (1 << 7)
269#define FSR_TLBLKF (1 << 6)
270#define FSR_TLBMCF (1 << 5)
271#define FSR_EF (1 << 4)
272#define FSR_PF (1 << 3)
273#define FSR_AFF (1 << 2)
274#define FSR_TF (1 << 1)
275
Mitchel Humpherys29073202014-07-08 09:52:18 -0700276#define FSR_IGN (FSR_AFF | FSR_ASF | \
277 FSR_TLBMCF | FSR_TLBLKF)
278#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
Will Deaconadaba322013-07-31 19:21:26 +0100279 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100280
281#define FSYNR0_WNR (1 << 4)
282
Will Deacon4cf740b2014-07-14 19:47:39 +0100283static int force_stage;
Robin Murphy25a1c962016-02-10 14:25:33 +0000284module_param(force_stage, int, S_IRUGO);
Will Deacon4cf740b2014-07-14 19:47:39 +0100285MODULE_PARM_DESC(force_stage,
286 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
Robin Murphy25a1c962016-02-10 14:25:33 +0000287static bool disable_bypass;
288module_param(disable_bypass, bool, S_IRUGO);
289MODULE_PARM_DESC(disable_bypass,
290 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
Will Deacon4cf740b2014-07-14 19:47:39 +0100291
Robin Murphy09360402014-08-28 17:51:59 +0100292enum arm_smmu_arch_version {
Robin Murphyb7862e32016-04-13 18:13:03 +0100293 ARM_SMMU_V1,
294 ARM_SMMU_V1_64K,
Robin Murphy09360402014-08-28 17:51:59 +0100295 ARM_SMMU_V2,
296};
297
Robin Murphy67b65a32016-04-13 18:12:57 +0100298enum arm_smmu_implementation {
299 GENERIC_SMMU,
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100300 ARM_MMU500,
Robin Murphye086d912016-04-13 18:12:58 +0100301 CAVIUM_SMMUV2,
Robin Murphy67b65a32016-04-13 18:12:57 +0100302};
303
/*
 * Software shadow of one Stream-to-Context Register (S2CR) entry:
 * how a matched stream is routed and which context bank serves it.
 */
struct arm_smmu_s2cr {
	struct iommu_group		*group;	/* group using this entry */
	int				count;	/* number of users of this entry */
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;	/* context bank index */
};

/*
 * Reset value for an S2CR entry: unmatched/unclaimed streams either
 * fault or bypass, according to the disable_bypass module parameter.
 */
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}
315
/* Software shadow of one Stream Match Register (SMR) entry. */
struct arm_smmu_smr {
	u16				mask;	/* stream-ID mask bits */
	u16				id;	/* stream-ID match bits */
	bool				valid;	/* entry is in use */
};
321
/* Per-master (upstream device) configuration: the stream IDs it emits. */
struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;	/* SMMU this master sits behind */
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	/* stream-map entry index for each stream ID, or INVALID_SMENDX */
	s16				smendx[MAX_MASTER_STREAMIDS];
};
#define INVALID_SMENDX			-1
/* Iterate a master's stream IDs, yielding each one's stream-map index. */
#define for_each_cfg_sme(cfg, i, idx) \
	for (i = 0; idx = cfg->smendx[i], i < cfg->num_streamids; ++i)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100331
/* State for one SMMU instance. */
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;		/* mapped register space */
	unsigned long			size;		/* size of register space */
	unsigned long			pgshift;	/* global space page shift */

/* Hardware capabilities, probed from the ID registers */
#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

/* Integration quirks, parsed from DT properties */
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);	/* allocated banks */
	atomic_t			irptndx;	/* context IRQ allocator */

	u32				num_mapping_groups;
	u16				streamid_mask;	/* usable stream-ID bits */
	u16				smr_mask_mask;	/* usable SMR mask bits */
	struct arm_smmu_smr		*smrs;		/* SMR shadow array */
	struct arm_smmu_s2cr		*s2crs;		/* S2CR shadow array */
	struct mutex			stream_map_mutex; /* guards smrs/s2crs */

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;	/* supported page sizes */

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */
};
381
/* Translation-table / translation-register format of a context bank. */
enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

/* Per-context-bank configuration. */
struct arm_smmu_cfg {
	u8				cbndx;		/* context bank index */
	u8				irptndx;	/* context IRQ index */
	u32				cbar;		/* CBAR type/attributes */
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

/*
 * ASID/VMID for a context bank. Offset by cavium_id_base so that multiple
 * Cavium SMMU instances (which share TLB context tagging) don't collide;
 * on other implementations cavium_id_base is 0 — see its declaration above.
 */
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
Will Deaconecfadb62013-07-31 19:21:28 +0100399
/* Which translation stage(s) a domain uses. */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

/* Driver-private IOMMU domain, wrapping the generic iommu_domain. */
struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;	/* set on attach; see init_mutex */
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;	/* guards page-table ops */
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;	/* embedded; container_of target */
};
415
/* Maps a DT property name to an ARM_SMMU_OPT_* flag. */
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

/*
 * Running count of Cavium SMMU contexts — presumably used at probe time to
 * assign each instance its cavium_id_base; verify against the probe path.
 */
static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

/* Sentinel-terminated table of recognised DT quirk options. */
static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};
427
/* Convert a generic iommu_domain to its enclosing arm_smmu_domain. */
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}
432
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000433static void parse_driver_options(struct arm_smmu_device *smmu)
434{
435 int i = 0;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700436
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000437 do {
438 if (of_property_read_bool(smmu->dev->of_node,
439 arm_smmu_options[i].prop)) {
440 smmu->options |= arm_smmu_options[i].opt;
441 dev_notice(smmu->dev, "option %s\n",
442 arm_smmu_options[i].prop);
443 }
444 } while (arm_smmu_options[++i].opt);
445}
446
Will Deacon8f68f8e2014-07-15 11:27:08 +0100447static struct device_node *dev_get_dev_node(struct device *dev)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100448{
449 if (dev_is_pci(dev)) {
450 struct pci_bus *bus = to_pci_dev(dev)->bus;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700451
Will Deacona9a1b0b2014-05-01 18:05:08 +0100452 while (!pci_is_root_bus(bus))
453 bus = bus->parent;
Robin Murphyf80cd882016-09-14 15:21:39 +0100454 return of_node_get(bus->bridge->parent->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100455 }
456
Robin Murphyf80cd882016-09-14 15:21:39 +0100457 return of_node_get(dev->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100458}
459
Robin Murphyf80cd882016-09-14 15:21:39 +0100460static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100461{
Robin Murphyf80cd882016-09-14 15:21:39 +0100462 *((__be32 *)data) = cpu_to_be32(alias);
463 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100464}
465
/*
 * driver_for_each_device() callback: check whether the SMMU @dev lists,
 * in its legacy "mmu-masters" DT property, the master node stashed in the
 * iterator that *data points at.
 *
 * On a match, *data is repointed at the SMMU's struct device and 1 is
 * returned to stop the outer iteration, leaving the iterator positioned
 * at the matching entry's stream-ID cells. Otherwise returns 0 to keep
 * searching (-ENOENT from the walk just means "no such property here"),
 * or a negative error from the phandle parsing.
 */
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	/* No match: restore the target node clobbered by the walk */
	it->node = np;
	return err == -ENOENT ? 0 : err;
}
481
Robin Murphyd6fc5d92016-09-12 17:13:52 +0100482static struct platform_driver arm_smmu_driver;
483
/*
 * Find the SMMU that names @dev in its legacy "mmu-masters" DT property
 * and record the master's stream IDs in a freshly allocated
 * arm_smmu_master_cfg hung off dev->archdata.iommu.
 *
 * Returns 0 on success, -ENODEV if no probed SMMU claims this master,
 * -ENOSPC if it has more than MAX_MASTER_STREAMIDS stream IDs, -ENOMEM on
 * allocation failure, or a negative error from the DT phandle walk.
 */
static int arm_smmu_register_legacy_master(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	__be32 pci_sid;
	int err;

	/* For PCI, this is the host controller's node, not the endpoint's */
	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	/*
	 * Walk every probed SMMU. On success, __find_legacy_master_phandle
	 * repoints @data at the matching SMMU's struct device and leaves
	 * @it positioned on this master's stream-ID cells.
	 */
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	smmu = dev_get_drvdata(data);

	if (it.cur_count > MAX_MASTER_STREAMIDS) {
		dev_err(smmu->dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, dev_name(dev));
		return -ENOSPC;
	}
	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		/* Substitute the single PCI-derived SID for the DT cells */
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	cfg->smmu = smmu;
	dev->archdata.iommu = cfg;

	/* Copy each big-endian stream-ID cell into the master config */
	while (it.cur_count--)
		cfg->streamids[cfg->num_streamids++] = be32_to_cpup(it.cur++);

	return 0;
}
537
538static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
539{
540 int idx;
541
542 do {
543 idx = find_next_zero_bit(map, end, start);
544 if (idx == end)
545 return -ENOSPC;
546 } while (test_and_set_bit(idx, map));
547
548 return idx;
549}
550
/* Release a bit previously claimed with __arm_smmu_alloc_bitmap(). */
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
555
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Issue a global TLB sync, then poll until the SMMU signals done */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		/* Give up after TLB_LOOP_TIMEOUT iterations (~1s of udelay) */
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
574
/* io-pgtable tlb_sync callback: @cookie is the arm_smmu_domain. */
static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
580
/*
 * io-pgtable tlb_flush_all callback: invalidate every TLB entry for the
 * domain's context (by ASID for stage 1, by VMID otherwise), then wait
 * for the invalidation to drain.
 */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		/* Per-context-bank invalidation by ASID */
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		/* Global-register invalidation by VMID */
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}
601
Will Deacon518f7132014-11-14 17:17:54 +0000602static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +0000603 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +0000604{
605 struct arm_smmu_domain *smmu_domain = cookie;
606 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
607 struct arm_smmu_device *smmu = smmu_domain->smmu;
608 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
609 void __iomem *reg;
610
611 if (stage1) {
612 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
613 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
614
Robin Murphy7602b872016-04-28 17:12:09 +0100615 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +0000616 iova &= ~12UL;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800617 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
Robin Murphy75df1382015-12-07 18:18:52 +0000618 do {
619 writel_relaxed(iova, reg);
620 iova += granule;
621 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +0000622 } else {
623 iova >>= 12;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800624 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
Robin Murphy75df1382015-12-07 18:18:52 +0000625 do {
626 writeq_relaxed(iova, reg);
627 iova += granule >> 12;
628 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +0000629 }
Will Deacon518f7132014-11-14 17:17:54 +0000630 } else if (smmu->version == ARM_SMMU_V2) {
631 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
632 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
633 ARM_SMMU_CB_S2_TLBIIPAS2;
Robin Murphy75df1382015-12-07 18:18:52 +0000634 iova >>= 12;
635 do {
Robin Murphyf9a05f02016-04-13 18:13:01 +0100636 smmu_write_atomic_lq(iova, reg);
Robin Murphy75df1382015-12-07 18:18:52 +0000637 iova += granule >> 12;
638 } while (size -= granule);
Will Deacon518f7132014-11-14 17:17:54 +0000639 } else {
640 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800641 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
Will Deacon518f7132014-11-14 17:17:54 +0000642 }
643}
644
/* TLB maintenance callbacks handed to the io-pgtable code. */
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
650
/*
 * Context-bank fault IRQ handler: @dev is the iommu_domain this context
 * bank serves. Logs any pending translation fault and clears it.
 */
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	/* Not our interrupt if no fault bit is set */
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	/* Write back the recorded fault bits to clear them */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}
677
/* Global fault IRQ handler: @dev is the arm_smmu_device. */
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	/* Use the secure alias of GR0 when the DT quirk option demands it */
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	/* Not our interrupt if no global fault is recorded */
	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	/* Write back the recorded fault bits to clear them */
	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
701
/*
 * Program the context bank registers for a finalised domain:
 * CBA2R (SMMUv2 only), CBAR, the TTBRs, TTBCR (and TTBCR2 where
 * present), the stage-1 memory attribute registers, and finally
 * SCTLR to enable translation and fault reporting.
 *
 * Register values are derived from the io_pgtable_cfg built by the
 * caller (arm_smmu_init_domain_context). All MMIO accesses use the
 * *_relaxed accessors.
 */
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	/* Anything other than a pure stage-2 context involves stage 1 */
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		/* CBA2R selects the AArch32/AArch64 context width */
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	/* Pre-v2, the IRQ index is encoded in CBAR rather than implicit */
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			/* v7s format: 32-bit TTBRs, ASID via CONTEXTIDR */
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			/* LPAE: 64-bit TTBRs carry the ASID in-band */
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		/* Stage 2: single VTTBR, no ASID */
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			/* Upper TCR half goes to TTBCR2 on v2 hardware */
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			/* v7s uses PRRR/NMRR in place of the MAIRs */
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR: enable the context bank last (SCTLR_M turns on the MMU) */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
808
/*
 * Finalise a domain on a particular SMMU instance: choose the
 * translation stage and context format, allocate a context bank and
 * IRQ index, build the io-pgtable, program the context bank registers,
 * and request the context fault interrupt.
 *
 * Idempotent: if the domain already has an SMMU, nothing is done.
 * DMA domains are currently handled by stream bypass, so only the smmu
 * pointer is recorded for them (no context bank is allocated).
 *
 * Returns 0 on success or a negative errno.
 */
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 * S1               N                S1
	 * S1               S1+S2            S1
	 * S1               S2               S2
	 * S1               S1               S1
	 * N                N                N
	 * N                S1+S2            S2
	 * N                S2               S2
	 * N                S1               S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	/* No usable context format at all: give up */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Derive the io-pgtable format and clamp the input/output address
	 * sizes to what the chosen context format can express.
	 */
	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		/* Stage-1 contexts are allocated above the S2 banks */
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Claim a free context bank index from the permitted range */
	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		/* Pre-v2: assign context IRQs round-robin from the pool */
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		/* v2: IRQ index matches the context bank index */
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		/* Non-fatal: carry on without fault reporting for this CB */
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
979
980static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
981{
Joerg Roedel1d672632015-03-26 13:43:10 +0100982 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +0100983 struct arm_smmu_device *smmu = smmu_domain->smmu;
984 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +0100985 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100986 int irq;
987
Robin Murphy98006992016-04-20 14:53:33 +0100988 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100989 return;
990
Will Deacon518f7132014-11-14 17:17:54 +0000991 /*
992 * Disable the context bank and free the page tables before freeing
993 * it.
994 */
Will Deacon44680ee2014-06-25 11:29:12 +0100995 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +0100996 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +0100997
Will Deacon44680ee2014-06-25 11:29:12 +0100998 if (cfg->irptndx != INVALID_IRPTNDX) {
999 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +08001000 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001001 }
1002
Markus Elfring44830b02015-11-06 18:32:41 +01001003 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Will Deacon44680ee2014-06-25 11:29:12 +01001004 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001005}
1006
Joerg Roedel1d672632015-03-26 13:43:10 +01001007static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001008{
1009 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001010
Robin Murphy9adb9592016-01-26 18:06:36 +00001011 if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
Joerg Roedel1d672632015-03-26 13:43:10 +01001012 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001013 /*
1014 * Allocate the domain and initialise some of its data structures.
1015 * We can't really do anything meaningful until we've added a
1016 * master.
1017 */
1018 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1019 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001020 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001021
Robin Murphy9adb9592016-01-26 18:06:36 +00001022 if (type == IOMMU_DOMAIN_DMA &&
1023 iommu_get_dma_cookie(&smmu_domain->domain)) {
1024 kfree(smmu_domain);
1025 return NULL;
1026 }
1027
Will Deacon518f7132014-11-14 17:17:54 +00001028 mutex_init(&smmu_domain->init_mutex);
1029 spin_lock_init(&smmu_domain->pgtbl_lock);
Joerg Roedel1d672632015-03-26 13:43:10 +01001030
1031 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001032}
1033
/*
 * Release a domain and everything it owns: the DMA cookie (if any),
 * the hardware context, and finally the domain structure itself.
 * All masters are assumed to have been detached already.
 */
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
1046
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001047static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1048{
1049 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyf80cd882016-09-14 15:21:39 +01001050 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001051
1052 if (smr->valid)
1053 reg |= SMR_VALID;
1054 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1055}
1056
Robin Murphy8e8b2032016-09-12 17:13:50 +01001057static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1058{
1059 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1060 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1061 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1062 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1063
1064 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1065}
1066
1067static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1068{
1069 arm_smmu_write_s2cr(smmu, idx);
1070 if (smmu->smrs)
1071 arm_smmu_write_smr(smmu, idx);
1072}
1073
/*
 * Find a stream map entry index for the given stream @id/@mask, under
 * the caller-held stream_map_mutex.
 *
 * Returns:
 *  - @id itself for stream-indexing SMMUs (no SMRs);
 *  - an existing index whose SMR entirely covers @id/@mask;
 *  - otherwise the first free index, or -ENOSPC if none;
 *  - -EINVAL if @id/@mask partially overlaps an existing entry, since
 *    some stream ID would then match two entries.
 */
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}
1115
1116static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1117{
1118 if (--smmu->s2crs[idx].count)
1119 return false;
1120
1121 smmu->s2crs[idx] = s2cr_init_val;
1122 if (smmu->smrs)
1123 smmu->smrs[idx].valid = false;
1124
1125 return true;
1126}
1127
/*
 * Allocate stream map entries for every stream ID of a master device,
 * join the device to an IOMMU group, and program the hardware.
 *
 * On success the chosen indices are recorded in cfg->smendx[]. On any
 * failure, entries claimed so far are rolled back and a negative errno
 * is returned (-EEXIST if the master already holds SMEs).
 */
static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(cfg, i, idx) {
		/* A valid index means this slot was already claimed */
		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, cfg->streamids[i], 0);
		if (ret < 0)
			goto out_err;

		idx = ret;
		/* First user of this entry: fill in its SMR fields */
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = cfg->streamids[i];
			smrs[idx].mask = 0; /* We don't currently share SMRs */
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(cfg, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	/* Roll back entries claimed before the failure (i is past it) */
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}
1184
Robin Murphyf80cd882016-09-14 15:21:39 +01001185static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001186{
Robin Murphyf80cd882016-09-14 15:21:39 +01001187 struct arm_smmu_device *smmu = cfg->smmu;
Robin Murphyd3097e32016-09-12 17:13:53 +01001188 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001189
Robin Murphy588888a2016-09-12 17:13:54 +01001190 mutex_lock(&smmu->stream_map_mutex);
Robin Murphyd3097e32016-09-12 17:13:53 +01001191 for_each_cfg_sme(cfg, i, idx) {
Robin Murphy588888a2016-09-12 17:13:54 +01001192 if (arm_smmu_free_sme(smmu, idx))
1193 arm_smmu_write_sme(smmu, idx);
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001194 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001195 }
Robin Murphy588888a2016-09-12 17:13:54 +01001196 mutex_unlock(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001197}
1198
Will Deacon45ae7cf2013-06-24 18:31:25 +01001199static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001200 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001201{
Will Deacon44680ee2014-06-25 11:29:12 +01001202 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001203 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1204 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
1205 u8 cbndx = smmu_domain->cfg.cbndx;
Robin Murphy588888a2016-09-12 17:13:54 +01001206 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001207
Will Deacon5f634952016-04-20 14:53:32 +01001208 /*
1209 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1210 * for all devices behind the SMMU. Note that we need to take
1211 * care configuring SMRs for devices both a platform_device and
1212 * and a PCI device (i.e. a PCI host controller)
1213 */
1214 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
Robin Murphy8e8b2032016-09-12 17:13:50 +01001215 type = S2CR_TYPE_BYPASS;
Will Deacon5f634952016-04-20 14:53:32 +01001216
Robin Murphyd3097e32016-09-12 17:13:53 +01001217 for_each_cfg_sme(cfg, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001218 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
Robin Murphy588888a2016-09-12 17:13:54 +01001219 continue;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001220
Robin Murphy8e8b2032016-09-12 17:13:50 +01001221 s2cr[idx].type = type;
1222 s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
1223 s2cr[idx].cbndx = cbndx;
1224 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001225 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001226 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001227}
1228
Will Deacon45ae7cf2013-06-24 18:31:25 +01001229static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1230{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001231 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01001232 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphyf80cd882016-09-14 15:21:39 +01001233 struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001234
Robin Murphyf80cd882016-09-14 15:21:39 +01001235 if (!cfg) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001236 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1237 return -ENXIO;
1238 }
1239
Will Deacon518f7132014-11-14 17:17:54 +00001240 /* Ensure that the domain is finalised */
Robin Murphyf80cd882016-09-14 15:21:39 +01001241 ret = arm_smmu_init_domain_context(domain, cfg->smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001242 if (ret < 0)
Will Deacon518f7132014-11-14 17:17:54 +00001243 return ret;
1244
Will Deacon45ae7cf2013-06-24 18:31:25 +01001245 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001246 * Sanity check the domain. We don't support domains across
1247 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001248 */
Robin Murphyf80cd882016-09-14 15:21:39 +01001249 if (smmu_domain->smmu != cfg->smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001250 dev_err(dev,
1251 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyf80cd882016-09-14 15:21:39 +01001252 dev_name(smmu_domain->smmu->dev), dev_name(cfg->smmu->dev));
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001253 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001254 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001255
1256 /* Looks ok, so add the device to the domain */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001257 return arm_smmu_domain_add_master(smmu_domain, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001258}
1259
Will Deacon45ae7cf2013-06-24 18:31:25 +01001260static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001261 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001262{
Will Deacon518f7132014-11-14 17:17:54 +00001263 int ret;
1264 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001265 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001266 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001267
Will Deacon518f7132014-11-14 17:17:54 +00001268 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001269 return -ENODEV;
1270
Will Deacon518f7132014-11-14 17:17:54 +00001271 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1272 ret = ops->map(ops, iova, paddr, size, prot);
1273 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1274 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001275}
1276
1277static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1278 size_t size)
1279{
Will Deacon518f7132014-11-14 17:17:54 +00001280 size_t ret;
1281 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001282 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001283 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001284
Will Deacon518f7132014-11-14 17:17:54 +00001285 if (!ops)
1286 return 0;
1287
1288 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1289 ret = ops->unmap(ops, iova, size);
1290 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1291 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001292}
1293
/*
 * Resolve an IOVA to a physical address using the SMMU's own hardware
 * translation interface (the ATS1PR/ATSR/PAR context bank registers)
 * instead of a software page-table walk.
 *
 * Called with the domain's pgtbl_lock held (see arm_smmu_iova_to_phys).
 * Falls back to a software walk if the hardware translation times out;
 * returns 0 if the hardware reports a translation fault.
 */
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;	/* strip the page offset before the lookup */
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	/* Poll ATSR until the hardware translation completes */
	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	/* PAR holds either the result PA or a fault indication (CB_PAR_F) */
	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	/* Recombine the translated page address with the page offset */
	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
1333
Will Deacon45ae7cf2013-06-24 18:31:25 +01001334static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001335 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001336{
Will Deacon518f7132014-11-14 17:17:54 +00001337 phys_addr_t ret;
1338 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001339 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001340 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001341
Will Deacon518f7132014-11-14 17:17:54 +00001342 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001343 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001344
Will Deacon518f7132014-11-14 17:17:54 +00001345 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001346 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1347 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001348 ret = arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001349 } else {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001350 ret = ops->iova_to_phys(ops, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001351 }
1352
Will Deacon518f7132014-11-14 17:17:54 +00001353 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001354
Will Deacon518f7132014-11-14 17:17:54 +00001355 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001356}
1357
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001358static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001359{
Will Deacond0948942014-06-24 17:30:10 +01001360 switch (cap) {
1361 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001362 /*
1363 * Return true here as the SMMU can always send out coherent
1364 * requests.
1365 */
1366 return true;
Will Deacond0948942014-06-24 17:30:10 +01001367 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001368 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001369 case IOMMU_CAP_NOEXEC:
1370 return true;
Will Deacond0948942014-06-24 17:30:10 +01001371 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001372 return false;
Will Deacond0948942014-06-24 17:30:10 +01001373 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001374}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001375
/*
 * iommu_ops->add_device callback: bind a master to its SMMU.
 *
 * arm_smmu_register_legacy_master() allocates the per-master cfg and
 * stashes it in dev->archdata.iommu (so cfg is fetched from there even on
 * the failure path, where it may still need freeing). Each stream ID is
 * range-checked against the SMMU's probed capability mask and its SME slot
 * marked invalid before stream-map entries are actually allocated.
 *
 * Returns 0 on success; on any failure the cfg is freed and
 * dev->archdata.iommu reset to NULL.
 */
static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg;
	int i, ret;

	ret = arm_smmu_register_legacy_master(dev);
	/* cfg may be set even when registration failed; free it below */
	cfg = dev->archdata.iommu;
	if (ret)
		goto out_free;

	ret = -EINVAL;
	for (i = 0; i < cfg->num_streamids; i++) {
		u16 sid = cfg->streamids[i];

		/* Reject IDs wider than the hardware can match */
		if (sid & ~cfg->smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, cfg->smmu->streamid_mask);
			goto out_free;
		}
		/* Mark as not-yet-allocated for arm_smmu_master_alloc_smes() */
		cfg->smendx[i] = INVALID_SMENDX;
	}

	ret = arm_smmu_master_alloc_smes(dev);
	if (!ret)
		return ret;

out_free:
	/* kfree(NULL) is a no-op, so this is safe even if cfg was never set */
	kfree(cfg);
	dev->archdata.iommu = NULL;
	return ret;
}
1407
Will Deacon45ae7cf2013-06-24 18:31:25 +01001408static void arm_smmu_remove_device(struct device *dev)
1409{
Robin Murphyf80cd882016-09-14 15:21:39 +01001410 struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001411
Robin Murphyf80cd882016-09-14 15:21:39 +01001412 if (!cfg)
1413 return;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001414
Robin Murphyf80cd882016-09-14 15:21:39 +01001415 arm_smmu_master_free_smes(cfg);
Antonios Motakis5fc63a72013-10-18 16:08:29 +01001416 iommu_group_remove_device(dev);
Robin Murphyf80cd882016-09-14 15:21:39 +01001417 kfree(cfg);
1418 dev->archdata.iommu = NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001419}
1420
Joerg Roedelaf659932015-10-21 23:51:41 +02001421static struct iommu_group *arm_smmu_device_group(struct device *dev)
1422{
Robin Murphy588888a2016-09-12 17:13:54 +01001423 struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
1424 struct arm_smmu_device *smmu = cfg->smmu;
1425 struct iommu_group *group = NULL;
1426 int i, idx;
1427
1428 for_each_cfg_sme(cfg, i, idx) {
1429 if (group && smmu->s2crs[idx].group &&
1430 group != smmu->s2crs[idx].group)
1431 return ERR_PTR(-EINVAL);
1432
1433 group = smmu->s2crs[idx].group;
1434 }
1435
1436 if (group)
1437 return group;
Joerg Roedelaf659932015-10-21 23:51:41 +02001438
1439 if (dev_is_pci(dev))
1440 group = pci_device_group(dev);
1441 else
1442 group = generic_device_group(dev);
1443
Joerg Roedelaf659932015-10-21 23:51:41 +02001444 return group;
1445}
1446
Will Deaconc752ce42014-06-25 22:46:31 +01001447static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1448 enum iommu_attr attr, void *data)
1449{
Joerg Roedel1d672632015-03-26 13:43:10 +01001450 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001451
1452 switch (attr) {
1453 case DOMAIN_ATTR_NESTING:
1454 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1455 return 0;
1456 default:
1457 return -ENODEV;
1458 }
1459}
1460
1461static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1462 enum iommu_attr attr, void *data)
1463{
Will Deacon518f7132014-11-14 17:17:54 +00001464 int ret = 0;
Joerg Roedel1d672632015-03-26 13:43:10 +01001465 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001466
Will Deacon518f7132014-11-14 17:17:54 +00001467 mutex_lock(&smmu_domain->init_mutex);
1468
Will Deaconc752ce42014-06-25 22:46:31 +01001469 switch (attr) {
1470 case DOMAIN_ATTR_NESTING:
Will Deacon518f7132014-11-14 17:17:54 +00001471 if (smmu_domain->smmu) {
1472 ret = -EPERM;
1473 goto out_unlock;
1474 }
1475
Will Deaconc752ce42014-06-25 22:46:31 +01001476 if (*(int *)data)
1477 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1478 else
1479 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1480
Will Deacon518f7132014-11-14 17:17:54 +00001481 break;
Will Deaconc752ce42014-06-25 22:46:31 +01001482 default:
Will Deacon518f7132014-11-14 17:17:54 +00001483 ret = -ENODEV;
Will Deaconc752ce42014-06-25 22:46:31 +01001484 }
Will Deacon518f7132014-11-14 17:17:54 +00001485
1486out_unlock:
1487 mutex_unlock(&smmu_domain->init_mutex);
1488 return ret;
Will Deaconc752ce42014-06-25 22:46:31 +01001489}
1490
/*
 * The iommu_ops vtable exported to the IOMMU core for all buses this
 * driver claims. pgsize_bitmap starts as all-ones and is narrowed in
 * arm_smmu_device_cfg_probe() once the hardware's supported page sizes
 * are known.
 */
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
1507
/*
 * Bring the SMMU to a known-good state at probe time: clear stale fault
 * status, invalidate stream mappings and TLBs, apply MMU-500 errata
 * workarounds, and finally enable the global configuration.
 *
 * NOTE(review): the register access order below is deliberate (e.g. the
 * TLB sync must complete before sCR0 is written) — preserve it.
 */
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR (write-one-to-clear, hence the read-back write) */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
	 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
	 * bit is only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	/* Build up the new global configuration from the current sCR0 */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button: drain pending TLB ops, then commit sCR0 */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
1587
/*
 * Decode the 3-bit address-size fields found in the SMMU ID registers
 * (IAS/OAS/UBS encodings) into a number of address bits. Values above 5
 * (and any out-of-range input) saturate to 48 bits, matching the
 * architecture's "reserved encodings behave as maximum" treatment.
 */
static int arm_smmu_id_size_to_bits(int size)
{
	static const int id_to_bits[] = { 32, 36, 40, 42, 44, 48 };

	if (size < 0 || size > 5)
		return 48;

	return id_to_bits[size];
}
1606
1607static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1608{
1609 unsigned long size;
1610 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1611 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01001612 bool cttw_dt, cttw_reg;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001613 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001614
1615 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001616 dev_notice(smmu->dev, "SMMUv%d with:\n",
1617 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001618
1619 /* ID0 */
1620 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001621
1622 /* Restrict available stages based on module parameter */
1623 if (force_stage == 1)
1624 id &= ~(ID0_S2TS | ID0_NTS);
1625 else if (force_stage == 2)
1626 id &= ~(ID0_S1TS | ID0_NTS);
1627
Will Deacon45ae7cf2013-06-24 18:31:25 +01001628 if (id & ID0_S1TS) {
1629 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1630 dev_notice(smmu->dev, "\tstage 1 translation\n");
1631 }
1632
1633 if (id & ID0_S2TS) {
1634 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1635 dev_notice(smmu->dev, "\tstage 2 translation\n");
1636 }
1637
1638 if (id & ID0_NTS) {
1639 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1640 dev_notice(smmu->dev, "\tnested translation\n");
1641 }
1642
1643 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001644 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001645 dev_err(smmu->dev, "\tno translation support!\n");
1646 return -ENODEV;
1647 }
1648
Robin Murphyb7862e32016-04-13 18:13:03 +01001649 if ((id & ID0_S1TS) &&
1650 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001651 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1652 dev_notice(smmu->dev, "\taddress translation ops\n");
1653 }
1654
Robin Murphybae2c2d2015-07-29 19:46:05 +01001655 /*
1656 * In order for DMA API calls to work properly, we must defer to what
1657 * the DT says about coherency, regardless of what the hardware claims.
1658 * Fortunately, this also opens up a workaround for systems where the
1659 * ID register value has ended up configured incorrectly.
1660 */
1661 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
1662 cttw_reg = !!(id & ID0_CTTW);
1663 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001664 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01001665 if (cttw_dt || cttw_reg)
1666 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1667 cttw_dt ? "" : "non-");
1668 if (cttw_dt != cttw_reg)
1669 dev_notice(smmu->dev,
1670 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001671
Robin Murphy21174242016-09-12 17:13:48 +01001672 /* Max. number of entries we have for stream matching/indexing */
1673 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1674 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001675 if (id & ID0_SMS) {
Robin Murphy21174242016-09-12 17:13:48 +01001676 u32 smr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001677
1678 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy21174242016-09-12 17:13:48 +01001679 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1680 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001681 dev_err(smmu->dev,
1682 "stream-matching supported, but no SMRs present!\n");
1683 return -ENODEV;
1684 }
1685
Robin Murphy21174242016-09-12 17:13:48 +01001686 /*
1687 * SMR.ID bits may not be preserved if the corresponding MASK
1688 * bits are set, so check each one separately. We can reject
1689 * masters later if they try to claim IDs outside these masks.
1690 */
1691 smr = smmu->streamid_mask << SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001692 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1693 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
Robin Murphy21174242016-09-12 17:13:48 +01001694 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001695
Robin Murphy21174242016-09-12 17:13:48 +01001696 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
1697 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1698 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1699 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001700
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001701 /* Zero-initialised to mark as invalid */
1702 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1703 GFP_KERNEL);
1704 if (!smmu->smrs)
1705 return -ENOMEM;
1706
Will Deacon45ae7cf2013-06-24 18:31:25 +01001707 dev_notice(smmu->dev,
Robin Murphy21174242016-09-12 17:13:48 +01001708 "\tstream matching with %lu register groups, mask 0x%x",
1709 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001710 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001711 /* s2cr->type == 0 means translation, so initialise explicitly */
1712 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1713 GFP_KERNEL);
1714 if (!smmu->s2crs)
1715 return -ENOMEM;
1716 for (i = 0; i < size; i++)
1717 smmu->s2crs[i] = s2cr_init_val;
1718
Robin Murphy21174242016-09-12 17:13:48 +01001719 smmu->num_mapping_groups = size;
Robin Murphy588888a2016-09-12 17:13:54 +01001720 mutex_init(&smmu->stream_map_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001721
Robin Murphy7602b872016-04-28 17:12:09 +01001722 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1723 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1724 if (!(id & ID0_PTFS_NO_AARCH32S))
1725 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1726 }
1727
Will Deacon45ae7cf2013-06-24 18:31:25 +01001728 /* ID1 */
1729 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001730 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001731
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001732 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001733 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01001734 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001735 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001736 dev_warn(smmu->dev,
1737 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1738 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001739
Will Deacon518f7132014-11-14 17:17:54 +00001740 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001741 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1742 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1743 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1744 return -ENODEV;
1745 }
1746 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1747 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001748 /*
1749 * Cavium CN88xx erratum #27704.
1750 * Ensure ASID and VMID allocation is unique across all SMMUs in
1751 * the system.
1752 */
1753 if (smmu->model == CAVIUM_SMMUV2) {
1754 smmu->cavium_id_base =
1755 atomic_add_return(smmu->num_context_banks,
1756 &cavium_smmu_context_count);
1757 smmu->cavium_id_base -= smmu->num_context_banks;
1758 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001759
1760 /* ID2 */
1761 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1762 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001763 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001764
Will Deacon518f7132014-11-14 17:17:54 +00001765 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001766 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001767 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001768
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001769 if (id & ID2_VMID16)
1770 smmu->features |= ARM_SMMU_FEAT_VMID16;
1771
Robin Murphyf1d84542015-03-04 16:41:05 +00001772 /*
1773 * What the page table walker can address actually depends on which
1774 * descriptor format is in use, but since a) we don't know that yet,
1775 * and b) it can vary per context bank, this will have to do...
1776 */
1777 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1778 dev_warn(smmu->dev,
1779 "failed to set DMA mask for table walker\n");
1780
Robin Murphyb7862e32016-04-13 18:13:03 +01001781 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001782 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001783 if (smmu->version == ARM_SMMU_V1_64K)
1784 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001785 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001786 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00001787 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001788 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001789 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001790 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001791 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001792 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001793 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001794 }
1795
Robin Murphy7602b872016-04-28 17:12:09 +01001796 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001797 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001798 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001799 if (smmu->features &
1800 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001801 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001802 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001803 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001804 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001805 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001806
Robin Murphyd5466352016-05-09 17:20:09 +01001807 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1808 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1809 else
1810 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1811 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1812 smmu->pgsize_bitmap);
1813
Will Deacon518f7132014-11-14 17:17:54 +00001814
Will Deacon28d60072014-09-01 16:24:48 +01001815 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1816 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001817 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001818
1819 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1820 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001821 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001822
Will Deacon45ae7cf2013-06-24 18:31:25 +01001823 return 0;
1824}
1825
/*
 * Per-compatible probe data: which architecture revision the instance
 * implements and which implementation (for model-specific quirks).
 */
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

/* Helper to declare a named match-data instance for the table below */
#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
/* MMU-401 is a v1 implementation with 64K pages */
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

/* Device-tree compatible strings handled by this driver */
static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1850
/*
 * Platform-driver probe: allocate and initialise one SMMU instance from
 * its device-tree description.
 *
 * Order matters: map the registers and collect IRQs first, then probe the
 * hardware configuration, then validate the context-IRQ count (v2 needs
 * one per context bank) before requesting the global fault IRQs and
 * resetting the device into service.
 *
 * All allocations and IRQs are devm-managed, so error paths can simply
 * return the errno.
 */
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Version/model come from the matched compatible string */
	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	/* Count total IRQs; everything beyond the globals is a context IRQ */
	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	parse_driver_options(smmu);

	/* SMMUv2 requires one context interrupt per context bank */
	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	return 0;
}
1943
/*
 * Platform-driver remove: warn if domains are still live, then disable
 * the SMMU's client ports so no further transactions are translated.
 */
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	/* Any bit still set in context_map means a domain wasn't torn down */
	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}
1958
/* Platform driver glue; devices are matched via arm_smmu_of_match above */
static struct platform_driver arm_smmu_driver = {
	.driver = {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};
1967
/*
 * Module init: register the platform driver and install arm_smmu_ops on
 * each supported bus type, but only when no other IOMMU driver has
 * claimed that bus first.
 */
static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		/* Request ACS so PCI devices can be isolated into groups */
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}
2006
2007static void __exit arm_smmu_exit(void)
2008{
2009 return platform_driver_unregister(&arm_smmu_driver);
2010}
2011
/* subsys_initcall: bring the SMMU up before its client devices probe */
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");