blob: f402f9e126a8704790eb3eaf1c48b636a8f88d99 [file] [log] [blame]
Will Deacon45ae7cf2013-06-24 18:31:25 +01001/*
2 * IOMMU API for ARM architected SMMU implementations.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Copyright (C) 2013 ARM Limited
18 *
19 * Author: Will Deacon <will.deacon@arm.com>
20 *
21 * This driver currently supports:
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
Will Deacon45ae7cf2013-06-24 18:31:25 +010026 * - Context fault reporting
27 */
28
29#define pr_fmt(fmt) "arm-smmu: " fmt
30
Robin Murphy1f3d5ca2016-09-12 17:13:49 +010031#include <linux/atomic.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010032#include <linux/delay.h>
Robin Murphy9adb9592016-01-26 18:06:36 +000033#include <linux/dma-iommu.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010034#include <linux/dma-mapping.h>
35#include <linux/err.h>
36#include <linux/interrupt.h>
37#include <linux/io.h>
Robin Murphyf9a05f02016-04-13 18:13:01 +010038#include <linux/io-64-nonatomic-hi-lo.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010039#include <linux/iommu.h>
Mitchel Humpherys859a7322014-10-29 21:13:40 +000040#include <linux/iopoll.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010041#include <linux/module.h>
42#include <linux/of.h>
Robin Murphybae2c2d2015-07-29 19:46:05 +010043#include <linux/of_address.h>
Robin Murphyd6fc5d92016-09-12 17:13:52 +010044#include <linux/of_device.h>
Will Deacona9a1b0b2014-05-01 18:05:08 +010045#include <linux/pci.h>
Will Deacon45ae7cf2013-06-24 18:31:25 +010046#include <linux/platform_device.h>
47#include <linux/slab.h>
48#include <linux/spinlock.h>
49
50#include <linux/amba/bus.h>
51
Will Deacon518f7132014-11-14 17:17:54 +000052#include "io-pgtable.h"
Will Deacon45ae7cf2013-06-24 18:31:25 +010053
54/* Maximum number of stream IDs assigned to a single device */
Joerg Roedelcb6c27b2016-04-04 17:49:22 +020055#define MAX_MASTER_STREAMIDS 128
Will Deacon45ae7cf2013-06-24 18:31:25 +010056
57/* Maximum number of context banks per SMMU */
58#define ARM_SMMU_MAX_CBS 128
59
Will Deacon45ae7cf2013-06-24 18:31:25 +010060/* SMMU global address space */
61#define ARM_SMMU_GR0(smmu) ((smmu)->base)
Will Deaconc757e852014-07-30 11:33:25 +010062#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +010063
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +000064/*
65 * SMMU global address space with conditional offset to access secure
66 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
67 * nsGFSYNR0: 0x450)
68 */
69#define ARM_SMMU_GR0_NS(smmu) \
70 ((smmu)->base + \
71 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
72 ? 0x400 : 0))
73
Robin Murphyf9a05f02016-04-13 18:13:01 +010074/*
75 * Some 64-bit registers only make sense to write atomically, but in such
76 * cases all the data relevant to AArch32 formats lies within the lower word,
77 * therefore this actually makes more sense than it might first appear.
78 */
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010079#ifdef CONFIG_64BIT
Robin Murphyf9a05f02016-04-13 18:13:01 +010080#define smmu_write_atomic_lq writeq_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010081#else
Robin Murphyf9a05f02016-04-13 18:13:01 +010082#define smmu_write_atomic_lq writel_relaxed
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +010083#endif
84
Will Deacon45ae7cf2013-06-24 18:31:25 +010085/* Configuration registers */
86#define ARM_SMMU_GR0_sCR0 0x0
87#define sCR0_CLIENTPD (1 << 0)
88#define sCR0_GFRE (1 << 1)
89#define sCR0_GFIE (1 << 2)
90#define sCR0_GCFGFRE (1 << 4)
91#define sCR0_GCFGFIE (1 << 5)
92#define sCR0_USFCFG (1 << 10)
93#define sCR0_VMIDPNE (1 << 11)
94#define sCR0_PTM (1 << 12)
95#define sCR0_FB (1 << 13)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -080096#define sCR0_VMID16EN (1 << 31)
Will Deacon45ae7cf2013-06-24 18:31:25 +010097#define sCR0_BSU_SHIFT 14
98#define sCR0_BSU_MASK 0x3
99
Peng Fan3ca37122016-05-03 21:50:30 +0800100/* Auxiliary Configuration register */
101#define ARM_SMMU_GR0_sACR 0x10
102
Will Deacon45ae7cf2013-06-24 18:31:25 +0100103/* Identification registers */
104#define ARM_SMMU_GR0_ID0 0x20
105#define ARM_SMMU_GR0_ID1 0x24
106#define ARM_SMMU_GR0_ID2 0x28
107#define ARM_SMMU_GR0_ID3 0x2c
108#define ARM_SMMU_GR0_ID4 0x30
109#define ARM_SMMU_GR0_ID5 0x34
110#define ARM_SMMU_GR0_ID6 0x38
111#define ARM_SMMU_GR0_ID7 0x3c
112#define ARM_SMMU_GR0_sGFSR 0x48
113#define ARM_SMMU_GR0_sGFSYNR0 0x50
114#define ARM_SMMU_GR0_sGFSYNR1 0x54
115#define ARM_SMMU_GR0_sGFSYNR2 0x58
Will Deacon45ae7cf2013-06-24 18:31:25 +0100116
117#define ID0_S1TS (1 << 30)
118#define ID0_S2TS (1 << 29)
119#define ID0_NTS (1 << 28)
120#define ID0_SMS (1 << 27)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000121#define ID0_ATOSNS (1 << 26)
Robin Murphy7602b872016-04-28 17:12:09 +0100122#define ID0_PTFS_NO_AARCH32 (1 << 25)
123#define ID0_PTFS_NO_AARCH32S (1 << 24)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100124#define ID0_CTTW (1 << 14)
125#define ID0_NUMIRPT_SHIFT 16
126#define ID0_NUMIRPT_MASK 0xff
Olav Haugan3c8766d2014-08-22 17:12:32 -0700127#define ID0_NUMSIDB_SHIFT 9
128#define ID0_NUMSIDB_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100129#define ID0_NUMSMRG_SHIFT 0
130#define ID0_NUMSMRG_MASK 0xff
131
132#define ID1_PAGESIZE (1 << 31)
133#define ID1_NUMPAGENDXB_SHIFT 28
134#define ID1_NUMPAGENDXB_MASK 7
135#define ID1_NUMS2CB_SHIFT 16
136#define ID1_NUMS2CB_MASK 0xff
137#define ID1_NUMCB_SHIFT 0
138#define ID1_NUMCB_MASK 0xff
139
140#define ID2_OAS_SHIFT 4
141#define ID2_OAS_MASK 0xf
142#define ID2_IAS_SHIFT 0
143#define ID2_IAS_MASK 0xf
144#define ID2_UBS_SHIFT 8
145#define ID2_UBS_MASK 0xf
146#define ID2_PTFS_4K (1 << 12)
147#define ID2_PTFS_16K (1 << 13)
148#define ID2_PTFS_64K (1 << 14)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800149#define ID2_VMID16 (1 << 15)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100150
Peng Fan3ca37122016-05-03 21:50:30 +0800151#define ID7_MAJOR_SHIFT 4
152#define ID7_MAJOR_MASK 0xf
Will Deacon45ae7cf2013-06-24 18:31:25 +0100153
Will Deacon45ae7cf2013-06-24 18:31:25 +0100154/* Global TLB invalidation */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100155#define ARM_SMMU_GR0_TLBIVMID 0x64
156#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
157#define ARM_SMMU_GR0_TLBIALLH 0x6c
158#define ARM_SMMU_GR0_sTLBGSYNC 0x70
159#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
160#define sTLBGSTATUS_GSACTIVE (1 << 0)
161#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
162
163/* Stream mapping registers */
164#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
165#define SMR_VALID (1 << 31)
166#define SMR_MASK_SHIFT 16
Will Deacon45ae7cf2013-06-24 18:31:25 +0100167#define SMR_ID_SHIFT 0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100168
169#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
170#define S2CR_CBNDX_SHIFT 0
171#define S2CR_CBNDX_MASK 0xff
172#define S2CR_TYPE_SHIFT 16
173#define S2CR_TYPE_MASK 0x3
Robin Murphy8e8b2032016-09-12 17:13:50 +0100174enum arm_smmu_s2cr_type {
175 S2CR_TYPE_TRANS,
176 S2CR_TYPE_BYPASS,
177 S2CR_TYPE_FAULT,
178};
Will Deacon45ae7cf2013-06-24 18:31:25 +0100179
Robin Murphyd3461802016-01-26 18:06:34 +0000180#define S2CR_PRIVCFG_SHIFT 24
Robin Murphy8e8b2032016-09-12 17:13:50 +0100181#define S2CR_PRIVCFG_MASK 0x3
182enum arm_smmu_s2cr_privcfg {
183 S2CR_PRIVCFG_DEFAULT,
184 S2CR_PRIVCFG_DIPAN,
185 S2CR_PRIVCFG_UNPRIV,
186 S2CR_PRIVCFG_PRIV,
187};
Robin Murphyd3461802016-01-26 18:06:34 +0000188
Will Deacon45ae7cf2013-06-24 18:31:25 +0100189/* Context bank attribute registers */
190#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
191#define CBAR_VMID_SHIFT 0
192#define CBAR_VMID_MASK 0xff
Will Deacon57ca90f2014-02-06 14:59:05 +0000193#define CBAR_S1_BPSHCFG_SHIFT 8
194#define CBAR_S1_BPSHCFG_MASK 3
195#define CBAR_S1_BPSHCFG_NSH 3
Will Deacon45ae7cf2013-06-24 18:31:25 +0100196#define CBAR_S1_MEMATTR_SHIFT 12
197#define CBAR_S1_MEMATTR_MASK 0xf
198#define CBAR_S1_MEMATTR_WB 0xf
199#define CBAR_TYPE_SHIFT 16
200#define CBAR_TYPE_MASK 0x3
201#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
202#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
203#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
204#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
205#define CBAR_IRPTNDX_SHIFT 24
206#define CBAR_IRPTNDX_MASK 0xff
207
208#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
209#define CBA2R_RW64_32BIT (0 << 0)
210#define CBA2R_RW64_64BIT (1 << 0)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800211#define CBA2R_VMID_SHIFT 16
212#define CBA2R_VMID_MASK 0xffff
Will Deacon45ae7cf2013-06-24 18:31:25 +0100213
214/* Translation context bank */
215#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
Will Deaconc757e852014-07-30 11:33:25 +0100216#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
Will Deacon45ae7cf2013-06-24 18:31:25 +0100217
218#define ARM_SMMU_CB_SCTLR 0x0
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100219#define ARM_SMMU_CB_ACTLR 0x4
Will Deacon45ae7cf2013-06-24 18:31:25 +0100220#define ARM_SMMU_CB_RESUME 0x8
221#define ARM_SMMU_CB_TTBCR2 0x10
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100222#define ARM_SMMU_CB_TTBR0 0x20
223#define ARM_SMMU_CB_TTBR1 0x28
Will Deacon45ae7cf2013-06-24 18:31:25 +0100224#define ARM_SMMU_CB_TTBCR 0x30
Robin Murphy60705292016-08-11 17:44:06 +0100225#define ARM_SMMU_CB_CONTEXTIDR 0x34
Will Deacon45ae7cf2013-06-24 18:31:25 +0100226#define ARM_SMMU_CB_S1_MAIR0 0x38
Will Deacon518f7132014-11-14 17:17:54 +0000227#define ARM_SMMU_CB_S1_MAIR1 0x3c
Robin Murphyf9a05f02016-04-13 18:13:01 +0100228#define ARM_SMMU_CB_PAR 0x50
Will Deacon45ae7cf2013-06-24 18:31:25 +0100229#define ARM_SMMU_CB_FSR 0x58
Robin Murphyf9a05f02016-04-13 18:13:01 +0100230#define ARM_SMMU_CB_FAR 0x60
Will Deacon45ae7cf2013-06-24 18:31:25 +0100231#define ARM_SMMU_CB_FSYNR0 0x68
Will Deacon518f7132014-11-14 17:17:54 +0000232#define ARM_SMMU_CB_S1_TLBIVA 0x600
Will Deacon1463fe42013-07-31 19:21:27 +0100233#define ARM_SMMU_CB_S1_TLBIASID 0x610
Will Deacon518f7132014-11-14 17:17:54 +0000234#define ARM_SMMU_CB_S1_TLBIVAL 0x620
235#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
236#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
Robin Murphy661d9622015-05-27 17:09:34 +0100237#define ARM_SMMU_CB_ATS1PR 0x800
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000238#define ARM_SMMU_CB_ATSR 0x8f0
Will Deacon45ae7cf2013-06-24 18:31:25 +0100239
240#define SCTLR_S1_ASIDPNE (1 << 12)
241#define SCTLR_CFCFG (1 << 7)
242#define SCTLR_CFIE (1 << 6)
243#define SCTLR_CFRE (1 << 5)
244#define SCTLR_E (1 << 4)
245#define SCTLR_AFE (1 << 2)
246#define SCTLR_TRE (1 << 1)
247#define SCTLR_M (1 << 0)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100248
Robin Murphyf0cfffc2016-04-13 18:12:59 +0100249#define ARM_MMU500_ACTLR_CPRE (1 << 1)
250
Peng Fan3ca37122016-05-03 21:50:30 +0800251#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
252
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000253#define CB_PAR_F (1 << 0)
254
255#define ATSR_ACTIVE (1 << 0)
256
Will Deacon45ae7cf2013-06-24 18:31:25 +0100257#define RESUME_RETRY (0 << 0)
258#define RESUME_TERMINATE (1 << 0)
259
Will Deacon45ae7cf2013-06-24 18:31:25 +0100260#define TTBCR2_SEP_SHIFT 15
Will Deacon5dc56162015-05-08 17:44:22 +0100261#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100262
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100263#define TTBRn_ASID_SHIFT 48
Will Deacon45ae7cf2013-06-24 18:31:25 +0100264
265#define FSR_MULTI (1 << 31)
266#define FSR_SS (1 << 30)
267#define FSR_UUT (1 << 8)
268#define FSR_ASF (1 << 7)
269#define FSR_TLBLKF (1 << 6)
270#define FSR_TLBMCF (1 << 5)
271#define FSR_EF (1 << 4)
272#define FSR_PF (1 << 3)
273#define FSR_AFF (1 << 2)
274#define FSR_TF (1 << 1)
275
Mitchel Humpherys29073202014-07-08 09:52:18 -0700276#define FSR_IGN (FSR_AFF | FSR_ASF | \
277 FSR_TLBMCF | FSR_TLBLKF)
278#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
Will Deaconadaba322013-07-31 19:21:26 +0100279 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100280
281#define FSYNR0_WNR (1 << 4)
282
/*
 * Module parameters: read-only after load (S_IRUGO, no write permission).
 * force_stage pins all domains to stage 1 or stage 2 translation;
 * disable_bypass makes unattached streams fault instead of bypassing.
 */
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

/* Architecture version implemented by the hardware */
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,	/* SMMUv1 with 64kB register pages */
	ARM_SMMU_V2,
};

/* Implementation-specific quirk selector (see model checks elsewhere) */
enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};
303
/* Software shadow of one Stream-to-Context Register (S2CR) */
struct arm_smmu_s2cr {
	enum arm_smmu_s2cr_type type;		/* TRANS/BYPASS/FAULT */
	enum arm_smmu_s2cr_privcfg privcfg;
	u8 cbndx;				/* context bank index when type == TRANS */
};

/*
 * Default S2CR value: fault on unmatched streams when disable_bypass is
 * set, otherwise let them bypass translation.
 */
#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

/* Software shadow of one Stream Match Register (SMR) */
struct arm_smmu_smr {
	u16 mask;	/* bits to ignore when matching */
	u16 id;		/* stream ID to match */
	bool valid;	/* entry programmed into hardware */
};

/* Per-master configuration: the stream IDs a device emits */
struct arm_smmu_master_cfg {
	struct arm_smmu_device *smmu;
	int num_streamids;
	u16 streamids[MAX_MASTER_STREAMIDS];
	/* SMR/S2CR group index per stream ID, or INVALID_SMENDX */
	s16 smendx[MAX_MASTER_STREAMIDS];
};
#define INVALID_SMENDX -1
/*
 * Iterate over a master's stream-map entries. Note the comma operator:
 * idx is (re)loaded from cfg->smendx[i] before the i bound is tested, so
 * idx is valid inside the loop body on every iteration.
 */
#define for_each_cfg_sme(cfg, i, idx) \
	for (i = 0; idx = cfg->smendx[i], i < cfg->num_streamids; ++i)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100329
330struct arm_smmu_device {
331 struct device *dev;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100332
333 void __iomem *base;
334 unsigned long size;
Will Deaconc757e852014-07-30 11:33:25 +0100335 unsigned long pgshift;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100336
337#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
338#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
339#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
340#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
341#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
Mitchel Humpherys859a7322014-10-29 21:13:40 +0000342#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800343#define ARM_SMMU_FEAT_VMID16 (1 << 6)
Robin Murphy7602b872016-04-28 17:12:09 +0100344#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
345#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
346#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
347#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
348#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100349 u32 features;
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000350
351#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
352 u32 options;
Robin Murphy09360402014-08-28 17:51:59 +0100353 enum arm_smmu_arch_version version;
Robin Murphy67b65a32016-04-13 18:12:57 +0100354 enum arm_smmu_implementation model;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100355
356 u32 num_context_banks;
357 u32 num_s2_context_banks;
358 DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
359 atomic_t irptndx;
360
361 u32 num_mapping_groups;
Robin Murphy21174242016-09-12 17:13:48 +0100362 u16 streamid_mask;
363 u16 smr_mask_mask;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +0100364 struct arm_smmu_smr *smrs;
Robin Murphy8e8b2032016-09-12 17:13:50 +0100365 struct arm_smmu_s2cr *s2crs;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100366
Will Deacon518f7132014-11-14 17:17:54 +0000367 unsigned long va_size;
368 unsigned long ipa_size;
369 unsigned long pa_size;
Robin Murphyd5466352016-05-09 17:20:09 +0100370 unsigned long pgsize_bitmap;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100371
372 u32 num_global_irqs;
373 u32 num_context_irqs;
374 unsigned int *irqs;
375
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800376 u32 cavium_id_base; /* Specific to Cavium */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100377};
378
/* Translation table format programmed into a context bank */
enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,	/* AArch32 long descriptor */
	ARM_SMMU_CTX_FMT_AARCH32_S,	/* AArch32 short descriptor */
};

/* Per-context-bank configuration */
struct arm_smmu_cfg {
	u8 cbndx;	/* context bank index */
	u8 irptndx;	/* context interrupt index, or INVALID_IRPTNDX */
	u32 cbar;	/* CBAR type/attributes for this bank */
	enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX			0xff

/*
 * ASIDs/VMIDs are derived from the context bank index, offset by
 * cavium_id_base so multiple Cavium SMMUs sharing a TLB namespace
 * don't alias (base is 0 on other implementations).
 */
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)

/* Which translation stage(s) a domain uses */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

/* IOMMU-API domain wrapper around one context bank */
struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;	/* serialises map/unmap/iova_to_phys */
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;		/* embedded; see to_smmu_domain() */
};
412
/* Maps a DT property name to an ARM_SMMU_OPT_* flag */
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

/* Shared counter handing out cavium_id_base across Cavium SMMU instances */
static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

/* NULL/0-terminated table consumed by parse_driver_options() */
static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};
424
Joerg Roedel1d672632015-03-26 13:43:10 +0100425static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
426{
427 return container_of(dom, struct arm_smmu_domain, domain);
428}
429
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000430static void parse_driver_options(struct arm_smmu_device *smmu)
431{
432 int i = 0;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700433
Andreas Herrmann3a5df8f2014-01-30 18:18:04 +0000434 do {
435 if (of_property_read_bool(smmu->dev->of_node,
436 arm_smmu_options[i].prop)) {
437 smmu->options |= arm_smmu_options[i].opt;
438 dev_notice(smmu->dev, "option %s\n",
439 arm_smmu_options[i].prop);
440 }
441 } while (arm_smmu_options[++i].opt);
442}
443
Will Deacon8f68f8e2014-07-15 11:27:08 +0100444static struct device_node *dev_get_dev_node(struct device *dev)
Will Deacona9a1b0b2014-05-01 18:05:08 +0100445{
446 if (dev_is_pci(dev)) {
447 struct pci_bus *bus = to_pci_dev(dev)->bus;
Mitchel Humpherys29073202014-07-08 09:52:18 -0700448
Will Deacona9a1b0b2014-05-01 18:05:08 +0100449 while (!pci_is_root_bus(bus))
450 bus = bus->parent;
Robin Murphyf80cd882016-09-14 15:21:39 +0100451 return of_node_get(bus->bridge->parent->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100452 }
453
Robin Murphyf80cd882016-09-14 15:21:39 +0100454 return of_node_get(dev->of_node);
Will Deacona9a1b0b2014-05-01 18:05:08 +0100455}
456
Robin Murphyf80cd882016-09-14 15:21:39 +0100457static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100458{
Robin Murphyf80cd882016-09-14 15:21:39 +0100459 *((__be32 *)data) = cpu_to_be32(alias);
460 return 0; /* Continue walking */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100461}
462
/*
 * driver_for_each_device() callback used to find which registered SMMU's
 * "mmu-masters" list references a given device node.
 *
 * @data is used both ways: on entry it holds a pointer to an
 * of_phandle_iterator whose ->node is the master's DT node to look for;
 * on a successful match it is overwritten with the SMMU's struct device
 * pointer, and 1 is returned to stop the outer iteration (leaving the
 * iterator positioned at the matching entry for the caller to consume).
 */
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	/* Restore the target node clobbered by the phandle walk */
	it->node = np;
	/* -ENOENT just means this SMMU has no "mmu-masters"; keep going */
	return err == -ENOENT ? 0 : err;
}
478
Robin Murphyd6fc5d92016-09-12 17:13:52 +0100479static struct platform_driver arm_smmu_driver;
480
/*
 * Bind @dev to an SMMU via the legacy "mmu-masters" DT binding: find the
 * SMMU whose mmu-masters list names this device, collect its stream IDs,
 * and hang an arm_smmu_master_cfg off dev->archdata.iommu.
 *
 * Returns 0 on success, -ENODEV if no SMMU claims this device, -ENOSPC
 * if it has more than MAX_MASTER_STREAMIDS IDs, -ENOMEM on allocation
 * failure, or an error propagated from the phandle walk.
 */
static int arm_smmu_register_legacy_master(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;	/* in: iterator; out: matching SMMU's device */
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);	/* of_node_put(NULL) is a no-op */
		return -ENODEV;
	}

	it.node = np;
	/* Returns 1 (match found), 0 (no match) or a negative error */
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	/* On a match, __find_legacy_master_phandle left the SMMU dev in data */
	smmu = dev_get_drvdata(data);

	if (it.cur_count > MAX_MASTER_STREAMIDS) {
		dev_err(smmu->dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, dev_name(dev));
		return -ENOSPC;
	}
	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		/* Point the iterator at the single synthesised PCI SID */
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	cfg->smmu = smmu;
	dev->archdata.iommu = cfg;

	/*
	 * NOTE(review): cfg->smendx[] is left zeroed here rather than set to
	 * INVALID_SMENDX (-1) -- presumably initialised at attach time;
	 * verify against the stream-mapping code.
	 */
	while (it.cur_count--)
		cfg->streamids[cfg->num_streamids++] = be32_to_cpup(it.cur++);

	return 0;
}
534
535static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
536{
537 int idx;
538
539 do {
540 idx = find_next_zero_bit(map, end, start);
541 if (idx == end)
542 return -ENOSPC;
543 } while (test_and_set_bit(idx, map));
544
545 return idx;
546}
547
/* Release a bit previously claimed by __arm_smmu_alloc_bitmap(). */
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
552
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Writing sTLBGSYNC kicks off a global TLB sync operation */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	/* Poll GSACTIVE until the sync completes, up to ~1s (TLB_LOOP_TIMEOUT) */
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			/* Give up rather than hang the CPU indefinitely */
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
571
Will Deacon518f7132014-11-14 17:17:54 +0000572static void arm_smmu_tlb_sync(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +0100573{
Will Deacon518f7132014-11-14 17:17:54 +0000574 struct arm_smmu_domain *smmu_domain = cookie;
575 __arm_smmu_tlb_sync(smmu_domain->smmu);
576}
577
/*
 * io-pgtable ->tlb_flush_all hook: invalidate all TLB entries for the
 * domain (by ASID for stage 1, by VMID for stage 2), then synchronously
 * wait for completion.
 */
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		/* Per-context-bank TLBIASID: invalidate by this bank's ASID */
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		/* Global TLBIVMID: invalidate everything for this VMID */
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}
598
/*
 * io-pgtable ->tlb_add_flush hook: issue per-granule TLB invalidations
 * for [iova, iova + size) without waiting -- completion is deferred to
 * the ->tlb_sync hook. @leaf selects the leaf-only variants.
 */
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			/*
			 * 32-bit TLBIVA: address in the upper bits, ASID in
			 * the low bits of the same word.
			 * NOTE(review): "~12UL" clears only bits 2-3, not the
			 * low 12 offset bits (~0xfffUL) as presumably
			 * intended; harmless only if the SMMU ignores the
			 * offset bits of the address field -- confirm against
			 * the SMMU architecture spec before changing.
			 */
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			/* 64-bit TLBIVA(L): VA >> 12 with ASID in [63:48] */
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		/* Stage 2 by-IPA invalidation only exists on SMMUv2 */
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			/* 64-bit write where possible; low word suffices on 32-bit */
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		/* SMMUv1 stage 2: no by-address op, invalidate the whole VMID */
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}
641
/* TLB maintenance callbacks handed to the io-pgtable allocator */
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
647
Will Deacon45ae7cf2013-06-24 18:31:25 +0100648static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
649{
Will Deacon3714ce1d2016-08-05 19:49:45 +0100650 u32 fsr, fsynr;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100651 unsigned long iova;
652 struct iommu_domain *domain = dev;
Joerg Roedel1d672632015-03-26 13:43:10 +0100653 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +0100654 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
655 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100656 void __iomem *cb_base;
657
Will Deacon44680ee2014-06-25 11:29:12 +0100658 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100659 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
660
661 if (!(fsr & FSR_FAULT))
662 return IRQ_NONE;
663
Will Deacon45ae7cf2013-06-24 18:31:25 +0100664 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
Robin Murphyf9a05f02016-04-13 18:13:01 +0100665 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100666
Will Deacon3714ce1d2016-08-05 19:49:45 +0100667 dev_err_ratelimited(smmu->dev,
668 "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
669 fsr, iova, fsynr, cfg->cbndx);
670
Will Deacon45ae7cf2013-06-24 18:31:25 +0100671 writel(fsr, cb_base + ARM_SMMU_CB_FSR);
Will Deacon3714ce1d2016-08-05 19:49:45 +0100672 return IRQ_HANDLED;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100673}
674
/*
 * Global fault IRQ handler: dump the global fault status/syndrome
 * registers and clear sGFSR. Uses the secure alias of GR0 when the
 * SECURE_CFG_ACCESS option is set (via ARM_SMMU_GR0_NS).
 */
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	/* Shared IRQ line: no global fault pending means it wasn't us */
	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	/* Write-one-to-clear the global fault status */
	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
698
/*
 * Program the hardware registers of the context bank selected by
 * smmu_domain->cfg (CBA2R, CBAR, TTBRs, TTBCR(2), MAIRs/PRRR+NMRR, SCTLR)
 * from the io-pgtable configuration produced by alloc_io_pgtable_ops().
 * The final SCTLR write sets SCTLR_M, enabling translation for the bank,
 * so everything else must be in place before it.
 */
Will Deacon518f7132014-11-14 17:17:54 +0000699static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 700 struct io_pgtable_cfg *pgtbl_cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100701{
Robin Murphy60705292016-08-11 17:44:06 +0100702 u32 reg, reg2;
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100703 u64 reg64;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100704 bool stage1;
Will Deacon44680ee2014-06-25 11:29:12 +0100705 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 706 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deaconc88ae5d2015-10-13 17:53:24 +0100707 void __iomem *cb_base, *gr1_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100708
Will Deacon45ae7cf2013-06-24 18:31:25 +0100709 gr1_base = ARM_SMMU_GR1(smmu);
	/* Anything other than a stage-2 CBAR type is treated as stage 1 */
Will Deacon44680ee2014-06-25 11:29:12 +0100710 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
 711 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100712
	/* CBA2R (SMMUv2 only): select AArch64 vs AArch32 translation regime */
Will Deacon4a1c93c2015-03-04 12:21:03 +0000713 if (smmu->version > ARM_SMMU_V1) {
Robin Murphy7602b872016-04-28 17:12:09 +0100714 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
 715 reg = CBA2R_RW64_64BIT;
 716 else
 717 reg = CBA2R_RW64_32BIT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800718 /* 16-bit VMIDs live in CBA2R */
 719 if (smmu->features & ARM_SMMU_FEAT_VMID16)
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800720 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800721
Will Deacon4a1c93c2015-03-04 12:21:03 +0000722 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
 723 }
 724
Will Deacon45ae7cf2013-06-24 18:31:25 +0100725 /* CBAR */
Will Deacon44680ee2014-06-25 11:29:12 +0100726 reg = cfg->cbar;
	/* On SMMUv1 the context interrupt index is routed via CBAR */
Robin Murphyb7862e32016-04-13 18:13:03 +0100727 if (smmu->version < ARM_SMMU_V2)
Mitchel Humpherys29073202014-07-08 09:52:18 -0700728 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100729
Will Deacon57ca90f2014-02-06 14:59:05 +0000730 /*
 731 * Use the weakest shareability/memory types, so they are
 732 * overridden by the ttbcr/pte.
 733 */
 734 if (stage1) {
 735 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
 736 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -0800737 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
 738 /* 8-bit VMIDs live in CBAR */
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800739 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
Will Deacon57ca90f2014-02-06 14:59:05 +0000740 }
Will Deacon44680ee2014-06-25 11:29:12 +0100741 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
Will Deacon45ae7cf2013-06-24 18:31:25 +0100742
Will Deacon518f7132014-11-14 17:17:54 +0000743 /* TTBRs */
 744 if (stage1) {
Robin Murphy60705292016-08-11 17:44:06 +0100745 u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100746
	/*
	 * Short-descriptor (v7s) uses 32-bit TTBRs and carries the ASID in
	 * CONTEXTIDR; long-descriptor formats use 64-bit TTBRs with the
	 * ASID folded into the upper TTBR bits.
	 */
Robin Murphy60705292016-08-11 17:44:06 +0100747 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
 748 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
 749 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
 750 reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
 751 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
 752 writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
 753 } else {
 754 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
 755 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
 756 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
 757 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
 758 reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
 759 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
 760 }
Will Deacon518f7132014-11-14 17:17:54 +0000761 } else {
	/* Stage 2: a single VTTBR, no ASID */
Tirumalesh Chalamarla668b4ad2015-08-19 00:40:30 +0100762 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
Robin Murphyf9a05f02016-04-13 18:13:01 +0100763 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
Will Deacon518f7132014-11-14 17:17:54 +0000764 }
Will Deacon45ae7cf2013-06-24 18:31:25 +0100765
Will Deacon518f7132014-11-14 17:17:54 +0000766 /* TTBCR */
 767 if (stage1) {
Robin Murphy60705292016-08-11 17:44:06 +0100768 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
 769 reg = pgtbl_cfg->arm_v7s_cfg.tcr;
 770 reg2 = 0;
 771 } else {
	/* LPAE: low 32 bits go to TTBCR, high 32 to TTBCR2 */
 772 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 773 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
 774 reg2 |= TTBCR2_SEP_UPSTREAM;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100775 }
Robin Murphy60705292016-08-11 17:44:06 +0100776 if (smmu->version > ARM_SMMU_V1)
 777 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100778 } else {
Will Deacon518f7132014-11-14 17:17:54 +0000779 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100780 }
Robin Murphy60705292016-08-11 17:44:06 +0100781 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100782
Will Deacon518f7132014-11-14 17:17:54 +0000783 /* MAIRs (stage-1 only) */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100784 if (stage1) {
	/* v7s uses PRRR/NMRR in the same register slots as MAIR0/MAIR1 */
Robin Murphy60705292016-08-11 17:44:06 +0100785 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
 786 reg = pgtbl_cfg->arm_v7s_cfg.prrr;
 787 reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
 788 } else {
 789 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
 790 reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
 791 }
Will Deacon45ae7cf2013-06-24 18:31:25 +0100792 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
Robin Murphy60705292016-08-11 17:44:06 +0100793 writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100794 }
 795
	/* SCTLR: enable fault reporting and, last of all, translation (SCTLR_M) */
Will Deacon45ae7cf2013-06-24 18:31:25 +0100796 /* SCTLR */
Robin Murphy60705292016-08-11 17:44:06 +0100797 reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100798 if (stage1)
 799 reg |= SCTLR_S1_ASIDPNE;
 800#ifdef __BIG_ENDIAN
 801 reg |= SCTLR_E;
 802#endif
Will Deacon25724842013-08-21 13:49:53 +0100803 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100804}
805
/*
 * Finalise a domain on its first attach to a given SMMU: choose the
 * translation stage and context format, allocate a context bank and
 * interrupt index, build the io-pgtable, program the context bank, and
 * request the context fault IRQ. Idempotent: if the domain already has an
 * SMMU, this returns 0 immediately. Serialised by smmu_domain->init_mutex.
 * Returns 0 on success or a negative errno; on failure no context bank is
 * left allocated (see out_clear_smmu/out_unlock).
 */
 806static int arm_smmu_init_domain_context(struct iommu_domain *domain,
Will Deacon44680ee2014-06-25 11:29:12 +0100807 struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100808{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100809 int irq, start, ret = 0;
Will Deacon518f7132014-11-14 17:17:54 +0000810 unsigned long ias, oas;
 811 struct io_pgtable_ops *pgtbl_ops;
 812 struct io_pgtable_cfg pgtbl_cfg;
 813 enum io_pgtable_fmt fmt;
Joerg Roedel1d672632015-03-26 13:43:10 +0100814 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +0100815 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100816
Will Deacon518f7132014-11-14 17:17:54 +0000817 mutex_lock(&smmu_domain->init_mutex);
	/* Already finalised by a previous attach */
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100818 if (smmu_domain->smmu)
 819 goto out_unlock;
 820
Robin Murphy98006992016-04-20 14:53:33 +0100821 /* We're bypassing these SIDs, so don't allocate an actual context */
 822 if (domain->type == IOMMU_DOMAIN_DMA) {
 823 smmu_domain->smmu = smmu;
 824 goto out_unlock;
 825 }
 826
Will Deaconc752ce42014-06-25 22:46:31 +0100827 /*
 828 * Mapping the requested stage onto what we support is surprisingly
 829 * complicated, mainly because the spec allows S1+S2 SMMUs without
 830 * support for nested translation. That means we end up with the
 831 * following table:
 832 *
 833 * Requested Supported Actual
 834 * S1 N S1
 835 * S1 S1+S2 S1
 836 * S1 S2 S2
 837 * S1 S1 S1
 838 * N N N
 839 * N S1+S2 S2
 840 * N S2 S2
 841 * N S1 S1
 842 *
 843 * Note that you can't actually request stage-2 mappings.
 844 */
 845 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
 846 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
 847 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
 848 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
 849
Robin Murphy7602b872016-04-28 17:12:09 +0100850 /*
 851 * Choosing a suitable context format is even more fiddly. Until we
 852 * grow some way for the caller to express a preference, and/or move
 853 * the decision into the io-pgtable code where it arguably belongs,
 854 * just aim for the closest thing to the rest of the system, and hope
 855 * that the hardware isn't esoteric enough that we can't assume AArch64
 856 * support to be a superset of AArch32 support...
 857 */
 858 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
 859 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
Robin Murphy60705292016-08-11 17:44:06 +0100860 if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
 861 !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
 862 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
 863 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
 864 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
Robin Murphy7602b872016-04-28 17:12:09 +0100865 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
 866 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
 867 ARM_SMMU_FEAT_FMT_AARCH64_16K |
 868 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
 869 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
 870
	/* No format the hardware and kernel config can both agree on */
 871 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
 872 ret = -EINVAL;
 873 goto out_unlock;
 874 }
 875
	/*
	 * Translate stage + format into an io-pgtable format and clamp the
	 * input/output address sizes to what that format can express.
	 */
Will Deaconc752ce42014-06-25 22:46:31 +0100876 switch (smmu_domain->stage) {
 877 case ARM_SMMU_DOMAIN_S1:
 878 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
	/* Stage-1 contexts are allocated from above the S2 banks */
 879 start = smmu->num_s2_context_banks;
Will Deacon518f7132014-11-14 17:17:54 +0000880 ias = smmu->va_size;
 881 oas = smmu->ipa_size;
Robin Murphy7602b872016-04-28 17:12:09 +0100882 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +0000883 fmt = ARM_64_LPAE_S1;
Robin Murphy60705292016-08-11 17:44:06 +0100884 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
Will Deacon518f7132014-11-14 17:17:54 +0000885 fmt = ARM_32_LPAE_S1;
Robin Murphy7602b872016-04-28 17:12:09 +0100886 ias = min(ias, 32UL);
 887 oas = min(oas, 40UL);
Robin Murphy60705292016-08-11 17:44:06 +0100888 } else {
 889 fmt = ARM_V7S;
 890 ias = min(ias, 32UL);
 891 oas = min(oas, 32UL);
Robin Murphy7602b872016-04-28 17:12:09 +0100892 }
Will Deaconc752ce42014-06-25 22:46:31 +0100893 break;
 894 case ARM_SMMU_DOMAIN_NESTED:
Will Deacon45ae7cf2013-06-24 18:31:25 +0100895 /*
 896 * We will likely want to change this if/when KVM gets
 897 * involved.
 898 */
Will Deaconc752ce42014-06-25 22:46:31 +0100899 case ARM_SMMU_DOMAIN_S2:
Will Deacon9c5c92e2014-06-25 12:12:41 +0100900 cfg->cbar = CBAR_TYPE_S2_TRANS;
 901 start = 0;
Will Deacon518f7132014-11-14 17:17:54 +0000902 ias = smmu->ipa_size;
 903 oas = smmu->pa_size;
Robin Murphy7602b872016-04-28 17:12:09 +0100904 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
Will Deacon518f7132014-11-14 17:17:54 +0000905 fmt = ARM_64_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +0100906 } else {
Will Deacon518f7132014-11-14 17:17:54 +0000907 fmt = ARM_32_LPAE_S2;
Robin Murphy7602b872016-04-28 17:12:09 +0100908 ias = min(ias, 40UL);
 909 oas = min(oas, 40UL);
 910 }
Will Deaconc752ce42014-06-25 22:46:31 +0100911 break;
 912 default:
 913 ret = -EINVAL;
 914 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100915 }
 916
	/* Claim a free context bank in the range appropriate to the stage */
 917 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
 918 smmu->num_context_banks);
Arnd Bergmann287980e2016-05-27 23:23:25 +0200919 if (ret < 0)
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100920 goto out_unlock;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100921
Will Deacon44680ee2014-06-25 11:29:12 +0100922 cfg->cbndx = ret;
	/* v1 shares a pool of context IRQs; v2 has one IRQ per context bank */
Robin Murphyb7862e32016-04-13 18:13:03 +0100923 if (smmu->version < ARM_SMMU_V2) {
Will Deacon44680ee2014-06-25 11:29:12 +0100924 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
 925 cfg->irptndx %= smmu->num_context_irqs;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100926 } else {
Will Deacon44680ee2014-06-25 11:29:12 +0100927 cfg->irptndx = cfg->cbndx;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100928 }
 929
Will Deacon518f7132014-11-14 17:17:54 +0000930 pgtbl_cfg = (struct io_pgtable_cfg) {
Robin Murphyd5466352016-05-09 17:20:09 +0100931 .pgsize_bitmap = smmu->pgsize_bitmap,
Will Deacon518f7132014-11-14 17:17:54 +0000932 .ias = ias,
 933 .oas = oas,
 934 .tlb = &arm_smmu_gather_ops,
Robin Murphy2df7a252015-07-29 19:46:06 +0100935 .iommu_dev = smmu->dev,
Will Deacon518f7132014-11-14 17:17:54 +0000936 };
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100937
Will Deacon518f7132014-11-14 17:17:54 +0000938 smmu_domain->smmu = smmu;
 939 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 940 if (!pgtbl_ops) {
 941 ret = -ENOMEM;
 942 goto out_clear_smmu;
 943 }
 944
Robin Murphyd5466352016-05-09 17:20:09 +0100945 /* Update the domain's page sizes to reflect the page table format */
 946 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
Will Deacon518f7132014-11-14 17:17:54 +0000947
 948 /* Initialise the context bank with our page table cfg */
 949 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
 950
 951 /*
 952 * Request context fault interrupt. Do this last to avoid the
 953 * handler seeing a half-initialised domain state.
 954 */
Will Deacon44680ee2014-06-25 11:29:12 +0100955 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +0800956 ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
 957 IRQF_SHARED, "arm-smmu-context-fault", domain);
	/* IRQ failure is non-fatal: translation still works, faults go unreported */
Arnd Bergmann287980e2016-05-27 23:23:25 +0200958 if (ret < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +0100959 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
Will Deacon44680ee2014-06-25 11:29:12 +0100960 cfg->irptndx, irq);
 961 cfg->irptndx = INVALID_IRPTNDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100962 }
 963
Will Deacon518f7132014-11-14 17:17:54 +0000964 mutex_unlock(&smmu_domain->init_mutex);
 965
 966 /* Publish page table ops for map/unmap */
 967 smmu_domain->pgtbl_ops = pgtbl_ops;
Will Deacona9a1b0b2014-05-01 18:05:08 +0100968 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100969
Will Deacon518f7132014-11-14 17:17:54 +0000970out_clear_smmu:
 971 smmu_domain->smmu = NULL;
Mitchel Humpherysa18037b2014-07-30 18:58:13 +0100972out_unlock:
Will Deacon518f7132014-11-14 17:17:54 +0000973 mutex_unlock(&smmu_domain->init_mutex);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100974 return ret;
 975}
976
/*
 * Tear down the context bank backing a finalised domain: disable
 * translation (clear SCTLR), release the context fault IRQ, free the
 * io-pgtable and return the context bank index to the allocator.
 * Safe to call on a never-finalised or DMA (bypass) domain, which owns
 * no context bank.
 */
 977static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 978{
Joerg Roedel1d672632015-03-26 13:43:10 +0100979 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon44680ee2014-06-25 11:29:12 +0100980 struct arm_smmu_device *smmu = smmu_domain->smmu;
 981 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
Will Deacon1463fe42013-07-31 19:21:27 +0100982 void __iomem *cb_base;
Will Deacon45ae7cf2013-06-24 18:31:25 +0100983 int irq;
 984
	/* Nothing to do for unattached domains or bypass (DMA) domains */
Robin Murphy98006992016-04-20 14:53:33 +0100985 if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100986 return;
 987
Will Deacon518f7132014-11-14 17:17:54 +0000988 /*
 989 * Disable the context bank and free the page tables before freeing
 990 * it.
 991 */
Will Deacon44680ee2014-06-25 11:29:12 +0100992 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Will Deacon1463fe42013-07-31 19:21:27 +0100993 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
Will Deacon1463fe42013-07-31 19:21:27 +0100994
	/* irptndx is INVALID_IRPTNDX if the IRQ request failed at init time */
Will Deacon44680ee2014-06-25 11:29:12 +0100995 if (cfg->irptndx != INVALID_IRPTNDX) {
 996 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
Peng Fanbee14002016-07-04 17:38:22 +0800997 devm_free_irq(smmu->dev, irq, domain);
Will Deacon45ae7cf2013-06-24 18:31:25 +0100998 }
 999
Markus Elfring44830b02015-11-06 18:32:41 +01001000 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
Will Deacon44680ee2014-06-25 11:29:12 +01001001 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001002}
1003
Joerg Roedel1d672632015-03-26 13:43:10 +01001004static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001005{
1006 struct arm_smmu_domain *smmu_domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001007
Robin Murphy9adb9592016-01-26 18:06:36 +00001008 if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
Joerg Roedel1d672632015-03-26 13:43:10 +01001009 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001010 /*
1011 * Allocate the domain and initialise some of its data structures.
1012 * We can't really do anything meaningful until we've added a
1013 * master.
1014 */
1015 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1016 if (!smmu_domain)
Joerg Roedel1d672632015-03-26 13:43:10 +01001017 return NULL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001018
Robin Murphy9adb9592016-01-26 18:06:36 +00001019 if (type == IOMMU_DOMAIN_DMA &&
1020 iommu_get_dma_cookie(&smmu_domain->domain)) {
1021 kfree(smmu_domain);
1022 return NULL;
1023 }
1024
Will Deacon518f7132014-11-14 17:17:54 +00001025 mutex_init(&smmu_domain->init_mutex);
1026 spin_lock_init(&smmu_domain->pgtbl_lock);
Joerg Roedel1d672632015-03-26 13:43:10 +01001027
1028 return &smmu_domain->domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001029}
1030
/*
 * Release all resources owned by a domain: its DMA cookie (if any), the
 * backing context bank, and the domain structure itself. Callers guarantee
 * that every device has already been detached.
 */
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *sd = to_smmu_domain(domain);

	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(sd);
}
1043
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001044static int arm_smmu_alloc_smr(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001045{
1046 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001047
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001048 for (i = 0; i < smmu->num_mapping_groups; i++)
1049 if (!cmpxchg(&smmu->smrs[i].valid, false, true))
1050 return i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001051
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001052 return INVALID_SMENDX;
1053}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001054
/*
 * Invalidate SMR 'idx' in hardware, then release the slot back to the
 * allocator. Order matters: the register must be invalidated before the
 * valid flag is cleared, since clearing it makes the slot immediately
 * re-claimable by arm_smmu_alloc_smr().
 */
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001055static void arm_smmu_free_smr(struct arm_smmu_device *smmu, int idx)
 1056{
 1057 writel_relaxed(~SMR_VALID, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
 1058 WRITE_ONCE(smmu->smrs[idx].valid, false);
 1059}
1060
1061static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1062{
1063 struct arm_smmu_smr *smr = smmu->smrs + idx;
Robin Murphyf80cd882016-09-14 15:21:39 +01001064 u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001065
1066 if (smr->valid)
1067 reg |= SMR_VALID;
1068 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1069}
1070
Robin Murphy8e8b2032016-09-12 17:13:50 +01001071static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1072{
1073 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1074 u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1075 (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1076 (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1077
1078 writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1079}
1080
1081static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1082{
1083 arm_smmu_write_s2cr(smmu, idx);
1084 if (smmu->smrs)
1085 arm_smmu_write_smr(smmu, idx);
1086}
1087
/*
 * Allocate a stream-mapping entry for each of a master's stream IDs.
 * On stream-matching hardware an SMR slot is claimed per ID and the SMR
 * table populated (but only written to hardware once all allocations have
 * succeeded); on stream-indexing hardware the stream ID itself is the
 * S2CR index. Returns 0 on success, -EEXIST if the master already has
 * entries, or -ENOSPC with all partially-allocated SMRs rolled back.
 */
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001088static int arm_smmu_master_alloc_smes(struct arm_smmu_device *smmu,
 1089 struct arm_smmu_master_cfg *cfg)
 1090{
 1091 struct arm_smmu_smr *smrs = smmu->smrs;
 1092 int i, idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001093
Will Deacon44680ee2014-06-25 11:29:12 +01001094 /* Allocate the SMRs on the SMMU */
Robin Murphyd3097e32016-09-12 17:13:53 +01001095 for_each_cfg_sme(cfg, i, idx) {
 1096 if (idx != INVALID_SMENDX)
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001097 return -EEXIST;
 1098
 1099 /* ...except on stream indexing hardware, of course */
 1100 if (!smrs) {
 1101 cfg->smendx[i] = cfg->streamids[i];
 1102 continue;
 1103 }
 1104
 1105 idx = arm_smmu_alloc_smr(smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001106 if (idx < 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001107 dev_err(smmu->dev, "failed to allocate free SMR\n");
 1108 goto err_free_smrs;
 1109 }
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001110 cfg->smendx[i] = idx;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001111
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001112 smrs[idx].id = cfg->streamids[i];
 1113 smrs[idx].mask = 0; /* We don't currently share SMRs */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001114 }
 1115
	/* Stream-indexing hardware has no SMRs to program */
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001116 if (!smrs)
 1117 return 0;
 1118
Will Deacon45ae7cf2013-06-24 18:31:25 +01001119 /* It worked! Now, poke the actual hardware */
Robin Murphyd3097e32016-09-12 17:13:53 +01001120 for_each_cfg_sme(cfg, i, idx)
 1121 arm_smmu_write_smr(smmu, idx);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001122
Will Deacon45ae7cf2013-06-24 18:31:25 +01001123 return 0;
 1124
	/* Roll back every slot claimed before the failure (i is exclusive) */
 1125err_free_smrs:
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001126 while (i--) {
 1127 arm_smmu_free_smr(smmu, cfg->smendx[i]);
 1128 cfg->smendx[i] = INVALID_SMENDX;
 1129 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001130 return -ENOSPC;
 1131}
1132
/*
 * Release all stream-mapping entries held by a master: reset each S2CR to
 * its default, barrier, then invalidate and free the SMRs. The two-pass
 * ordering is deliberate and must not be changed (see comments below).
 */
Robin Murphyf80cd882016-09-14 15:21:39 +01001133static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001134{
Robin Murphyf80cd882016-09-14 15:21:39 +01001135 struct arm_smmu_device *smmu = cfg->smmu;
Robin Murphyd3097e32016-09-12 17:13:53 +01001136 int i, idx;
Will Deacon43b412b2014-07-15 11:22:24 +01001137
Robin Murphy8e8b2032016-09-12 17:13:50 +01001138 /*
 1139 * We *must* clear the S2CR first, because freeing the SMR means
 1140 * that it can be re-allocated immediately.
 1141 */
Robin Murphyd3097e32016-09-12 17:13:53 +01001142 for_each_cfg_sme(cfg, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001143 /* An IOMMU group is torn down by the first device to be removed */
 1144 if (idx == INVALID_SMENDX)
 1145 return;
 1146
 1147 smmu->s2crs[idx] = s2cr_init_val;
 1148 arm_smmu_write_s2cr(smmu, idx);
 1149 }
 1150 /* Sync S2CR updates before touching anything else */
 1151 __iowmb();
 1152
Will Deacon45ae7cf2013-06-24 18:31:25 +01001153 /* Invalidate the SMRs before freeing back to the allocator */
Robin Murphyd3097e32016-09-12 17:13:53 +01001154 for_each_cfg_sme(cfg, i, idx) {
	/* smmu->smrs is NULL on stream-indexing hardware: nothing to free */
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001155 if (smmu->smrs)
Robin Murphyd3097e32016-09-12 17:13:53 +01001156 arm_smmu_free_smr(smmu, idx);
Mitchel Humpherys29073202014-07-08 09:52:18 -07001157
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001158 cfg->smendx[i] = INVALID_SMENDX;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001159 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001160}
1161
/*
 * Route a master's stream IDs into a domain: allocate stream-mapping
 * entries if the master has none yet, then point each S2CR either at the
 * domain's context bank (translate) or at bypass for DMA domains.
 * Returns 0 on success or a negative errno from SME allocation.
 */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001162static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
Will Deacona9a1b0b2014-05-01 18:05:08 +01001163 struct arm_smmu_master_cfg *cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001164{
Robin Murphyd3097e32016-09-12 17:13:53 +01001165 int i, idx, ret = 0;
Will Deacon44680ee2014-06-25 11:29:12 +01001166 struct arm_smmu_device *smmu = smmu_domain->smmu;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001167 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
 1168 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
 1169 u8 cbndx = smmu_domain->cfg.cbndx;
 1170
	/* smendx[0] is INVALID_SMENDX iff no SMEs have been allocated yet */
 1171 if (cfg->smendx[0] == INVALID_SMENDX)
 1172 ret = arm_smmu_master_alloc_smes(smmu, cfg);
 1173 if (ret)
 1174 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001175
Will Deacon5f634952016-04-20 14:53:32 +01001176 /*
 1177 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
 1178 * for all devices behind the SMMU. Note that we need to take
 1179 * care configuring SMRs for devices both a platform_device and
 1180 * and a PCI device (i.e. a PCI host controller)
 1181 */
 1182 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
Robin Murphy8e8b2032016-09-12 17:13:50 +01001183 type = S2CR_TYPE_BYPASS;
Will Deacon5f634952016-04-20 14:53:32 +01001184
Robin Murphyd3097e32016-09-12 17:13:53 +01001185 for_each_cfg_sme(cfg, i, idx) {
Robin Murphy8e8b2032016-09-12 17:13:50 +01001186 /* Devices in an IOMMU group may already be configured */
	/* NOTE: first already-matching entry stops the loop entirely (break,
	 * not continue) — the group's entries were programmed together. */
 1187 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
 1188 break;
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001189
Robin Murphy8e8b2032016-09-12 17:13:50 +01001190 s2cr[idx].type = type;
 1191 s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
 1192 s2cr[idx].cbndx = cbndx;
 1193 arm_smmu_write_s2cr(smmu, idx);
Will Deacon43b412b2014-07-15 11:22:24 +01001194 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001195 return 0;
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001196}
1197
Will Deacon45ae7cf2013-06-24 18:31:25 +01001198static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1199{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001200 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01001201 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Robin Murphyf80cd882016-09-14 15:21:39 +01001202 struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001203
Robin Murphyf80cd882016-09-14 15:21:39 +01001204 if (!cfg) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001205 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1206 return -ENXIO;
1207 }
1208
Will Deacon518f7132014-11-14 17:17:54 +00001209 /* Ensure that the domain is finalised */
Robin Murphyf80cd882016-09-14 15:21:39 +01001210 ret = arm_smmu_init_domain_context(domain, cfg->smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001211 if (ret < 0)
Will Deacon518f7132014-11-14 17:17:54 +00001212 return ret;
1213
Will Deacon45ae7cf2013-06-24 18:31:25 +01001214 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001215 * Sanity check the domain. We don't support domains across
1216 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001217 */
Robin Murphyf80cd882016-09-14 15:21:39 +01001218 if (smmu_domain->smmu != cfg->smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001219 dev_err(dev,
1220 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Robin Murphyf80cd882016-09-14 15:21:39 +01001221 dev_name(smmu_domain->smmu->dev), dev_name(cfg->smmu->dev));
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001222 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001223 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001224
1225 /* Looks ok, so add the device to the domain */
Robin Murphy8e8b2032016-09-12 17:13:50 +01001226 return arm_smmu_domain_add_master(smmu_domain, cfg);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001227}
1228
Will Deacon45ae7cf2013-06-24 18:31:25 +01001229static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
Will Deaconb410aed2014-02-20 16:31:06 +00001230 phys_addr_t paddr, size_t size, int prot)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001231{
Will Deacon518f7132014-11-14 17:17:54 +00001232 int ret;
1233 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001234 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001235 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001236
Will Deacon518f7132014-11-14 17:17:54 +00001237 if (!ops)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001238 return -ENODEV;
1239
Will Deacon518f7132014-11-14 17:17:54 +00001240 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1241 ret = ops->map(ops, iova, paddr, size, prot);
1242 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1243 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001244}
1245
1246static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1247 size_t size)
1248{
Will Deacon518f7132014-11-14 17:17:54 +00001249 size_t ret;
1250 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001251 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001252 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001253
Will Deacon518f7132014-11-14 17:17:54 +00001254 if (!ops)
1255 return 0;
1256
1257 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1258 ret = ops->unmap(ops, iova, size);
1259 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1260 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001261}
1262
/*
 * Translate an IOVA to a physical address using the context bank's
 * hardware ATS1 (Address Translation Stage 1) operation: write the
 * page-aligned VA to ATS1PR, poll ATSR until the translation completes,
 * then read the result from PAR. Falls back to a software page-table walk
 * on timeout; returns 0 if the hardware reports a translation fault.
 * Caller holds the domain's pgtbl_lock (see arm_smmu_iova_to_phys).
 */
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001263static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 1264 dma_addr_t iova)
 1265{
Joerg Roedel1d672632015-03-26 13:43:10 +01001266 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001267 struct arm_smmu_device *smmu = smmu_domain->smmu;
 1268 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 1269 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
 1270 struct device *dev = smmu->dev;
 1271 void __iomem *cb_base;
 1272 u32 tmp;
 1273 u64 phys;
Robin Murphy661d9622015-05-27 17:09:34 +01001274 unsigned long va;
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001275
 1276 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 1277
Robin Murphy661d9622015-05-27 17:09:34 +01001278 /* ATS1 registers can only be written atomically */
	/* Page-align the VA; the in-page offset is re-applied to the result */
 1279 va = iova & ~0xfffUL;
Robin Murphy661d9622015-05-27 17:09:34 +01001280 if (smmu->version == ARM_SMMU_V2)
Robin Murphyf9a05f02016-04-13 18:13:01 +01001281 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
 1282 else /* Register is only 32-bit in v1 */
Robin Murphy661d9622015-05-27 17:09:34 +01001283 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001284
	/* Poll ATSR (5us interval, 50us timeout) for the translation to finish */
 1285 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
 1286 !(tmp & ATSR_ACTIVE), 5, 50)) {
 1287 dev_err(dev,
Fabio Estevam077124c2015-08-18 17:12:24 +01001288 "iova to phys timed out on %pad. Falling back to software table walk.\n",
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001289 &iova);
 1290 return ops->iova_to_phys(ops, iova);
 1291 }
 1292
Robin Murphyf9a05f02016-04-13 18:13:01 +01001293 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	/* PAR fault bit set: the hardware walk failed */
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001294 if (phys & CB_PAR_F) {
 1295 dev_err(dev, "translation fault!\n");
 1296 dev_err(dev, "PAR = 0x%llx\n", phys);
 1297 return 0;
 1298 }
 1299
	/* Combine the 40-bit frame address from PAR with the page offset */
 1300 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
 1301}
1302
Will Deacon45ae7cf2013-06-24 18:31:25 +01001303static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001304 dma_addr_t iova)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001305{
Will Deacon518f7132014-11-14 17:17:54 +00001306 phys_addr_t ret;
1307 unsigned long flags;
Joerg Roedel1d672632015-03-26 13:43:10 +01001308 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001309 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001310
Will Deacon518f7132014-11-14 17:17:54 +00001311 if (!ops)
Will Deacona44a97912013-11-07 18:47:50 +00001312 return 0;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001313
Will Deacon518f7132014-11-14 17:17:54 +00001314 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001315 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1316 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001317 ret = arm_smmu_iova_to_phys_hard(domain, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001318 } else {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001319 ret = ops->iova_to_phys(ops, iova);
Baptiste Reynal83a60ed2015-03-04 16:51:06 +01001320 }
1321
Will Deacon518f7132014-11-14 17:17:54 +00001322 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001323
Will Deacon518f7132014-11-14 17:17:54 +00001324 return ret;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001325}
1326
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001327static bool arm_smmu_capable(enum iommu_cap cap)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001328{
Will Deacond0948942014-06-24 17:30:10 +01001329 switch (cap) {
1330 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001331 /*
1332 * Return true here as the SMMU can always send out coherent
1333 * requests.
1334 */
1335 return true;
Will Deacond0948942014-06-24 17:30:10 +01001336 case IOMMU_CAP_INTR_REMAP:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001337 return true; /* MSIs are just memory writes */
Antonios Motakis0029a8d2014-10-13 14:06:18 +01001338 case IOMMU_CAP_NOEXEC:
1339 return true;
Will Deacond0948942014-06-24 17:30:10 +01001340 default:
Joerg Roedel1fd0c772014-09-05 10:49:34 +02001341 return false;
Will Deacond0948942014-06-24 17:30:10 +01001342 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001343}
Will Deacon45ae7cf2013-06-24 18:31:25 +01001344
/*
 * IOMMU core callback: set up a master device for use with this SMMU.
 * The per-master configuration (stream IDs) comes from the legacy DT
 * description; each stream ID is range-checked against the ID bits the
 * hardware actually implements before the device is placed into an
 * IOMMU group.
 */
static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg;
	struct iommu_group *group;
	int i, ret;

	/*
	 * NOTE(review): arm_smmu_register_legacy_master() appears to stash
	 * the newly-allocated cfg in dev->archdata.iommu rather than return
	 * it, hence the re-read here — confirm against its definition.
	 */
	ret = arm_smmu_register_legacy_master(dev);
	cfg = dev->archdata.iommu;
	if (ret)
		goto out_free;

	ret = -EINVAL;
	for (i = 0; i < cfg->num_streamids; i++) {
		u16 sid = cfg->streamids[i];

		/* Reject IDs wider than the SMMU's stream ID field */
		if (sid & ~cfg->smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, cfg->smmu->streamid_mask);
			goto out_free;
		}
		/* No stream mapping entry claimed yet */
		cfg->smendx[i] = INVALID_SMENDX;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_free;
	}
	/* We only needed the group to exist; drop our reference */
	iommu_group_put(group);
	return 0;

out_free:
	kfree(cfg);	/* kfree(NULL) is a no-op if registration failed early */
	dev->archdata.iommu = NULL;
	return ret;
}
1381
/*
 * Undo arm_smmu_add_device(): release the device's stream mapping entries,
 * remove it from its IOMMU group and free the per-master configuration.
 */
static void arm_smmu_remove_device(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;

	/* Nothing to do if the device was never (successfully) added */
	if (!cfg)
		return;

	arm_smmu_master_free_smes(cfg);
	iommu_group_remove_device(dev);
	kfree(cfg);
	dev->archdata.iommu = NULL;
}
1394
/*
 * Pick an IOMMU group for a master: PCI devices use the PCI grouping rules
 * (aliases/ACS), everything else gets a generic per-device group.
 */
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	return dev_is_pci(dev) ? pci_device_group(dev)
			       : generic_device_group(dev);
}
1406
Will Deaconc752ce42014-06-25 22:46:31 +01001407static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1408 enum iommu_attr attr, void *data)
1409{
Joerg Roedel1d672632015-03-26 13:43:10 +01001410 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deaconc752ce42014-06-25 22:46:31 +01001411
1412 switch (attr) {
1413 case DOMAIN_ATTR_NESTING:
1414 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1415 return 0;
1416 default:
1417 return -ENODEV;
1418 }
1419}
1420
/*
 * Set a domain attribute. Only DOMAIN_ATTR_NESTING is supported: it selects
 * between nested (stage-1 over stage-2) and plain stage-1 translation.
 * The stage can only be chosen before the domain is attached to an SMMU,
 * hence the -EPERM once smmu_domain->smmu is set.
 */
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/* Serialise against concurrent domain initialisation/attach */
	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		/* Too late once the domain is live on an SMMU */
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
1450
/* IOMMU API callbacks implemented by this driver */
static struct iommu_ops arm_smmu_ops = {
	.capable = arm_smmu_capable,
	.domain_alloc = arm_smmu_domain_alloc,
	.domain_free = arm_smmu_domain_free,
	.attach_dev = arm_smmu_attach_dev,
	.map = arm_smmu_map,
	.unmap = arm_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = arm_smmu_iova_to_phys,
	.add_device = arm_smmu_add_device,
	.remove_device = arm_smmu_remove_device,
	.device_group = arm_smmu_device_group,
	.domain_get_attr = arm_smmu_domain_get_attr,
	.domain_set_attr = arm_smmu_domain_set_attr,
	.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
1467
/*
 * Bring the SMMU's global state to a known-good configuration: clear stale
 * faults, reset every stream mapping entry, disable all context banks,
 * apply MMU-500 errata workarounds, invalidate the TLBs and finally enable
 * the SMMU by programming sCR0.
 */
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR (write back the set bits to acknowledge them) */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
	 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
	 * bit is only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button: drain TLB maintenance before enabling the SMMU */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
1547
/*
 * Decode the 3-bit address-size field from the SMMU ID registers
 * (IAS/OAS/UBS encodings) into an address width in bits. Encodings of 5
 * and above all mean 48 bits.
 */
static int arm_smmu_id_size_to_bits(int size)
{
	static const int id_size_bits[] = { 32, 36, 40, 42, 44 };

	if (size >= 0 && size < (int)(sizeof(id_size_bits) / sizeof(id_size_bits[0])))
		return id_size_bits[size];
	return 48;
}
1566
1567static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1568{
1569 unsigned long size;
1570 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1571 u32 id;
Robin Murphybae2c2d2015-07-29 19:46:05 +01001572 bool cttw_dt, cttw_reg;
Robin Murphy8e8b2032016-09-12 17:13:50 +01001573 int i;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001574
1575 dev_notice(smmu->dev, "probing hardware configuration...\n");
Robin Murphyb7862e32016-04-13 18:13:03 +01001576 dev_notice(smmu->dev, "SMMUv%d with:\n",
1577 smmu->version == ARM_SMMU_V2 ? 2 : 1);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001578
1579 /* ID0 */
1580 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
Will Deacon4cf740b2014-07-14 19:47:39 +01001581
1582 /* Restrict available stages based on module parameter */
1583 if (force_stage == 1)
1584 id &= ~(ID0_S2TS | ID0_NTS);
1585 else if (force_stage == 2)
1586 id &= ~(ID0_S1TS | ID0_NTS);
1587
Will Deacon45ae7cf2013-06-24 18:31:25 +01001588 if (id & ID0_S1TS) {
1589 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1590 dev_notice(smmu->dev, "\tstage 1 translation\n");
1591 }
1592
1593 if (id & ID0_S2TS) {
1594 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1595 dev_notice(smmu->dev, "\tstage 2 translation\n");
1596 }
1597
1598 if (id & ID0_NTS) {
1599 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1600 dev_notice(smmu->dev, "\tnested translation\n");
1601 }
1602
1603 if (!(smmu->features &
Will Deacon4cf740b2014-07-14 19:47:39 +01001604 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001605 dev_err(smmu->dev, "\tno translation support!\n");
1606 return -ENODEV;
1607 }
1608
Robin Murphyb7862e32016-04-13 18:13:03 +01001609 if ((id & ID0_S1TS) &&
1610 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
Mitchel Humpherys859a7322014-10-29 21:13:40 +00001611 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1612 dev_notice(smmu->dev, "\taddress translation ops\n");
1613 }
1614
Robin Murphybae2c2d2015-07-29 19:46:05 +01001615 /*
1616 * In order for DMA API calls to work properly, we must defer to what
1617 * the DT says about coherency, regardless of what the hardware claims.
1618 * Fortunately, this also opens up a workaround for systems where the
1619 * ID register value has ended up configured incorrectly.
1620 */
1621 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
1622 cttw_reg = !!(id & ID0_CTTW);
1623 if (cttw_dt)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001624 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
Robin Murphybae2c2d2015-07-29 19:46:05 +01001625 if (cttw_dt || cttw_reg)
1626 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1627 cttw_dt ? "" : "non-");
1628 if (cttw_dt != cttw_reg)
1629 dev_notice(smmu->dev,
1630 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
Will Deacon45ae7cf2013-06-24 18:31:25 +01001631
Robin Murphy21174242016-09-12 17:13:48 +01001632 /* Max. number of entries we have for stream matching/indexing */
1633 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1634 smmu->streamid_mask = size - 1;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001635 if (id & ID0_SMS) {
Robin Murphy21174242016-09-12 17:13:48 +01001636 u32 smr;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001637
1638 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
Robin Murphy21174242016-09-12 17:13:48 +01001639 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1640 if (size == 0) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001641 dev_err(smmu->dev,
1642 "stream-matching supported, but no SMRs present!\n");
1643 return -ENODEV;
1644 }
1645
Robin Murphy21174242016-09-12 17:13:48 +01001646 /*
1647 * SMR.ID bits may not be preserved if the corresponding MASK
1648 * bits are set, so check each one separately. We can reject
1649 * masters later if they try to claim IDs outside these masks.
1650 */
1651 smr = smmu->streamid_mask << SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001652 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1653 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
Robin Murphy21174242016-09-12 17:13:48 +01001654 smmu->streamid_mask = smr >> SMR_ID_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001655
Robin Murphy21174242016-09-12 17:13:48 +01001656 smr = smmu->streamid_mask << SMR_MASK_SHIFT;
1657 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1658 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1659 smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001660
Robin Murphy1f3d5ca2016-09-12 17:13:49 +01001661 /* Zero-initialised to mark as invalid */
1662 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1663 GFP_KERNEL);
1664 if (!smmu->smrs)
1665 return -ENOMEM;
1666
Will Deacon45ae7cf2013-06-24 18:31:25 +01001667 dev_notice(smmu->dev,
Robin Murphy21174242016-09-12 17:13:48 +01001668 "\tstream matching with %lu register groups, mask 0x%x",
1669 size, smmu->smr_mask_mask);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001670 }
Robin Murphy8e8b2032016-09-12 17:13:50 +01001671 /* s2cr->type == 0 means translation, so initialise explicitly */
1672 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1673 GFP_KERNEL);
1674 if (!smmu->s2crs)
1675 return -ENOMEM;
1676 for (i = 0; i < size; i++)
1677 smmu->s2crs[i] = s2cr_init_val;
1678
Robin Murphy21174242016-09-12 17:13:48 +01001679 smmu->num_mapping_groups = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001680
Robin Murphy7602b872016-04-28 17:12:09 +01001681 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1682 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1683 if (!(id & ID0_PTFS_NO_AARCH32S))
1684 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1685 }
1686
Will Deacon45ae7cf2013-06-24 18:31:25 +01001687 /* ID1 */
1688 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
Will Deaconc757e852014-07-30 11:33:25 +01001689 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001690
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001691 /* Check for size mismatch of SMMU address space from mapped region */
Will Deacon518f7132014-11-14 17:17:54 +00001692 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
Will Deaconc757e852014-07-30 11:33:25 +01001693 size *= 2 << smmu->pgshift;
Andreas Herrmannc55af7f2013-10-01 13:39:06 +01001694 if (smmu->size != size)
Mitchel Humpherys29073202014-07-08 09:52:18 -07001695 dev_warn(smmu->dev,
1696 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1697 size, smmu->size);
Will Deacon45ae7cf2013-06-24 18:31:25 +01001698
Will Deacon518f7132014-11-14 17:17:54 +00001699 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001700 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1701 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1702 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1703 return -ENODEV;
1704 }
1705 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1706 smmu->num_context_banks, smmu->num_s2_context_banks);
Robin Murphye086d912016-04-13 18:12:58 +01001707 /*
1708 * Cavium CN88xx erratum #27704.
1709 * Ensure ASID and VMID allocation is unique across all SMMUs in
1710 * the system.
1711 */
1712 if (smmu->model == CAVIUM_SMMUV2) {
1713 smmu->cavium_id_base =
1714 atomic_add_return(smmu->num_context_banks,
1715 &cavium_smmu_context_count);
1716 smmu->cavium_id_base -= smmu->num_context_banks;
1717 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001718
1719 /* ID2 */
1720 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1721 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001722 smmu->ipa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001723
Will Deacon518f7132014-11-14 17:17:54 +00001724 /* The output mask is also applied for bypass */
Will Deacon45ae7cf2013-06-24 18:31:25 +01001725 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
Will Deacon518f7132014-11-14 17:17:54 +00001726 smmu->pa_size = size;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001727
Tirumalesh Chalamarla4e3e9b62016-02-23 10:19:00 -08001728 if (id & ID2_VMID16)
1729 smmu->features |= ARM_SMMU_FEAT_VMID16;
1730
Robin Murphyf1d84542015-03-04 16:41:05 +00001731 /*
1732 * What the page table walker can address actually depends on which
1733 * descriptor format is in use, but since a) we don't know that yet,
1734 * and b) it can vary per context bank, this will have to do...
1735 */
1736 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1737 dev_warn(smmu->dev,
1738 "failed to set DMA mask for table walker\n");
1739
Robin Murphyb7862e32016-04-13 18:13:03 +01001740 if (smmu->version < ARM_SMMU_V2) {
Will Deacon518f7132014-11-14 17:17:54 +00001741 smmu->va_size = smmu->ipa_size;
Robin Murphyb7862e32016-04-13 18:13:03 +01001742 if (smmu->version == ARM_SMMU_V1_64K)
1743 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001744 } else {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001745 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
Will Deacon518f7132014-11-14 17:17:54 +00001746 smmu->va_size = arm_smmu_id_size_to_bits(size);
Will Deacon518f7132014-11-14 17:17:54 +00001747 if (id & ID2_PTFS_4K)
Robin Murphy7602b872016-04-28 17:12:09 +01001748 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
Will Deacon518f7132014-11-14 17:17:54 +00001749 if (id & ID2_PTFS_16K)
Robin Murphy7602b872016-04-28 17:12:09 +01001750 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
Will Deacon518f7132014-11-14 17:17:54 +00001751 if (id & ID2_PTFS_64K)
Robin Murphy7602b872016-04-28 17:12:09 +01001752 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001753 }
1754
Robin Murphy7602b872016-04-28 17:12:09 +01001755 /* Now we've corralled the various formats, what'll it do? */
Robin Murphy7602b872016-04-28 17:12:09 +01001756 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
Robin Murphyd5466352016-05-09 17:20:09 +01001757 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
Robin Murphy7602b872016-04-28 17:12:09 +01001758 if (smmu->features &
1759 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
Robin Murphyd5466352016-05-09 17:20:09 +01001760 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
Robin Murphy7602b872016-04-28 17:12:09 +01001761 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
Robin Murphyd5466352016-05-09 17:20:09 +01001762 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
Robin Murphy7602b872016-04-28 17:12:09 +01001763 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
Robin Murphyd5466352016-05-09 17:20:09 +01001764 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
Robin Murphy7602b872016-04-28 17:12:09 +01001765
Robin Murphyd5466352016-05-09 17:20:09 +01001766 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1767 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1768 else
1769 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1770 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1771 smmu->pgsize_bitmap);
1772
Will Deacon518f7132014-11-14 17:17:54 +00001773
Will Deacon28d60072014-09-01 16:24:48 +01001774 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1775 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001776 smmu->va_size, smmu->ipa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001777
1778 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1779 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
Will Deacon518f7132014-11-14 17:17:54 +00001780 smmu->ipa_size, smmu->pa_size);
Will Deacon28d60072014-09-01 16:24:48 +01001781
Will Deacon45ae7cf2013-06-24 18:31:25 +01001782 return 0;
1783}
1784
/* Per-compatible description: architecture version plus implementation */
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

/* Define a named match_data instance for use in the OF table below */
#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1809
/*
 * Platform driver probe: map the SMMU register space, collect the global
 * and context-bank interrupts from the DT, discover the hardware
 * configuration and reset/enable the SMMU.
 */
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Version and implementation come from the matched compatible */
	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	/* Every IRQ past the first #global-interrupts is a context IRQ */
	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	parse_driver_options(smmu);

	/* SMMUv2 needs one interrupt per context bank */
	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	return 0;
}
1902
/*
 * Platform driver remove: warn if any context banks (domains) are still in
 * use, then disable client access by setting sCR0.CLIENTPD.
 */
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}
1917
/* Platform driver glue; matched against the DT compatibles listed above */
static struct platform_driver arm_smmu_driver = {
	.driver = {
		.name = "arm-smmu",
		.of_match_table = of_match_ptr(arm_smmu_of_match),
	},
	.probe = arm_smmu_device_dt_probe,
	.remove = arm_smmu_device_remove,
};
1926
/*
 * Module init: register the platform driver, then install arm_smmu_ops on
 * each bus type we may find masters behind — but only if no other IOMMU
 * driver claimed that bus first.
 */
static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		/* Request ACS so PCI grouping reflects real isolation */
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}
1965
1966static void __exit arm_smmu_exit(void)
1967{
1968 return platform_driver_unregister(&arm_smmu_driver);
1969}
1970
/* Initialise at subsys level so the SMMU is ready before masters probe */
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");