/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
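/* i.e. up to 1000000 polls of sTLBGSTATUS with a 1us delay after each */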

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){	\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}
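/*
 * The compound literal above lets an S2CR be reset wholesale, e.g.
 * smmu->s2crs[idx] = s2cr_init_val; with "disable_bypass" set, streams
 * released this way fault rather than passing through untranslated.
 */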

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

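/*
 * Per-master view of the stream mapping: smendx[i] records which stream
 * map entry (SMR/S2CR pair, or raw S2CR index on stream-indexing
 * hardware) streamids[i] currently occupies, or INVALID_SMENDX if none.
 */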
struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	s16				smendx[MAX_MASTER_STREAMIDS];
};
#define INVALID_SMENDX			-1

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
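/*
 * ASIDs and VMIDs are simply derived from the context bank index;
 * cavium_id_base is zero except on Cavium implementations, where (it
 * appears) multiple SMMUs share TLB tag space, so per-device bases keep
 * the ID spaces globally unique (see cavium_smmu_context_count below).
 */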

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

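/*
 * For PCI masters the relevant DT node is the one describing the host
 * bridge, so walk up to the root bus and use its bridge's parent of_node.
 */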
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
		master->cfg.smendx[i] = INVALID_SMENDX;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

594{
595 int idx;
596
597 do {
598 idx = find_next_zero_bit(map, end, start);
599 if (idx == end)
600 return -ENOSPC;
601 } while (test_and_set_bit(idx, map));
602
603 return idx;
604}
605
606static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
607{
608 clear_bit(idx, map);
609}
610
611/* Wait for any pending TLB invalidations to complete */
Will Deacon518f7132014-11-14 17:17:54 +0000612static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
Will Deacon45ae7cf2013-06-24 18:31:25 +0100613{
614 int count = 0;
615 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
616
617 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
618 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
619 & sTLBGSTATUS_GSACTIVE) {
620 cpu_relax();
621 if (++count == TLB_LOOP_TIMEOUT) {
622 dev_err_ratelimited(smmu->dev,
623 "TLB sync timed out -- SMMU may be deadlocked\n");
624 return;
625 }
626 udelay(1);
627 }
628}
629
Will Deacon518f7132014-11-14 17:17:54 +0000630static void arm_smmu_tlb_sync(void *cookie)
Will Deacon1463fe42013-07-31 19:21:27 +0100631{
Will Deacon518f7132014-11-14 17:17:54 +0000632 struct arm_smmu_domain *smmu_domain = cookie;
633 __arm_smmu_tlb_sync(smmu_domain->smmu);
634}
635
636static void arm_smmu_tlb_inv_context(void *cookie)
637{
638 struct arm_smmu_domain *smmu_domain = cookie;
Will Deacon44680ee2014-06-25 11:29:12 +0100639 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
640 struct arm_smmu_device *smmu = smmu_domain->smmu;
Will Deacon1463fe42013-07-31 19:21:27 +0100641 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
Will Deacon518f7132014-11-14 17:17:54 +0000642 void __iomem *base;
Will Deacon1463fe42013-07-31 19:21:27 +0100643
644 if (stage1) {
645 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800646 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +0100647 base + ARM_SMMU_CB_S1_TLBIASID);
Will Deacon1463fe42013-07-31 19:21:27 +0100648 } else {
649 base = ARM_SMMU_GR0(smmu);
Tirumalesh Chalamarla1bd37a62016-03-04 13:56:09 -0800650 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
Will Deaconecfadb62013-07-31 19:21:28 +0100651 base + ARM_SMMU_GR0_TLBIVMID);
Will Deacon1463fe42013-07-31 19:21:27 +0100652 }
653
Will Deacon518f7132014-11-14 17:17:54 +0000654 __arm_smmu_tlb_sync(smmu);
Will Deacon1463fe42013-07-31 19:21:27 +0100655}
656
Will Deacon518f7132014-11-14 17:17:54 +0000657static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
Robin Murphy06c610e2015-12-07 18:18:53 +0000658 size_t granule, bool leaf, void *cookie)
Will Deacon518f7132014-11-14 17:17:54 +0000659{
660 struct arm_smmu_domain *smmu_domain = cookie;
661 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
662 struct arm_smmu_device *smmu = smmu_domain->smmu;
663 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
664 void __iomem *reg;
665
666 if (stage1) {
667 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
668 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
669
Robin Murphy7602b872016-04-28 17:12:09 +0100670 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

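/*
 * Callbacks invoked by the io-pgtable code as it updates the page tables:
 * tlb_add_flush() queues invalidation of a range, tlb_sync() waits for
 * completion, and tlb_flush_all() invalidates the whole context.
 */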
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

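/*
 * Claim a free SMR slot by atomically flipping its valid flag; cmpxchg()
 * returns the old value, so seeing 'false' means we won the slot.
 */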
static int arm_smmu_alloc_smr(struct arm_smmu_device *smmu)
{
	int i;

	for (i = 0; i < smmu->num_mapping_groups; i++)
		if (!cmpxchg(&smmu->smrs[i].valid, false, true))
			return i;

	return INVALID_SMENDX;
}

static void arm_smmu_free_smr(struct arm_smmu_device *smmu, int idx)
{
	writel_relaxed(~SMR_VALID, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
	WRITE_ONCE(smmu->smrs[idx].valid, false);
}

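/*
 * An SMR matches an incoming StreamID when the ID bits not covered by
 * the mask are equal, so the mask of 0 programmed at allocation time
 * demands an exact match.
 */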
static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = (smr->id & smmu->streamid_mask) << SMR_ID_SHIFT |
		  (smr->mask & smmu->smr_mask_mask) << SMR_MASK_SHIFT;

	if (smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

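/*
 * Write the S2CR before (potentially) validating the SMR, so that a
 * stream can never match while its target context bank is stale.
 */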
static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

static int arm_smmu_master_alloc_smes(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, idx;

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		if (cfg->smendx[i] != INVALID_SMENDX)
			return -EEXIST;

		/* ...except on stream indexing hardware, of course */
		if (!smrs) {
			cfg->smendx[i] = cfg->streamids[i];
			continue;
		}

		idx = arm_smmu_alloc_smr(smmu);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}
		cfg->smendx[i] = idx;

		smrs[idx].id = cfg->streamids[i];
		smrs[idx].mask = 0; /* We don't currently share SMRs */
	}

	if (!smrs)
		return 0;

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i)
		arm_smmu_write_smr(smmu, cfg->smendx[i]);

	return 0;

err_free_smrs:
	while (i--) {
		arm_smmu_free_smr(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	return -ENOSPC;
}

static void arm_smmu_master_free_smes(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = cfg->smendx[i];

		/* An IOMMU group is torn down by the first device to be removed */
		if (idx == INVALID_SMENDX)
			return;

		smmu->s2crs[idx] = s2cr_init_val;
		arm_smmu_write_s2cr(smmu, idx);
	}
	/* Sync S2CR updates before touching anything else */
	__iowmb();

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		if (smmu->smrs)
			arm_smmu_free_smr(smmu, cfg->smendx[i]);

		cfg->smendx[i] = INVALID_SMENDX;
	}
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret = 0;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
	u8 cbndx = smmu_domain->cfg.cbndx;

	if (cfg->smendx[0] == INVALID_SMENDX)
		ret = arm_smmu_master_alloc_smes(smmu, cfg);
	if (ret)
		return ret;

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a platform_device
	 * and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		type = S2CR_TYPE_BYPASS;

	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = cfg->smendx[i];

		/* Devices in an IOMMU group may already be configured */
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			break;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

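/*
 * Illustrative example with assumed values: attaching a master to a domain
 * that owns context bank 3 leaves each of its stream-map entries as
 *
 *	s2cr[idx] = { .type	= S2CR_TYPE_TRANS,
 *		      .privcfg	= S2CR_PRIVCFG_UNPRIV,
 *		      .cbndx	= 3 };
 *
 * whereas a master attached to an IOMMU_DOMAIN_DMA domain is left in
 * bypass for now, per the FIXME above.
 */
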
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	return arm_smmu_domain_add_master(smmu_domain, cfg);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

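/*
 * Worked example with assumed values: a lookup of iova 0x12345abc issues
 * the hardware walk for va 0x12345000. If the PAR comes back holding
 * physical frame 0x89654000, the returned address recombines the frame
 * with the page offset:
 *
 *	(0x89654000 & GENMASK_ULL(39, 12)) | (0x12345abc & 0xfff)
 *		== 0x89654abc
 */
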
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

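/*
 * Illustrative usage only: the core API exposes these capabilities per
 * bus, so a client driver behind the SMMU might check, for example:
 *
 *	if (iommu_capable(&platform_bus_type, IOMMU_CAP_CACHE_COHERENCY))
 *		enable_coherent_path();
 *
 * where enable_coherent_path() is a hypothetical caller-side helper.
 */
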
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids) {
		cfg->streamids[i] = sid;
		cfg->smendx[i] = INVALID_SMENDX;
		cfg->num_streamids++;
	}

	return 0;
}

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master_cfg *cfg = find_smmu_master_cfg(dev);

	if (smmu && cfg)
		arm_smmu_master_free_smes(smmu, cfg);

	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

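/*
 * Illustrative usage only: nesting has to be requested before the domain
 * is finalised by its first attach, for example:
 *
 *	int nesting = 1;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
 *	iommu_attach_device(domain, dev);
 *
 * Requesting it after the attach fails with -EPERM, as above.
 */
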
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
	 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
	 * bit is only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

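/*
 * Worked example of the encoding above (register values assumed): an ID2
 * register with IAS == 2 and OAS == 1 describes a 40-bit intermediate
 * physical address space and a 36-bit output address space respectively.
 */
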
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	/* Max. number of entries we have for stream matching/indexing */
	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		u32 smr;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/*
		 * SMR.ID bits may not be preserved if the corresponding MASK
		 * bits are set, so check each one separately. We can reject
		 * masters later if they try to claim IDs outside these masks.
		 */
		smr = smmu->streamid_mask << SMR_ID_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->streamid_mask = smr >> SMR_ID_SHIFT;

		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups, mask 0x%x\n",
			   size, smmu->smr_mask_mask);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

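/*
 * Illustrative device-tree fragment (addresses, interrupts and phandles
 * are assumptions, not from any real platform) that would match the
 * "arm,mmu-500" entry above:
 *
 *	smmu: iommu@2b400000 {
 *		compatible = "arm,mmu-500";
 *		reg = <0x2b400000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 32 4>, <0 33 4>;
 *		mmu-masters = <&dma0 0x100>;
 *	};
 */
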
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np		= of_node_get(it.node);
		masterspec->args_count	= count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");