/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

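/*
 * For example, the global fault handler below derives its register base
 * with ARM_SMMU_GR0_NS(smmu), so the same accessors work whether or not
 * the "calxeda,smmu-secure-config-access" option is in effect.
 */
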
/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif
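
/*
 * e.g. the stage-2 TLBIIPAS2 writes in arm_smmu_tlb_inv_range_nosync()
 * go through smmu_write_atomic_lq(): one 64-bit store on 64-bit hosts,
 * otherwise a 32-bit store of the low word, which is all that the
 * AArch32 formats need.
 */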

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}
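
/*
 * Compound-literal "reset value" for an S2CR: unclaimed entries fall back
 * to bypass, or to fault if the disable_bypass parameter is set (see
 * arm_smmu_master_free_smes() below).
 */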

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	s16				smendx[MAX_MASTER_STREAMIDS];
};
#define INVALID_SMENDX			-1

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
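
/*
 * On Cavium implementations the ASID and VMID spaces are shared across
 * SMMU instances, so cavium_id_base (carved out of the global
 * cavium_smmu_context_count below at probe time) keeps each instance's
 * context IDs unique; elsewhere it is zero and these reduce to plain
 * cbndx-based IDs.
 */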

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

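/*
 * Walk one SMMU's "mmu-masters" list looking for the given master's device
 * node; returns 1 to stop the search, leaving the phandle iterator
 * positioned at the matching entry so the caller can read out its
 * "#stream-id-cells" worth of stream IDs.
 */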
static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static int arm_smmu_register_legacy_master(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		err = __find_legacy_master_phandle(smmu->dev, &data);
		if (err)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (it.cur_count > MAX_MASTER_STREAMIDS) {
		dev_err(smmu->dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, dev_name(dev));
		return -ENOSPC;
	}
	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return -ENOMEM;

	cfg->smmu = smmu;
	dev->archdata.iommu = cfg;

	while (it.cur_count--)
		cfg->streamids[cfg->num_streamids++] = be32_to_cpup(it.cur++);

	return 0;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
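
/*
 * find_next_zero_bit() alone would be racy, so the test_and_set_bit()
 * loop retries until this caller has actually claimed the bit: a small
 * lock-free allocator, used here for the context bank map.
 */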

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~0xfffUL;	/* the bottom 12 bits carry the ASID */
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
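
/*
 * Hooks invoked by the io-pgtable code as page tables are updated:
 * tlb_add_flush() issues per-page invalidations without waiting, and
 * tlb_sync() later ensures that they have completed.
 */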

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested	Supported	Actual
	 *	S1	      N		   S1
	 *	S1	    S1+S2	   S1
	 *	S1	      S2	   S2
	 *	S1	      S1	   S1
	 *	N	      N		   N
	 *	N	    S1+S2	   S2
	 *	N	      S2	   S2
	 *	N	      S1	   S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

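/*
 * A stream map entry (SME) is an SMR plus its associated S2CR; on
 * stream-indexing hardware (no SMRs) the index is simply the stream ID
 * itself. The helpers below allocate, program and tear down SMEs.
 */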
static int arm_smmu_alloc_smr(struct arm_smmu_device *smmu)
{
	int i;

	for (i = 0; i < smmu->num_mapping_groups; i++)
		if (!cmpxchg(&smmu->smrs[i].valid, false, true))
			return i;

	return INVALID_SMENDX;
}

static void arm_smmu_free_smr(struct arm_smmu_device *smmu, int idx)
{
	writel_relaxed(~SMR_VALID, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
	WRITE_ONCE(smmu->smrs[idx].valid, false);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

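/*
 * Note the order in arm_smmu_write_sme() above: the S2CR is always
 * programmed before the SMR is (re)written, so a stream can never match
 * an SMR whose S2CR has yet to be configured.
 */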
static int arm_smmu_master_alloc_smes(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, idx;

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		if (cfg->smendx[i] != INVALID_SMENDX)
			return -EEXIST;

		/* ...except on stream indexing hardware, of course */
		if (!smrs) {
			cfg->smendx[i] = cfg->streamids[i];
			continue;
		}

		idx = arm_smmu_alloc_smr(smmu);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}
		cfg->smendx[i] = idx;

		smrs[idx].id = cfg->streamids[i];
		smrs[idx].mask = 0; /* We don't currently share SMRs */
	}

	if (!smrs)
		return 0;

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i)
		arm_smmu_write_smr(smmu, cfg->smendx[i]);

	return 0;

err_free_smrs:
	while (i--) {
		arm_smmu_free_smr(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	return -ENOSPC;
}

static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg)
{
	struct arm_smmu_device *smmu = cfg->smmu;
	int i;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = cfg->smendx[i];

		/* An IOMMU group is torn down by the first device to be removed */
		if (idx == INVALID_SMENDX)
			return;

		smmu->s2crs[idx] = s2cr_init_val;
		arm_smmu_write_s2cr(smmu, idx);
	}
	/* Sync S2CR updates before touching anything else */
	__iowmb();

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		if (smmu->smrs)
			arm_smmu_free_smr(smmu, cfg->smendx[i]);

		cfg->smendx[i] = INVALID_SMENDX;
	}
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret = 0;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
	u8 cbndx = smmu_domain->cfg.cbndx;

	if (cfg->smendx[0] == INVALID_SMENDX)
		ret = arm_smmu_master_alloc_smes(smmu, cfg);
	if (ret)
		return ret;

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a platform_device
	 * and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		type = S2CR_TYPE_BYPASS;

	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = cfg->smendx[i];

		/* Devices in an IOMMU group may already be configured */
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			break;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;

	if (!cfg) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, cfg->smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != cfg->smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(cfg->smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, cfg);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
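
/*
 * Note that arm_smmu_map() and arm_smmu_unmap() above simply proxy into
 * the io-pgtable library; the irqsave spinlock keeps the page tables
 * consistent and the operations usable from atomic context.
 */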

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
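	/*
	 * e.g. iova 0x12345678 is issued as va 0x12345000; the low 12 bits
	 * are reapplied to the translated address below.
	 */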
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg;
	struct iommu_group *group;
	int i, ret;

	ret = arm_smmu_register_legacy_master(dev);
	cfg = dev->archdata.iommu;
	if (ret)
		goto out_free;

	ret = -EINVAL;
	for (i = 0; i < cfg->num_streamids; i++) {
		u16 sid = cfg->streamids[i];

		if (sid & ~cfg->smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, cfg->smmu->streamid_mask);
			goto out_free;
		}
		cfg->smendx[i] = INVALID_SMENDX;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_free;
	}
	iommu_group_put(group);
	return 0;

out_free:
	kfree(cfg);
	dev->archdata.iommu = NULL;
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;

	if (!cfg)
		return;

	arm_smmu_master_free_smes(cfg);
	iommu_group_remove_device(dev);
	kfree(cfg);
	dev->archdata.iommu = NULL;
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
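
/*
 * Bring the SMMU to a known state: clear the global fault status, reset
 * every stream mapping entry, quiesce and clear all context banks,
 * invalidate the TLBs, and finally configure and enable the device via
 * sCR0, with MMU-500-specific ACR/ACTLR fixups applied along the way.
 */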

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
	 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
	 * bit is only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
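
/*
 * Translate the 3-bit address size fields from the ID registers into bit
 * widths: e.g. a raw value of 2 means 40 bits of address.
 */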

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	/* Max. number of entries we have for stream matching/indexing */
	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		u32 smr;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/*
		 * SMR.ID bits may not be preserved if the corresponding MASK
		 * bits are set, so check each one separately. We can reject
		 * masters later if they try to claim IDs outside these masks.
		 */
		smr = smmu->streamid_mask << SMR_ID_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->streamid_mask = smr >> SMR_ID_SHIFT;

		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
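		/*
		 * For example, if NUMSIDB claims 15 bits but the all-ones ID
		 * probe above reads back as 0x00ff, only 8 StreamID bits are
		 * actually implemented and streamid_mask shrinks to 0xff.
		 */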

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups, mask 0x%x\n",
			   size, smmu->smr_mask_mask);
	}
	/* s2cr->type == 0 means translation, so initialise explicitly */
	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
					 GFP_KERNEL);
	if (!smmu->s2crs)
		return -ENOMEM;
	for (i = 0; i < size; i++)
		smmu->s2crs[i] = s2cr_init_val;

	smmu->num_mapping_groups = size;

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
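
	/*
	 * arm_smmu_ops is shared by every SMMU instance, so the global
	 * pgsize_bitmap ends up as the union of all probed SMMUs' supported
	 * page sizes: the first instance sets it, later ones OR theirs in.
	 */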
	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
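
/*
 * For reference, a minimal (purely hypothetical) device-tree node this
 * driver binds against, with one global fault interrupt followed by one
 * context-bank interrupt:
 *
 *	smmu@2b400000 {
 *		compatible = "arm,mmu-500";
 *		reg = <0x2b400000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 229 4>, <0 230 4>;
 *	};
 */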

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}
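
	/*
	 * Interrupts are laid out with the global fault interrupts first:
	 * anything beyond the first #global-interrupts entries is a
	 * context-bank interrupt.
	 */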
	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");