/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))
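
/*
 * For example, with ARM_SMMU_OPT_SECURE_CFG_ACCESS set, reading sGFSR
 * (offset 0x48) through ARM_SMMU_GR0_NS() actually targets its secure
 * alias nsGFSR at 0x48 + 0x400 = 0x448, matching the offsets quoted above.
 */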

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif
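
/*
 * Illustrative use (mirroring arm_smmu_tlb_inv_range_nosync() below):
 *
 *	smmu_write_atomic_lq(iova, reg);
 *
 * issues a single atomic 64-bit writeq_relaxed() on 64-bit kernels, but
 * degrades to a 32-bit writel_relaxed() of the low word on 32-bit ones,
 * which is safe because the AArch32 register formats keep everything of
 * interest in bits [31:0].
 */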

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)

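/*
 * Putting the two register files together: to route stream ID 0x42
 * (no masking) to context bank 3, a stream-matching SMMU would program
 * some SMR/S2CR pair n roughly as follows; this is a sketch of what
 * arm_smmu_master_configure_smrs() and arm_smmu_domain_add_master()
 * below do for each of a master's stream IDs:
 *
 *	writel_relaxed(SMR_VALID | (0 << SMR_MASK_SHIFT) |
 *		       (0x42 << SMR_ID_SHIFT),
 *		       gr0_base + ARM_SMMU_GR0_SMR(n));
 *	writel_relaxed(S2CR_TYPE_TRANS | (3 << S2CR_CBNDX_SHIFT),
 *		       gr0_base + ARM_SMMU_GR0_S2CR(n));
 */
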
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))

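/*
 * Worked example of the layout implied by these macros: with a 128K
 * register region (size = 0x20000) and 4K pages (pgshift = 12), the
 * context banks occupy the upper half of the region, so context bank n
 * starts at base + 0x10000 + n * 0x1000, and its fault status register,
 * say, lives at that address plus ARM_SMMU_CB_FSR below.
 */
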
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
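
/*
 * e.g. with cavium_id_base == 0 (the expected value on non-Cavium
 * implementations), context bank 3 uses ASID 3 and VMID 4; Cavium
 * ThunderX parts bias the base (see cavium_smmu_context_count below)
 * so that ASIDs/VMIDs stay unique across their multiple SMMUs.
 */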

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

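/*
 * Typical use, as in arm_smmu_init_domain_context() below: carve a
 * stage-1 context bank index out of the shared map while skipping the
 * banks reserved for stage 2:
 *
 *	ret = __arm_smmu_alloc_bitmap(smmu->context_map,
 *				      smmu->num_s2_context_banks,
 *				      smmu->num_context_banks);
 */
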
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			/* Clear the page offset, then OR in the ASID */
			iova &= ~0xfffUL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
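	/*
	 * e.g. a little-endian stage-1 context bank ends up with
	 * reg == 0x1067 (M | TRE | AFE | CFRE | CFIE | ASIDPNE):
	 * translation enabled, with context faults aborted and
	 * reported by interrupt rather than stalling the device.
	 */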
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/* We're bypassing these SIDs, so don't allocate an actual context */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 * S1               N                S1
	 * S1               S1+S2            S1
	 * S1               S2               S2
	 * S1               S1               S1
	 * N                N                N
	 * N                S1+S2            S2
	 * N                S2               S2
	 * N                S1               S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (idx < 0) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU. Note that we need to take
	 * care configuring SMRs for devices that are both a platform_device
	 * and a PCI device (i.e. a PCI host controller).
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}

Will Deaconbc7f2ce2016-02-17 17:41:57 +00001216static void arm_smmu_detach_dev(struct device *dev,
1217 struct arm_smmu_master_cfg *cfg)
1218{
1219 struct iommu_domain *domain = dev->archdata.iommu;
1220 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1221
1222 dev->archdata.iommu = NULL;
1223 arm_smmu_domain_remove_master(smmu_domain, cfg);
1224}
1225
Will Deacon45ae7cf2013-06-24 18:31:25 +01001226static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1227{
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001228 int ret;
Joerg Roedel1d672632015-03-26 13:43:10 +01001229 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
Will Deacon518f7132014-11-14 17:17:54 +00001230 struct arm_smmu_device *smmu;
Will Deacona9a1b0b2014-05-01 18:05:08 +01001231 struct arm_smmu_master_cfg *cfg;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001232
Will Deacon8f68f8e2014-07-15 11:27:08 +01001233 smmu = find_smmu_for_device(dev);
Will Deacon44680ee2014-06-25 11:29:12 +01001234 if (!smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001235 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1236 return -ENXIO;
1237 }
1238
Will Deacon518f7132014-11-14 17:17:54 +00001239 /* Ensure that the domain is finalised */
1240 ret = arm_smmu_init_domain_context(domain, smmu);
Arnd Bergmann287980e2016-05-27 23:23:25 +02001241 if (ret < 0)
Will Deacon518f7132014-11-14 17:17:54 +00001242 return ret;
1243
Will Deacon45ae7cf2013-06-24 18:31:25 +01001244 /*
Will Deacon44680ee2014-06-25 11:29:12 +01001245 * Sanity check the domain. We don't support domains across
1246 * different SMMUs.
Will Deacon45ae7cf2013-06-24 18:31:25 +01001247 */
Will Deacon518f7132014-11-14 17:17:54 +00001248 if (smmu_domain->smmu != smmu) {
Will Deacon45ae7cf2013-06-24 18:31:25 +01001249 dev_err(dev,
1250 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
Mitchel Humpherysa18037b2014-07-30 18:58:13 +01001251 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1252 return -EINVAL;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001253 }
Will Deacon45ae7cf2013-06-24 18:31:25 +01001254
1255 /* Looks ok, so add the device to the domain */
Will Deacon8f68f8e2014-07-15 11:27:08 +01001256 cfg = find_smmu_master_cfg(dev);
Will Deacona9a1b0b2014-05-01 18:05:08 +01001257 if (!cfg)
Will Deacon45ae7cf2013-06-24 18:31:25 +01001258 return -ENODEV;
1259
Will Deaconbc7f2ce2016-02-17 17:41:57 +00001260 /* Detach the dev from its current domain */
1261 if (dev->archdata.iommu)
1262 arm_smmu_detach_dev(dev, cfg);
1263
Will Deacon844e35b2014-07-17 11:23:51 +01001264 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
1265 if (!ret)
1266 dev->archdata.iommu = domain;
Will Deacon45ae7cf2013-06-24 18:31:25 +01001267 return ret;
1268}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
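
/*
 * Illustrative sketch, not part of the driver: a map/lookup/unmap round
 * trip through the three ops above. The IOVA is a made-up value; real
 * users must pick sizes allowed by arm_smmu_ops.pgsize_bitmap.
 */
static int __maybe_unused arm_smmu_example_roundtrip(struct iommu_domain *domain,
						     phys_addr_t paddr)
{
	unsigned long iova = 0x10000000;	/* hypothetical IOVA */
	int ret;

	ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* May take the hardware ATS1PR path or fall back to a table walk */
	WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

	return iommu_unmap(domain, iova, SZ_4K) == SZ_4K ? 0 : -EINVAL;
}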

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}

static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}
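
/*
 * Illustrative sketch, not part of the driver: a pci_for_each_dma_alias()
 * callback that logs every alias instead of keeping only the last one as
 * __arm_smmu_get_pci_sid() does. Invoke as, e.g.:
 *
 *	pci_for_each_dma_alias(pdev, arm_smmu_example_dump_alias, NULL);
 */
static int __maybe_unused arm_smmu_example_dump_alias(struct pci_dev *pdev,
						      u16 alias, void *data)
{
	dev_info(&pdev->dev, "DMA alias (candidate StreamID) 0x%04x\n", alias);
	return 0; /* continue walking */
}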

static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
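
/*
 * Illustrative sketch, not part of the driver: requesting a nested
 * (stage-1 over stage-2) domain through the attribute interface above.
 * This must be done before the first attach, since the stage is fixed
 * once the domain has been finalised against an SMMU.
 */
static int __maybe_unused arm_smmu_example_set_nesting(struct iommu_domain *domain)
{
	int nesting = 1;

	return iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
}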

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/*
	 * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear the
	 * CACHE_LOCK bit of the sACR register first. Note that the
	 * CACHE_LOCK bit is only present in MMU-500r2 onwards.
	 */
	reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
	major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
	if ((smmu->model == ARM_MMU500) && (major >= 2)) {
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
			smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	/* Max. number of entries we have for stream matching/indexing */
	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		u32 smr;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/*
		 * SMR.ID bits may not be preserved if the corresponding MASK
		 * bits are set, so check each one separately. We can reject
		 * masters later if they try to claim IDs outside these masks.
		 */
		smr = smmu->streamid_mask << SMR_ID_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->streamid_mask = smr >> SMR_ID_SHIFT;

		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups, mask 0x%x",
			   size, smmu->smr_mask_mask);
	}
	smmu->num_mapping_groups = size;

	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
		if (!(id & ID0_PTFS_NO_AARCH32S))
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);
	/*
	 * Cavium CN88xx erratum #27704.
	 * Ensure ASID and VMID allocation is unique across all SMMUs in
	 * the system.
	 */
	if (smmu->model == CAVIUM_SMMUV2) {
		smmu->cavium_id_base =
			atomic_add_return(smmu->num_context_banks,
					  &cavium_smmu_context_count);
		smmu->cavium_id_base -= smmu->num_context_banks;
	}

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}
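
/*
 * Illustrative sketch, not part of the driver: the write-ones/read-back
 * idiom used above to discover which SMR ID/MASK bits are implemented.
 * It generalises to any RW register whose unimplemented bits read back
 * as zero; the helper name is hypothetical.
 */
static u32 __maybe_unused arm_smmu_example_probe_field(void __iomem *reg,
						       u32 field_mask)
{
	writel_relaxed(field_mask, reg);
	return readl_relaxed(reg) & field_mask; /* implemented bits only */
}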

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const struct arm_smmu_match_data *data;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	data = of_id->data;
	smmu->version = data->version;
	smmu->model = data->model;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np		= of_node_get(it.node);
		masterspec->args_count	= count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);

	kfree(masterspec);

	parse_driver_options(smmu);

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_put_masters;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}
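
/*
 * Illustrative device-tree fragment (addresses, interrupts and StreamIDs
 * are made up) showing the properties the probe routine above consumes.
 * Each master referenced by mmu-masters carries a #stream-id-cells
 * property giving the number of StreamIDs that follow its phandle:
 *
 *	gpu: gpu@fd000000 {
 *		...
 *		#stream-id-cells = <2>;
 *	};
 *
 *	smmu@ba5e0000 {
 *		compatible = "arm,smmu-v2";
 *		reg = <0xba5e0000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 32 4>, <0 33 4>;
 *		mmu-masters = <&gpu 0xd01d 0xd01e>;
 *	};
 */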

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");