/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu {
        void __iomem *regs;
        struct device *dev;

        struct tegra_mc *mc;
        const struct tegra_smmu_soc *soc;

        unsigned long *asids;
        struct mutex lock;

        struct list_head list;
};

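/*
 * An address space: one instance per IOMMU domain. It is identified by an
 * ASID and described by a two-level page table rooted at the "pd" page;
 * the "count" page tracks how many entries of each page table are in use
 * so that empty page tables can be freed again.
 */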
struct tegra_smmu_as {
        struct iommu_domain domain;
        struct tegra_smmu *smmu;
        unsigned int use_count;
        struct page *count;
        struct page *pd;
        unsigned id;
        u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
        return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
                               unsigned long offset)
{
        writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
        return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))

#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
                                         SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
                                       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PFN_MASK 0x000fffff

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
                       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
                       SMMU_PTE_NONSECURE)

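/*
 * Flush the SMMU's page table cache (PTC). If a page is given, only the
 * cache line covering the atom-aligned offset within that page is flushed;
 * otherwise the entire PTC is invalidated.
 */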
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
                                  unsigned long offset)
{
        phys_addr_t phys = page ? page_to_phys(page) : 0;
        u32 value;

        if (page) {
                offset &= ~(smmu->mc->soc->atom_size - 1);

                if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
                        value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
                        value = 0;
#endif
                        smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
                }

                value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        } else {
                value = SMMU_PTC_FLUSH_TYPE_ALL;
        }

        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
                                       unsigned long asid)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_MATCH_ALL;
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
                                          unsigned long asid,
                                          unsigned long iova)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_SECTION(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
                                        unsigned long asid,
                                        unsigned long iova)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_GROUP(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush(struct tegra_smmu *smmu)
{
        smmu_readl(smmu, SMMU_CONFIG);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
        unsigned long id;

        mutex_lock(&smmu->lock);

        id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
        if (id >= smmu->soc->num_asids) {
                mutex_unlock(&smmu->lock);
                return -ENOSPC;
        }

        set_bit(id, smmu->asids);
        *idp = id;

        mutex_unlock(&smmu->lock);
        return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
        mutex_lock(&smmu->lock);
        clear_bit(id, smmu->asids);
        mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
        return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
        struct tegra_smmu_as *as;
        unsigned int i;
        uint32_t *pd;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        as = kzalloc(sizeof(*as), GFP_KERNEL);
        if (!as)
                return NULL;

        as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

        as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
        if (!as->pd) {
                kfree(as);
                return NULL;
        }

        as->count = alloc_page(GFP_KERNEL);
        if (!as->count) {
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        /* clear PDEs */
        pd = page_address(as->pd);
        SetPageReserved(as->pd);

        for (i = 0; i < SMMU_NUM_PDE; i++)
                pd[i] = 0;

        /* clear PDE usage counters */
        pd = page_address(as->count);
        SetPageReserved(as->count);

        for (i = 0; i < SMMU_NUM_PDE; i++)
                pd[i] = 0;

        return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);

        /* TODO: free page directory and page tables */
        ClearPageReserved(as->pd);

        kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
        const struct tegra_smmu_swgroup *group = NULL;
        unsigned int i;

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                if (smmu->soc->swgroups[i].swgroup == swgroup) {
                        group = &smmu->soc->swgroups[i];
                        break;
                }
        }

        return group;
}

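/*
 * Route all memory clients of a swgroup through the SMMU and program the
 * swgroup's ASID register; tegra_smmu_disable() below undoes both steps.
 */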
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
                              unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value |= BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value |= SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
                               unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value &= ~SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value &= ~BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }
}

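/*
 * On first use of an address space, allocate an ASID for it and program
 * the page directory base into the PTB registers. Later attachments only
 * increment the reference count that tegra_smmu_as_unprepare() drops.
 */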
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
{
        u32 value;
        int err;

        if (as->use_count > 0) {
                as->use_count++;
                return 0;
        }

        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
                return err;

        smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
        smmu_flush_ptc(smmu, as->pd, 0);
        smmu_flush_tlb_asid(smmu, as->id);

        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
        value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);

        as->smmu = smmu;
        as->use_count++;

        return 0;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                                    struct tegra_smmu_as *as)
{
        if (--as->use_count > 0)
                return;

        tegra_smmu_free_asid(smmu, as->id);
        as->smmu = NULL;
}

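/*
 * Attach walks the device's "iommus" property and enables every swgroup
 * that references this SMMU instance. The single specifier cell is the
 * swgroup ID, e.g. (hypothetical device tree fragment):
 *
 *      iommus = <&mc TEGRA_SWGROUP_DC>;
 */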
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct tegra_smmu *smmu = dev->archdata.iommu;
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        unsigned int index = 0;
        int err = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        continue;
                }

                of_node_put(args.np);

                err = tegra_smmu_as_prepare(smmu, as);
                if (err < 0)
                        return err;

                tegra_smmu_enable(smmu, swgroup, as->id);
                index++;
        }

        if (index == 0)
                return -ENODEV;

        return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = as->smmu;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        continue;
                }

                of_node_put(args.np);

                tegra_smmu_disable(smmu, swgroup, as->id);
                tegra_smmu_as_unprepare(smmu, as);
                index++;
        }
}

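/*
 * Look up (and allocate, if necessary) the page table entry for an IOVA.
 * The 32-bit IOVA splits into a 10-bit page directory index, a 10-bit
 * page table index and a 12-bit page offset; newly allocated page tables
 * are flushed and hooked into the page directory before being used.
 */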
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
                       struct page **pagep)
{
        u32 *pd = page_address(as->pd), *pt, *count;
        u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
        u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
        struct tegra_smmu *smmu = as->smmu;
        struct page *page;
        unsigned int i;

        if (pd[pde] == 0) {
                page = alloc_page(GFP_KERNEL | __GFP_DMA);
                if (!page)
                        return NULL;

                pt = page_address(page);
                SetPageReserved(page);

                for (i = 0; i < SMMU_NUM_PTE; i++)
                        pt[i] = 0;

                smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);

                pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);

                smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
                smmu_flush_ptc(smmu, as->pd, pde << 2);
                smmu_flush_tlb_section(smmu, as->id, iova);
                smmu_flush(smmu);
        } else {
                page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
                pt = page_address(page);
        }

        *pagep = page;

        /* Keep track of entries in this page table. */
        count = page_address(as->count);
        if (pt[pte] == 0)
                count[pde]++;

        return &pt[pte];
}

static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
{
        u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
        u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
        u32 *count = page_address(as->count);
        u32 *pd = page_address(as->pd), *pt;
        struct page *page;

        page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
        pt = page_address(page);

        /*
         * When no entries in this page table are used anymore, return the
         * memory page to the system.
         */
        if (pt[pte] != 0) {
                if (--count[pde] == 0) {
                        ClearPageReserved(page);
                        __free_page(page);
                        pd[pde] = 0;
                }

                pt[pte] = 0;
        }
}

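/*
 * Install a single 4 KiB mapping: write the PTE, then flush the CPU data
 * cache, the SMMU page table cache and the TLB so that the hardware
 * observes the update.
 */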
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset;
        struct page *page;
        u32 *pte;

        pte = as_get_pte(as, iova, &page);
        if (!pte)
                return -ENOMEM;

        *pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
        offset = offset_in_page(pte);

        smmu->soc->ops->flush_dcache(page, offset, 4);
        smmu_flush_ptc(smmu, page, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);

        return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset;
        struct page *page;
        u32 *pte;

        pte = as_get_pte(as, iova, &page);
        if (!pte)
                return 0;

        offset = offset_in_page(pte);
        as_put_pte(as, iova);

        smmu->soc->ops->flush_dcache(page, offset, 4);
        smmu_flush_ptc(smmu, page, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);

        return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct page *page;
        unsigned long pfn;
        u32 *pte;

        pte = as_get_pte(as, iova, &page);
        pfn = *pte & SMMU_PFN_MASK;

        return PFN_PHYS(pfn);
}

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
        struct platform_device *pdev;
        struct tegra_mc *mc;

        pdev = of_find_device_by_node(np);
        if (!pdev)
                return NULL;

        mc = platform_get_drvdata(pdev);
        if (!mc)
                return NULL;

        return mc->smmu;
}

static int tegra_smmu_add_device(struct device *dev)
{
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                          &args) == 0) {
                struct tegra_smmu *smmu;

                smmu = tegra_smmu_find(args.np);
                if (smmu) {
                        /*
                         * Only a single IOMMU master interface is currently
                         * supported by the Linux kernel, so abort after the
                         * first match.
                         */
                        dev->archdata.iommu = smmu;
                        break;
                }

                index++;
        }

        return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
        dev->archdata.iommu = NULL;
}

static const struct iommu_ops tegra_smmu_ops = {
        .capable = tegra_smmu_capable,
        .domain_alloc = tegra_smmu_domain_alloc,
        .domain_free = tegra_smmu_domain_free,
        .attach_dev = tegra_smmu_attach_dev,
        .detach_dev = tegra_smmu_detach_dev,
        .add_device = tegra_smmu_add_device,
        .remove_device = tegra_smmu_remove_device,
        .map = tegra_smmu_map,
        .unmap = tegra_smmu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = tegra_smmu_iova_to_phys,

        .pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
        static const struct of_device_id ahb_match[] = {
                { .compatible = "nvidia,tegra30-ahb", },
                { }
        };
        struct device_node *ahb;

        ahb = of_find_matching_node(NULL, ahb_match);
        if (ahb) {
                tegra_ahb_enable_smmu(ahb);
                of_node_put(ahb);
        }
}

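/*
 * Set up and enable the SMMU. This is called by the memory controller
 * driver (the SMMU shares the MC register space via mc->regs) rather than
 * through a platform driver probe of its own.
 */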
struct tegra_smmu *tegra_smmu_probe(struct device *dev,
                                    const struct tegra_smmu_soc *soc,
                                    struct tegra_mc *mc)
{
        struct tegra_smmu *smmu;
        size_t size;
        u32 value;
        int err;

        /* This can happen on Tegra20 which doesn't have an SMMU */
        if (!soc)
                return NULL;

        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu)
                return ERR_PTR(-ENOMEM);

        /*
         * This is a bit of a hack. Ideally we'd want to simply return this
         * value. However the IOMMU registration process will attempt to add
         * all devices to the IOMMU when bus_set_iommu() is called. In order
         * not to rely on global variables to track the IOMMU instance, we
         * set it here so that it can be looked up from the .add_device()
         * callback via the IOMMU device's .drvdata field.
         */
        mc->smmu = smmu;

        size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

        smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
        if (!smmu->asids)
                return ERR_PTR(-ENOMEM);

        mutex_init(&smmu->lock);

        smmu->regs = mc->regs;
        smmu->soc = soc;
        smmu->dev = dev;
        smmu->mc = mc;

        value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

        if (soc->supports_request_limit)
                value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

        smmu_writel(smmu, value, SMMU_PTC_CONFIG);

        value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
                SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);

        if (soc->supports_round_robin_arbitration)
                value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

        smmu_writel(smmu, value, SMMU_TLB_CONFIG);

        smmu_flush_ptc(smmu, NULL, 0);
        smmu_flush_tlb(smmu);
        smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
        smmu_flush(smmu);

        tegra_smmu_ahb_enable();

        err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
        if (err < 0)
                return ERR_PTR(err);

        return smmu;
}