/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

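/*
 * IOMMU driver for the SMMU found in NVIDIA Tegra memory controllers
 * (Tegra30 and later). Each address space is a two-level page table: a
 * 1024-entry page directory whose entries point to 1024-entry page tables
 * mapping 4 KiB pages, giving a 4 GiB IOVA aperture per address space.
 * Memory clients are grouped into "swgroups", each of which can be routed
 * to an address space via its per-swgroup ASID register.
 */
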
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	unsigned long pfn_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;
};

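/*
 * One address space: pd is the single-page page directory, and count is a
 * shadow page holding a per-PDE count of live PTEs so that empty page
 * tables can be freed again in as_put_pte(). The ASID in id is allocated
 * on first attach, and use_count tracks how many attached devices share
 * this address space.
 */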
struct tegra_smmu_as {
	struct iommu_domain *domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	struct page *count;
	struct page *pd;
	unsigned id;
	u32 attr;
};

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}

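/* SMMU register offsets, relative to the start of the MC register space */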
#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x014
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)

#define SMMU_PTC_CONFIG 0x018
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))

#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
				       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

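/*
 * A 32-bit IOVA thus decomposes into a 10-bit page directory index, a
 * 10-bit page table index and a 12-bit page offset. For example, for
 * iova 0x12345678:
 *
 *   pde    = (0x12345678 >> 22) & 0x3ff = 0x048  selects the 4 MiB section
 *   pte    = (0x12345678 >> 12) & 0x3ff = 0x345  selects the 4 KiB page
 *   offset =  0x12345678        & 0xfff = 0x678  byte within the page
 */
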
#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
		       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
		       SMMU_PTE_NONSECURE)

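/*
 * Flush the page table cache (PTC). If a page is given, a single entry is
 * flushed by the physical address of the page table word that changed,
 * aligned down to the MC atom size; otherwise the whole PTC is flushed.
 * SoCs with more than 32 physical address bits need the high address bits
 * written to a separate register first.
 */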
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
				  unsigned long offset)
{
	phys_addr_t phys = page ? page_to_phys(page) : 0;
	u32 value;

	if (page) {
		offset &= ~(smmu->mc->soc->atom_size - 1);

		if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
			value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
			value = 0;
#endif
			smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
		}

		value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	} else {
		value = SMMU_PTC_FLUSH_TYPE_ALL;
	}

	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

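/*
 * Read back an SMMU register so that all preceding register writes are
 * guaranteed to have reached the hardware before continuing.
 */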
192static inline void smmu_flush(struct tegra_smmu *smmu)
193{
194 smmu_readl(smmu, SMMU_CONFIG);
195}
196
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

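/*
 * Allocate the per-domain address space. The page directory and the shadow
 * page of PTE usage counters are allocated up front; individual page tables
 * are allocated lazily in as_get_pte(). The PD comes from the DMA zone,
 * presumably so that its physical address stays within the range the
 * hardware can program into PTB_DATA.
 */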
static int tegra_smmu_domain_init(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as;
	unsigned int i;
	uint32_t *pd;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return -ENOMEM;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
	as->domain = domain;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
	if (!as->pd) {
		kfree(as);
		return -ENOMEM;
	}

	as->count = alloc_page(GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return -ENOMEM;
	}

	/* clear PDEs */
	pd = page_address(as->pd);
	SetPageReserved(as->pd);

	for (i = 0; i < SMMU_NUM_PDE; i++)
		pd[i] = 0;

	/* clear PDE usage counters */
	pd = page_address(as->count);
	SetPageReserved(as->count);

	for (i = 0; i < SMMU_NUM_PDE; i++)
		pd[i] = 0;

	domain->priv = as;

	/* setup aperture */
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = 0xffffffff;
	domain->geometry.force_aperture = true;

	return 0;
}

static void tegra_smmu_domain_destroy(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = domain->priv;

	/* TODO: free page directory and page tables */
	ClearPageReserved(as->pd);

	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

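/*
 * Route a swgroup to an address space: first enable SMMU translation for
 * every memory client belonging to the swgroup, then program the swgroup's
 * ASID register with the target address space. tegra_smmu_disable() below
 * performs the same steps in reverse order.
 */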
static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}

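/*
 * First attach for an address space: allocate an ASID and program the page
 * directory's physical address into PTB_DATA for that ASID. Subsequent
 * attaches only bump the reference count; the ASID is released again in
 * tegra_smmu_as_unprepare() when the last device detaches.
 */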
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		return err;

	smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
	smmu_flush_ptc(smmu, as->pd, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);
	as->smmu = NULL;
}

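/*
 * Devices reference the SMMU through "iommus" phandles in the device tree,
 * with one cell per entry naming the swgroup. A hypothetical display
 * controller node might carry (exact names depend on the SoC dtsi):
 *
 *	dc@54200000 {
 *		...
 *		iommus = <&mc TEGRA_SWGROUP_DC>;
 *	};
 */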
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = domain->priv;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int attached = 0;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		/* always advance, or non-matching entries are reparsed forever */
		index++;

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		attached++;
	}

	if (attached == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = domain->priv;
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		/* always advance, or non-matching entries are reparsed forever */
		index++;

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
	}
}

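/*
 * Look up the PTE slot for an IOVA, allocating the page table on first use.
 * Note the ordering when a new page table is installed: the zeroed table is
 * cleaned from the CPU data cache before its PDE is written, and the PDE
 * write is then cleaned from the data cache and invalidated in the PTC and
 * TLB so that the hardware picks up the new entry.
 */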
static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       struct page **pagep)
{
	u32 *pd = page_address(as->pd), *pt, *count;
	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
	struct tegra_smmu *smmu = as->smmu;
	struct page *page;
	unsigned int i;

	if (pd[pde] == 0) {
		page = alloc_page(GFP_KERNEL | __GFP_DMA);
		if (!page)
			return NULL;

		pt = page_address(page);
		SetPageReserved(page);

		for (i = 0; i < SMMU_NUM_PTE; i++)
			pt[i] = 0;

		smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);

		pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);

		smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
		smmu_flush_ptc(smmu, as->pd, pde << 2);
		smmu_flush_tlb_section(smmu, as->id, iova);
		smmu_flush(smmu);
	} else {
		page = pfn_to_page(pd[pde] & smmu->pfn_mask);
		pt = page_address(page);
	}

	*pagep = page;

	/* Keep track of entries in this page table. */
	count = page_address(as->count);
	if (pt[pte] == 0)
		count[pde]++;

	return &pt[pte];
}

static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
{
	u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
	u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
	u32 *count = page_address(as->count);
	u32 *pd = page_address(as->pd), *pt;
	struct page *page;

	page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
	pt = page_address(page);

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (pt[pte] != 0) {
		if (--count[pde] == 0) {
			ClearPageReserved(page);
			__free_page(page);
			pd[pde] = 0;
		}

		pt[pte] = 0;
	}
}

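/*
 * Mapping and unmapping update a single 32-bit PTE and then perform the
 * same maintenance sequence: clean the PTE's cache line, invalidate the
 * cached copy in the PTC, invalidate the TLB entry for the IOVA's group,
 * and read back a register to post the writes.
 */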
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = domain->priv;
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset;
	struct page *page;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return -ENOMEM;

	*pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
	offset = offset_in_page(pte);

	smmu->soc->ops->flush_dcache(page, offset, 4);
	smmu_flush_ptc(smmu, page, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);

	return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = domain->priv;
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset;
	struct page *page;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	if (!pte)
		return 0;

	offset = offset_in_page(pte);
	as_put_pte(as, iova);

	smmu->soc->ops->flush_dcache(page, offset, 4);
	smmu_flush_ptc(smmu, page, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = domain->priv;
	struct page *page;
	unsigned long pfn;
	u32 *pte;

	pte = as_get_pte(as, iova, &page);
	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;
			break;
		}

		index++;
	}

	return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}

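/*
 * Note that pgsize_bitmap only advertises 4 KiB pages: the IOMMU core is
 * expected to split larger requests into 4 KiB calls to .map and .unmap.
 */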
static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_init = tegra_smmu_domain_init,
	.domain_destroy = tegra_smmu_domain_destroy,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,

	.pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc(smmu, NULL, 0);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0)
		return ERR_PTR(err);

	return smmu;
}