blob: 42b13c07aeef363c0e826b402c532bef42ff2856 [file] [log] [blame]
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +02001/*
Thierry Reding89184652014-04-16 09:24:44 +02002 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +02003 *
Thierry Reding89184652014-04-16 09:24:44 +02004 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +02007 */
8
Thierry Reding804cb542015-03-27 11:07:27 +01009#include <linux/bitops.h>
Thierry Redingd1313e72015-01-23 09:49:25 +010010#include <linux/debugfs.h>
Thierry Redingbc5e6de2013-01-21 11:09:06 +010011#include <linux/err.h>
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +020012#include <linux/iommu.h>
Thierry Reding89184652014-04-16 09:24:44 +020013#include <linux/kernel.h>
Hiroshi Doyu0760e8f2012-06-25 14:23:55 +030014#include <linux/of.h>
Thierry Reding89184652014-04-16 09:24:44 +020015#include <linux/of_device.h>
16#include <linux/platform_device.h>
17#include <linux/slab.h>
Thierry Reding306a7f92014-07-17 13:17:24 +020018
Russell King4b3c7d12015-07-27 13:29:36 +010019#include <asm/cacheflush.h>
20
Thierry Reding306a7f92014-07-17 13:17:24 +020021#include <soc/tegra/ahb.h>
Thierry Reding89184652014-04-16 09:24:44 +020022#include <soc/tegra/mc.h>
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +020023
Thierry Reding89184652014-04-16 09:24:44 +020024struct tegra_smmu {
25 void __iomem *regs;
26 struct device *dev;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +020027
Thierry Reding89184652014-04-16 09:24:44 +020028 struct tegra_mc *mc;
29 const struct tegra_smmu_soc *soc;
Stephen Warrene6bc5932012-09-04 16:36:15 -060030
Thierry Reding804cb542015-03-27 11:07:27 +010031 unsigned long pfn_mask;
32
Thierry Reding89184652014-04-16 09:24:44 +020033 unsigned long *asids;
34 struct mutex lock;
Stephen Warrene6bc5932012-09-04 16:36:15 -060035
Thierry Reding89184652014-04-16 09:24:44 +020036 struct list_head list;
Thierry Redingd1313e72015-01-23 09:49:25 +010037
38 struct dentry *debugfs;
Stephen Warrene6bc5932012-09-04 16:36:15 -060039};
40
Thierry Reding89184652014-04-16 09:24:44 +020041struct tegra_smmu_as {
Joerg Roedeld5f1a812015-03-26 13:43:12 +010042 struct iommu_domain domain;
Thierry Reding89184652014-04-16 09:24:44 +020043 struct tegra_smmu *smmu;
44 unsigned int use_count;
Russell King32924c72015-07-27 13:29:31 +010045 u32 *count;
Russell King853520f2015-07-27 13:29:26 +010046 struct page **pts;
Thierry Reding89184652014-04-16 09:24:44 +020047 struct page *pd;
48 unsigned id;
49 u32 attr;
Hiroshi Doyu39abf8a2012-08-02 11:46:40 +030050};
51
Joerg Roedeld5f1a812015-03-26 13:43:12 +010052static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
53{
54 return container_of(dom, struct tegra_smmu_as, domain);
55}
56
Thierry Reding89184652014-04-16 09:24:44 +020057static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
58 unsigned long offset)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +020059{
Thierry Reding89184652014-04-16 09:24:44 +020060 writel(value, smmu->regs + offset);
Joerg Roedelfe1229b2013-02-04 20:40:58 +010061}
62
Thierry Reding89184652014-04-16 09:24:44 +020063static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +020064{
Thierry Reding89184652014-04-16 09:24:44 +020065 return readl(smmu->regs + offset);
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +020066}
67
Thierry Reding89184652014-04-16 09:24:44 +020068#define SMMU_CONFIG 0x010
69#define SMMU_CONFIG_ENABLE (1 << 0)
70
71#define SMMU_TLB_CONFIG 0x14
72#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
73#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
74#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
75
76#define SMMU_PTC_CONFIG 0x18
77#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
78#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
79#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)
80
81#define SMMU_PTB_ASID 0x01c
82#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)
83
84#define SMMU_PTB_DATA 0x020
85#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))
86
87#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))
88
89#define SMMU_TLB_FLUSH 0x030
90#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
91#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
92#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
93#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
94#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
95 SMMU_TLB_FLUSH_VA_MATCH_SECTION)
96#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
97 SMMU_TLB_FLUSH_VA_MATCH_GROUP)
98#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)
99
100#define SMMU_PTC_FLUSH 0x034
101#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
102#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)
103
104#define SMMU_PTC_FLUSH_HI 0x9b8
105#define SMMU_PTC_FLUSH_HI_MASK 0x3
106
107/* per-SWGROUP SMMU_*_ASID register */
108#define SMMU_ASID_ENABLE (1 << 31)
109#define SMMU_ASID_MASK 0x7f
110#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)
111
112/* page table definitions */
113#define SMMU_NUM_PDE 1024
114#define SMMU_NUM_PTE 1024
115
116#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
117#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)
118
119#define SMMU_PDE_SHIFT 22
120#define SMMU_PTE_SHIFT 12
121
Thierry Reding89184652014-04-16 09:24:44 +0200122#define SMMU_PD_READABLE (1 << 31)
123#define SMMU_PD_WRITABLE (1 << 30)
124#define SMMU_PD_NONSECURE (1 << 29)
125
126#define SMMU_PDE_READABLE (1 << 31)
127#define SMMU_PDE_WRITABLE (1 << 30)
128#define SMMU_PDE_NONSECURE (1 << 29)
129#define SMMU_PDE_NEXT (1 << 28)
130
131#define SMMU_PTE_READABLE (1 << 31)
132#define SMMU_PTE_WRITABLE (1 << 30)
133#define SMMU_PTE_NONSECURE (1 << 29)
134
135#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
136 SMMU_PDE_NONSECURE)
137#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
138 SMMU_PTE_NONSECURE)
139
Russell King34d35f82015-07-27 13:29:16 +0100140static unsigned int iova_pd_index(unsigned long iova)
141{
142 return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
143}
144
145static unsigned int iova_pt_index(unsigned long iova)
146{
147 return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
148}
149
Russell King4b3c7d12015-07-27 13:29:36 +0100150static void smmu_flush_dcache(struct page *page, unsigned long offset,
151 size_t size)
152{
153#ifdef CONFIG_ARM
154 phys_addr_t phys = page_to_phys(page) + offset;
155#endif
156 void *virt = page_address(page) + offset;
157
158#ifdef CONFIG_ARM
159 __cpuc_flush_dcache_area(virt, size);
160 outer_flush_range(phys, phys + size);
161#endif
162
163#ifdef CONFIG_ARM64
164 __flush_dcache_area(virt, size);
165#endif
166}
167
Thierry Reding89184652014-04-16 09:24:44 +0200168static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
169 unsigned long offset)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200170{
Thierry Reding89184652014-04-16 09:24:44 +0200171 phys_addr_t phys = page ? page_to_phys(page) : 0;
172 u32 value;
Hiroshi Doyua6870e92013-01-31 10:14:10 +0200173
Thierry Reding89184652014-04-16 09:24:44 +0200174 if (page) {
175 offset &= ~(smmu->mc->soc->atom_size - 1);
Hiroshi Doyua6870e92013-01-31 10:14:10 +0200176
Thierry Reding89184652014-04-16 09:24:44 +0200177 if (smmu->mc->soc->num_address_bits > 32) {
178#ifdef CONFIG_PHYS_ADDR_T_64BIT
179 value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200180#else
Thierry Reding89184652014-04-16 09:24:44 +0200181 value = 0;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200182#endif
Thierry Reding89184652014-04-16 09:24:44 +0200183 smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
184 }
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200185
Thierry Reding89184652014-04-16 09:24:44 +0200186 value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
187 } else {
188 value = SMMU_PTC_FLUSH_TYPE_ALL;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200189 }
Hiroshi DOYU9e971a02012-07-02 14:26:38 +0300190
Thierry Reding89184652014-04-16 09:24:44 +0200191 smmu_writel(smmu, value, SMMU_PTC_FLUSH);
192}
193
/* Invalidate the entire SMMU TLB (all ASIDs, all addresses). */
static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}
198
/* Invalidate all TLB entries belonging to @asid. */
static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
208
/* Invalidate TLB entries for the 4 MiB section of @asid containing @iova. */
static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
219
/* Invalidate TLB entries for the 16 KiB group of @asid containing @iova. */
static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
		SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
230
/* Read back SMMU_CONFIG to ensure preceding register writes have landed. */
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}
235
236static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
237{
238 unsigned long id;
239
240 mutex_lock(&smmu->lock);
241
242 id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
243 if (id >= smmu->soc->num_asids) {
244 mutex_unlock(&smmu->lock);
245 return -ENOSPC;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200246 }
Hiroshi DOYU9e971a02012-07-02 14:26:38 +0300247
Thierry Reding89184652014-04-16 09:24:44 +0200248 set_bit(id, smmu->asids);
249 *idp = id;
Hiroshi DOYU9e971a02012-07-02 14:26:38 +0300250
Thierry Reding89184652014-04-16 09:24:44 +0200251 mutex_unlock(&smmu->lock);
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200252 return 0;
253}
254
Thierry Reding89184652014-04-16 09:24:44 +0200255static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200256{
Thierry Reding89184652014-04-16 09:24:44 +0200257 mutex_lock(&smmu->lock);
258 clear_bit(id, smmu->asids);
259 mutex_unlock(&smmu->lock);
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200260}
261
Thierry Reding89184652014-04-16 09:24:44 +0200262static bool tegra_smmu_capable(enum iommu_cap cap)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200263{
Joerg Roedel7c2aa642014-09-05 10:51:37 +0200264 return false;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200265}
266
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100267static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200268{
Thierry Reding89184652014-04-16 09:24:44 +0200269 struct tegra_smmu_as *as;
270 unsigned int i;
271 uint32_t *pd;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200272
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100273 if (type != IOMMU_DOMAIN_UNMANAGED)
274 return NULL;
275
Thierry Reding89184652014-04-16 09:24:44 +0200276 as = kzalloc(sizeof(*as), GFP_KERNEL);
277 if (!as)
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100278 return NULL;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200279
Thierry Reding89184652014-04-16 09:24:44 +0200280 as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200281
Thierry Reding89184652014-04-16 09:24:44 +0200282 as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
283 if (!as->pd) {
284 kfree(as);
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100285 return NULL;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200286 }
287
Russell King32924c72015-07-27 13:29:31 +0100288 as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
Thierry Reding89184652014-04-16 09:24:44 +0200289 if (!as->count) {
290 __free_page(as->pd);
291 kfree(as);
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100292 return NULL;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200293 }
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200294
Russell King853520f2015-07-27 13:29:26 +0100295 as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
296 if (!as->pts) {
Russell King32924c72015-07-27 13:29:31 +0100297 kfree(as->count);
Russell King853520f2015-07-27 13:29:26 +0100298 __free_page(as->pd);
299 kfree(as);
300 return NULL;
301 }
302
Thierry Reding89184652014-04-16 09:24:44 +0200303 /* clear PDEs */
304 pd = page_address(as->pd);
305 SetPageReserved(as->pd);
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200306
Thierry Reding89184652014-04-16 09:24:44 +0200307 for (i = 0; i < SMMU_NUM_PDE; i++)
308 pd[i] = 0;
Hiroshi Doyud2453b22012-07-30 08:39:18 +0300309
Thierry Reding471d9142015-03-27 11:07:25 +0100310 /* setup aperture */
Joerg Roedel7f65ef02015-04-02 13:33:19 +0200311 as->domain.geometry.aperture_start = 0;
312 as->domain.geometry.aperture_end = 0xffffffff;
313 as->domain.geometry.force_aperture = true;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200314
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100315 return &as->domain;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200316}
317
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100318static void tegra_smmu_domain_free(struct iommu_domain *domain)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200319{
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100320 struct tegra_smmu_as *as = to_smmu_as(domain);
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200321
Thierry Reding89184652014-04-16 09:24:44 +0200322 /* TODO: free page directory and page tables */
323 ClearPageReserved(as->pd);
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200324
Thierry Reding89184652014-04-16 09:24:44 +0200325 kfree(as);
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200326}
327
Thierry Reding89184652014-04-16 09:24:44 +0200328static const struct tegra_smmu_swgroup *
329tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
Hiroshi Doyu39abf8a2012-08-02 11:46:40 +0300330{
Thierry Reding89184652014-04-16 09:24:44 +0200331 const struct tegra_smmu_swgroup *group = NULL;
332 unsigned int i;
Hiroshi Doyu39abf8a2012-08-02 11:46:40 +0300333
Thierry Reding89184652014-04-16 09:24:44 +0200334 for (i = 0; i < smmu->soc->num_swgroups; i++) {
335 if (smmu->soc->swgroups[i].swgroup == swgroup) {
336 group = &smmu->soc->swgroups[i];
Hiroshi Doyu39abf8a2012-08-02 11:46:40 +0300337 break;
Hiroshi Doyu39abf8a2012-08-02 11:46:40 +0300338 }
339 }
340
Thierry Reding89184652014-04-16 09:24:44 +0200341 return group;
Hiroshi Doyu39abf8a2012-08-02 11:46:40 +0300342}
343
Thierry Reding89184652014-04-16 09:24:44 +0200344static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
345 unsigned int asid)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200346{
Thierry Reding89184652014-04-16 09:24:44 +0200347 const struct tegra_smmu_swgroup *group;
348 unsigned int i;
349 u32 value;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200350
Thierry Reding89184652014-04-16 09:24:44 +0200351 for (i = 0; i < smmu->soc->num_clients; i++) {
352 const struct tegra_mc_client *client = &smmu->soc->clients[i];
353
354 if (client->swgroup != swgroup)
355 continue;
356
357 value = smmu_readl(smmu, client->smmu.reg);
358 value |= BIT(client->smmu.bit);
359 smmu_writel(smmu, value, client->smmu.reg);
360 }
361
362 group = tegra_smmu_find_swgroup(smmu, swgroup);
363 if (group) {
364 value = smmu_readl(smmu, group->reg);
365 value &= ~SMMU_ASID_MASK;
366 value |= SMMU_ASID_VALUE(asid);
367 value |= SMMU_ASID_ENABLE;
368 smmu_writel(smmu, value, group->reg);
369 }
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200370}
371
Thierry Reding89184652014-04-16 09:24:44 +0200372static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
373 unsigned int asid)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200374{
Thierry Reding89184652014-04-16 09:24:44 +0200375 const struct tegra_smmu_swgroup *group;
376 unsigned int i;
377 u32 value;
378
379 group = tegra_smmu_find_swgroup(smmu, swgroup);
380 if (group) {
381 value = smmu_readl(smmu, group->reg);
382 value &= ~SMMU_ASID_MASK;
383 value |= SMMU_ASID_VALUE(asid);
384 value &= ~SMMU_ASID_ENABLE;
385 smmu_writel(smmu, value, group->reg);
386 }
387
388 for (i = 0; i < smmu->soc->num_clients; i++) {
389 const struct tegra_mc_client *client = &smmu->soc->clients[i];
390
391 if (client->swgroup != swgroup)
392 continue;
393
394 value = smmu_readl(smmu, client->smmu.reg);
395 value &= ~BIT(client->smmu.bit);
396 smmu_writel(smmu, value, client->smmu.reg);
397 }
398}
399
/*
 * Make @as usable on @smmu: on first use, allocate an ASID and program
 * the hardware's page-table base for it.  Subsequent calls only bump
 * the reference count.  Returns 0 or a negative error code.
 */
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		return err;

	/* Make the PD visible to the SMMU and drop any stale cached state. */
	smmu_flush_dcache(as->pd, 0, SMMU_SIZE_PD);
	smmu_flush_ptc(smmu, as->pd, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	/* Select the ASID, then program its page-directory base. */
	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;
}
429
/*
 * Drop one reference on @as; when the last user goes away, release its
 * ASID and detach it from the SMMU.
 */
static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);
	as->smmu = NULL;
}
439
/*
 * Attach @dev to @domain: walk the device's "iommus" DT phandles and,
 * for each entry referring to this SMMU, prepare the address space and
 * enable the device's swgroup with the domain's ASID.
 *
 * NOTE(review): if tegra_smmu_as_prepare() fails after some iterations
 * succeeded, the swgroups already enabled are not rolled back — verify
 * whether callers can recover from a partial attach.
 */
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		/* Skip phandles pointing at a different IOMMU. */
		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	/* No "iommus" entry matched this SMMU. */
	if (index == 0)
		return -ENODEV;

	return 0;
}
474
/*
 * Detach @dev from @domain: for each "iommus" DT entry matching this
 * SMMU, disable the swgroup and drop the address-space reference taken
 * at attach time.
 */
static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}
499
Russell King0b42c7c2015-07-27 13:29:21 +0100500static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
501{
502 u32 *pt = page_address(pt_page);
503
504 return pt + iova_pt_index(iova);
505}
506
507static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
508 struct page **pagep)
509{
510 unsigned int pd_index = iova_pd_index(iova);
511 struct page *pt_page;
Russell King0b42c7c2015-07-27 13:29:21 +0100512
Russell King853520f2015-07-27 13:29:26 +0100513 pt_page = as->pts[pd_index];
514 if (!pt_page)
Russell King0b42c7c2015-07-27 13:29:21 +0100515 return NULL;
516
Russell King0b42c7c2015-07-27 13:29:21 +0100517 *pagep = pt_page;
518
519 return tegra_smmu_pte_offset(pt_page, iova);
520}
521
Thierry Reding89184652014-04-16 09:24:44 +0200522static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
523 struct page **pagep)
524{
Russell King32924c72015-07-27 13:29:31 +0100525 u32 *pd = page_address(as->pd), *pt;
Russell King34d35f82015-07-27 13:29:16 +0100526 unsigned int pde = iova_pd_index(iova);
Thierry Reding89184652014-04-16 09:24:44 +0200527 struct tegra_smmu *smmu = as->smmu;
528 struct page *page;
529 unsigned int i;
530
Russell King853520f2015-07-27 13:29:26 +0100531 if (!as->pts[pde]) {
Thierry Reding89184652014-04-16 09:24:44 +0200532 page = alloc_page(GFP_KERNEL | __GFP_DMA);
533 if (!page)
534 return NULL;
535
536 pt = page_address(page);
537 SetPageReserved(page);
538
539 for (i = 0; i < SMMU_NUM_PTE; i++)
540 pt[i] = 0;
541
Russell King853520f2015-07-27 13:29:26 +0100542 as->pts[pde] = page;
543
Russell King4b3c7d12015-07-27 13:29:36 +0100544 smmu_flush_dcache(page, 0, SMMU_SIZE_PT);
Thierry Reding89184652014-04-16 09:24:44 +0200545
546 pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
547
Russell King4b3c7d12015-07-27 13:29:36 +0100548 smmu_flush_dcache(as->pd, pde << 2, 4);
Thierry Reding89184652014-04-16 09:24:44 +0200549 smmu_flush_ptc(smmu, as->pd, pde << 2);
550 smmu_flush_tlb_section(smmu, as->id, iova);
551 smmu_flush(smmu);
552 } else {
Russell King853520f2015-07-27 13:29:26 +0100553 page = as->pts[pde];
Thierry Reding89184652014-04-16 09:24:44 +0200554 }
555
556 *pagep = page;
557
Russell King0b42c7c2015-07-27 13:29:21 +0100558 pt = page_address(page);
559
Thierry Reding89184652014-04-16 09:24:44 +0200560 /* Keep track of entries in this page table. */
Russell King0b42c7c2015-07-27 13:29:21 +0100561 if (pt[iova_pt_index(iova)] == 0)
Russell King32924c72015-07-27 13:29:31 +0100562 as->count[pde]++;
Thierry Reding89184652014-04-16 09:24:44 +0200563
Russell King0b42c7c2015-07-27 13:29:21 +0100564 return tegra_smmu_pte_offset(page, iova);
Thierry Reding89184652014-04-16 09:24:44 +0200565}
566
Russell Kingb98e34f2015-07-27 13:29:05 +0100567static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
Thierry Reding89184652014-04-16 09:24:44 +0200568{
Russell Kingb98e34f2015-07-27 13:29:05 +0100569 struct tegra_smmu *smmu = as->smmu;
Russell King34d35f82015-07-27 13:29:16 +0100570 unsigned int pde = iova_pd_index(iova);
Russell Kingb98e34f2015-07-27 13:29:05 +0100571 u32 *pd = page_address(as->pd);
Russell King853520f2015-07-27 13:29:26 +0100572 struct page *page = as->pts[pde];
Thierry Reding89184652014-04-16 09:24:44 +0200573
574 /*
575 * When no entries in this page table are used anymore, return the
576 * memory page to the system.
577 */
Russell King32924c72015-07-27 13:29:31 +0100578 if (--as->count[pde] == 0) {
Russell Kingb98e34f2015-07-27 13:29:05 +0100579 unsigned int offset = pde * sizeof(*pd);
Thierry Reding89184652014-04-16 09:24:44 +0200580
Russell Kingb98e34f2015-07-27 13:29:05 +0100581 /* Clear the page directory entry first */
582 pd[pde] = 0;
583
584 /* Flush the page directory entry */
Russell King4b3c7d12015-07-27 13:29:36 +0100585 smmu_flush_dcache(as->pd, offset, sizeof(*pd));
Russell Kingb98e34f2015-07-27 13:29:05 +0100586 smmu_flush_ptc(smmu, as->pd, offset);
587 smmu_flush_tlb_section(smmu, as->id, iova);
588 smmu_flush(smmu);
589
590 /* Finally, free the page */
591 ClearPageReserved(page);
592 __free_page(page);
Russell King853520f2015-07-27 13:29:26 +0100593 as->pts[pde] = NULL;
Thierry Reding89184652014-04-16 09:24:44 +0200594 }
595}
596
Russell King8482ee5e2015-07-27 13:29:10 +0100597static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
598 u32 *pte, struct page *pte_page, u32 val)
599{
600 struct tegra_smmu *smmu = as->smmu;
601 unsigned long offset = offset_in_page(pte);
602
603 *pte = val;
604
Russell King4b3c7d12015-07-27 13:29:36 +0100605 smmu_flush_dcache(pte_page, offset, 4);
Russell King8482ee5e2015-07-27 13:29:10 +0100606 smmu_flush_ptc(smmu, pte_page, offset);
607 smmu_flush_tlb_group(smmu, as->id, iova);
608 smmu_flush(smmu);
609}
610
Thierry Reding89184652014-04-16 09:24:44 +0200611static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
612 phys_addr_t paddr, size_t size, int prot)
613{
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100614 struct tegra_smmu_as *as = to_smmu_as(domain);
Thierry Reding89184652014-04-16 09:24:44 +0200615 struct page *page;
616 u32 *pte;
617
618 pte = as_get_pte(as, iova, &page);
619 if (!pte)
Hiroshi Doyu0547c2f2012-06-25 14:23:57 +0300620 return -ENOMEM;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200621
Russell King8482ee5e2015-07-27 13:29:10 +0100622 tegra_smmu_set_pte(as, iova, pte, page,
623 __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
Thierry Reding89184652014-04-16 09:24:44 +0200624
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200625 return 0;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200626}
627
Thierry Reding89184652014-04-16 09:24:44 +0200628static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
629 size_t size)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200630{
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100631 struct tegra_smmu_as *as = to_smmu_as(domain);
Russell King0b42c7c2015-07-27 13:29:21 +0100632 struct page *pte_page;
Thierry Reding89184652014-04-16 09:24:44 +0200633 u32 *pte;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200634
Russell King0b42c7c2015-07-27 13:29:21 +0100635 pte = tegra_smmu_pte_lookup(as, iova, &pte_page);
Russell Kingb98e34f2015-07-27 13:29:05 +0100636 if (!pte || !*pte)
Thierry Reding89184652014-04-16 09:24:44 +0200637 return 0;
Hiroshi Doyu39abf8a2012-08-02 11:46:40 +0300638
Russell King0b42c7c2015-07-27 13:29:21 +0100639 tegra_smmu_set_pte(as, iova, pte, pte_page, 0);
Russell Kingb98e34f2015-07-27 13:29:05 +0100640 tegra_smmu_pte_put_use(as, iova);
641
Thierry Reding89184652014-04-16 09:24:44 +0200642 return size;
643}
644
645static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
646 dma_addr_t iova)
647{
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100648 struct tegra_smmu_as *as = to_smmu_as(domain);
Russell King0b42c7c2015-07-27 13:29:21 +0100649 struct page *pte_page;
Thierry Reding89184652014-04-16 09:24:44 +0200650 unsigned long pfn;
651 u32 *pte;
652
Russell King0b42c7c2015-07-27 13:29:21 +0100653 pte = tegra_smmu_pte_lookup(as, iova, &pte_page);
Russell King91137852015-07-27 13:29:00 +0100654 if (!pte || !*pte)
655 return 0;
656
Thierry Reding804cb542015-03-27 11:07:27 +0100657 pfn = *pte & as->smmu->pfn_mask;
Thierry Reding89184652014-04-16 09:24:44 +0200658
659 return PFN_PHYS(pfn);
660}
661
662static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
663{
664 struct platform_device *pdev;
665 struct tegra_mc *mc;
666
667 pdev = of_find_device_by_node(np);
668 if (!pdev)
669 return NULL;
670
671 mc = platform_get_drvdata(pdev);
672 if (!mc)
673 return NULL;
674
675 return mc->smmu;
676}
677
/*
 * Bus notifier hook: record the device's SMMU (found via its first
 * matching "iommus" DT phandle) in dev->archdata.iommu.  Always
 * returns 0; devices without an SMMU are simply left untouched.
 */
static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;
			break;
		}

		index++;
	}

	return 0;
}
704
Thierry Reding89184652014-04-16 09:24:44 +0200705static void tegra_smmu_remove_device(struct device *dev)
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200706{
Thierry Reding89184652014-04-16 09:24:44 +0200707 dev->archdata.iommu = NULL;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200708}
709
Thierry Reding89184652014-04-16 09:24:44 +0200710static const struct iommu_ops tegra_smmu_ops = {
711 .capable = tegra_smmu_capable,
Joerg Roedeld5f1a812015-03-26 13:43:12 +0100712 .domain_alloc = tegra_smmu_domain_alloc,
713 .domain_free = tegra_smmu_domain_free,
Thierry Reding89184652014-04-16 09:24:44 +0200714 .attach_dev = tegra_smmu_attach_dev,
715 .detach_dev = tegra_smmu_detach_dev,
716 .add_device = tegra_smmu_add_device,
717 .remove_device = tegra_smmu_remove_device,
718 .map = tegra_smmu_map,
719 .unmap = tegra_smmu_unmap,
720 .map_sg = default_iommu_map_sg,
721 .iova_to_phys = tegra_smmu_iova_to_phys,
722
723 .pgsize_bitmap = SZ_4K,
724};
725
/*
 * If the DT has a Tegra30-compatible AHB node, ask the AHB driver to
 * route memory traffic through the SMMU.  A no-op otherwise.
 */
static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}
740
Thierry Redingd1313e72015-01-23 09:49:25 +0100741static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
742{
743 struct tegra_smmu *smmu = s->private;
744 unsigned int i;
745 u32 value;
746
747 seq_printf(s, "swgroup enabled ASID\n");
748 seq_printf(s, "------------------------\n");
749
750 for (i = 0; i < smmu->soc->num_swgroups; i++) {
751 const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
752 const char *status;
753 unsigned int asid;
754
755 value = smmu_readl(smmu, group->reg);
756
757 if (value & SMMU_ASID_ENABLE)
758 status = "yes";
759 else
760 status = "no";
761
762 asid = value & SMMU_ASID_MASK;
763
764 seq_printf(s, "%-9s %-7s %#04x\n", group->name, status,
765 asid);
766 }
767
768 return 0;
769}
770
/* seq_file open hook for the "swgroups" debugfs entry. */
static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}
775
/* File operations for the read-only "swgroups" debugfs entry. */
static const struct file_operations tegra_smmu_swgroups_fops = {
	.open = tegra_smmu_swgroups_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
782
783static int tegra_smmu_clients_show(struct seq_file *s, void *data)
784{
785 struct tegra_smmu *smmu = s->private;
786 unsigned int i;
787 u32 value;
788
789 seq_printf(s, "client enabled\n");
790 seq_printf(s, "--------------------\n");
791
792 for (i = 0; i < smmu->soc->num_clients; i++) {
793 const struct tegra_mc_client *client = &smmu->soc->clients[i];
794 const char *status;
795
796 value = smmu_readl(smmu, client->smmu.reg);
797
798 if (value & BIT(client->smmu.bit))
799 status = "yes";
800 else
801 status = "no";
802
803 seq_printf(s, "%-12s %s\n", client->name, status);
804 }
805
806 return 0;
807}
808
/* seq_file open hook for the "clients" debugfs entry. */
static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_clients_show, inode->i_private);
}
813
/* File operations for the read-only "clients" debugfs entry. */
static const struct file_operations tegra_smmu_clients_fops = {
	.open = tegra_smmu_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
820
/* Create the "smmu" debugfs directory with its two status files. */
static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}
832
/* Tear down the debugfs tree created by tegra_smmu_debugfs_init(). */
static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}
837
Thierry Reding89184652014-04-16 09:24:44 +0200838struct tegra_smmu *tegra_smmu_probe(struct device *dev,
839 const struct tegra_smmu_soc *soc,
840 struct tegra_mc *mc)
841{
842 struct tegra_smmu *smmu;
843 size_t size;
844 u32 value;
845 int err;
Hiroshi DOYU7a31f6f2011-11-17 07:31:31 +0200846
Thierry Reding89184652014-04-16 09:24:44 +0200847 /* This can happen on Tegra20 which doesn't have an SMMU */
848 if (!soc)
849 return NULL;
850
851 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
852 if (!smmu)
853 return ERR_PTR(-ENOMEM);
854
855 /*
856 * This is a bit of a hack. Ideally we'd want to simply return this
857 * value. However the IOMMU registration process will attempt to add
858 * all devices to the IOMMU when bus_set_iommu() is called. In order
859 * not to rely on global variables to track the IOMMU instance, we
860 * set it here so that it can be looked up from the .add_device()
861 * callback via the IOMMU device's .drvdata field.
862 */
863 mc->smmu = smmu;
864
865 size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);
866
867 smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
868 if (!smmu->asids)
869 return ERR_PTR(-ENOMEM);
870
871 mutex_init(&smmu->lock);
872
873 smmu->regs = mc->regs;
874 smmu->soc = soc;
875 smmu->dev = dev;
876 smmu->mc = mc;
877
Thierry Reding804cb542015-03-27 11:07:27 +0100878 smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
879 dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
880 mc->soc->num_address_bits, smmu->pfn_mask);
881
Thierry Reding89184652014-04-16 09:24:44 +0200882 value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
883
884 if (soc->supports_request_limit)
885 value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);
886
887 smmu_writel(smmu, value, SMMU_PTC_CONFIG);
888
889 value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
890 SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
891
892 if (soc->supports_round_robin_arbitration)
893 value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
894
895 smmu_writel(smmu, value, SMMU_TLB_CONFIG);
896
897 smmu_flush_ptc(smmu, NULL, 0);
898 smmu_flush_tlb(smmu);
899 smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
900 smmu_flush(smmu);
901
902 tegra_smmu_ahb_enable();
903
904 err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
905 if (err < 0)
906 return ERR_PTR(err);
907
Thierry Redingd1313e72015-01-23 09:49:25 +0100908 if (IS_ENABLED(CONFIG_DEBUG_FS))
909 tegra_smmu_debugfs_init(smmu);
910
Thierry Reding89184652014-04-16 09:24:44 +0200911 return smmu;
912}
Thierry Redingd1313e72015-01-23 09:49:25 +0100913
/* Driver teardown: currently only removes the debugfs entries. */
void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}