/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include <mach/iommu_hw-8xxx.h>
#include <mach/iommu.h>

DEFINE_SPINLOCK(msm_iommu_lock);

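/* Per-domain driver state: the first-level page table and the list of
 * context banks currently attached to this domain.
 */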
struct msm_priv {
	unsigned long *pgtable;
	struct list_head list_attached;
};

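/* Invalidate the TLB of every context attached to this domain. When the
 * page tables are not L2-cacheable, clean them out of the CPU caches first
 * so that the IOMMU's hardware table walker sees the updated entries.
 */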
static void __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;

#ifndef CONFIG_IOMMU_PGTABLES_L2
	unsigned long *fl_table = priv->pgtable;
	int i;

	dmac_flush_range(fl_table, fl_table + SZ_16K);

	for (i = 0; i < NUM_FL_PTE; i++)
		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
			void *sl_table = __va(fl_table[i] & FL_BASE_MASK);
			dmac_flush_range(sl_table, sl_table + SZ_4K);
		}
#endif

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
	}
}

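/* Return a context bank to a known default state by clearing all of its
 * configuration registers.
 */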
static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
	SET_CONTEXTIDR(base, ctx, 0);
}

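/* Set up a context bank: enable the hardware table walker, install the
 * first-level page table base, configure cacheability attributes and
 * fault reporting, then enable translation.
 */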
static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> 14));

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Do not configure PRRR / NMRR on the IOMMU for now. We will assume
	 * TEX class 0 for everything until attributes are properly worked out
	 */
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

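/* Allocate the per-domain state and a 16KB first-level page table
 * (4096 entries of 4 bytes, each covering 1MB of the 4GB IOVA space).
 */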
static int msm_iommu_domain_init(struct iommu_domain *domain)
{
	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
							  get_order(SZ_16K));

	if (!priv->pgtable)
		goto fail_nomem;

	memset(priv->pgtable, 0, SZ_16K);
	domain->priv = priv;
	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

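/* Free every second-level table reachable from the first level, then the
 * first-level table itself and the domain state.
 */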
static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	int i;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

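/* Attach a context bank to this domain: program it with the domain's page
 * table and add it to the list consulted by __flush_iotlb(). Attaching the
 * same context twice fails with -EBUSY.
 */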
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	__program_context(iommu_drvdata->base, ctx_dev->num,
			  __pa(priv->pgtable));

	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	__flush_iotlb(domain);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

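/* Detach a context bank from this domain: flush the TLB, reset the context
 * and drop it from the attached list.
 */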
static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	__flush_iotlb(domain);
	__reset_context(iommu_drvdata->base, ctx_dev->num);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

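/* Map a physically contiguous region using the ARM short-descriptor format:
 * a 16MB supersection is 16 identical first-level entries, a 1MB section is
 * a single first-level entry, and 64KB/4KB pages live in a second-level
 * table (a 64KB large page being 16 identical second-level entries).
 */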
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
			 phys_addr_t pa, int order, int prot)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	size_t len = 0x1000UL << order;
	int ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv) {
		ret = -EINVAL;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad size: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (len == SZ_16M) {
		int i;

		for (i = 0; i < 16; i++)
			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
				      FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
				      FL_SHARED;
	}

	if (len == SZ_1M)
		*fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE |
			  FL_TYPE_SECT | FL_SHARED;

	/* Need a 2nd level table */
	if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
		unsigned long *sl;

		sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
							get_order(SZ_4K));

		if (!sl) {
			pr_debug("Could not allocate second level table\n");
			ret = -ENOMEM;
			goto fail;
		}

		memset(sl, 0, SZ_4K);
		*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
	}

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_4K)
		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 |
			  SL_SHARED | SL_TYPE_SMALL;

	if (len == SZ_64K) {
		int i;

		for (i = 0; i < 16; i++)
			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
				      SL_AP1 | SL_SHARED | SL_TYPE_LARGE;
	}

	__flush_iotlb(domain);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

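/* Clear the page table entries of a region mapped by msm_iommu_map(),
 * freeing a second-level table once its last entry is gone.
 */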
static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
			   int order)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	unsigned long *fl_pte;
	unsigned long fl_offset;
	unsigned long *sl_table;
	unsigned long *sl_pte;
	unsigned long sl_offset;
	size_t len = 0x1000UL << order;
	int i, ret = 0;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv) {
		ret = -ENODEV;
		goto fail;
	}

	fl_table = priv->pgtable;

	if (len != SZ_16M && len != SZ_1M &&
	    len != SZ_64K && len != SZ_4K) {
		pr_debug("Bad length: %zu\n", len);
		ret = -EINVAL;
		goto fail;
	}

	if (!fl_table) {
		pr_debug("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */

	if (*fl_pte == 0) {
		pr_debug("First level PTE is 0\n");
		ret = -ENODEV;
		goto fail;
	}

	/* Unmap supersection */
	if (len == SZ_16M)
		for (i = 0; i < 16; i++)
			*(fl_pte+i) = 0;

	if (len == SZ_1M)
		*fl_pte = 0;

	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
	sl_offset = SL_OFFSET(va);
	sl_pte = sl_table + sl_offset;

	if (len == SZ_64K) {
		for (i = 0; i < 16; i++)
			*(sl_pte+i) = 0;
	}

	if (len == SZ_4K)
		*sl_pte = 0;

	if (len == SZ_4K || len == SZ_64K) {
		int used = 0;

		for (i = 0; i < NUM_SL_PTE; i++)
			if (sl_table[i])
				used = 1;
		if (!used) {
			free_page((unsigned long)sl_table);
			*fl_pte = 0;
		}
	}

	__flush_iotlb(domain);
fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

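/* Resolve an IOVA by using the V2P (virtual-to-physical) translation
 * hardware of the first context attached to this domain, rather than by
 * walking the page tables in software.
 */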
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  unsigned long va)
{
	struct msm_priv *priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned int par;
	unsigned long flags;
	void __iomem *base;
	phys_addr_t ret = 0;
	int ctx;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;
	if (!priv || list_empty(&priv->list_attached))
		goto fail;

	ctx_drvdata = list_entry(priv->list_attached.next,
				 struct msm_iommu_ctx_drvdata, attached_elm);
	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

	base = iommu_drvdata->base;
	ctx = ctx_drvdata->num;

	/* Invalidate context TLB */
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_V2PPR_VA(base, ctx, va >> V2Pxx_VA_SHIFT);

	if (GET_FAULT(base, ctx))
		goto fail;

	par = GET_PAR(base, ctx);

	/* We are dealing with a supersection */
	if (GET_NOFAULT_SS(base, ctx))
		ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else	/* Upper 20 bits from PAR, lower 12 from VA */
		ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

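/* No optional IOMMU capabilities are advertised. */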
static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
				    unsigned long cap)
{
	return 0;
}

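/* Dump the fault-related registers of a context bank, decoding the FSR
 * status bits by name.
 */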
static void print_ctx_regs(void __iomem *base, int ctx)
{
	unsigned int fsr = GET_FSR(base, ctx);

	pr_err("FAR    = %08x    PAR    = %08x\n",
	       GET_FAR(base, ctx), GET_PAR(base, ctx));
	pr_err("FSR    = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
	       (fsr & 0x02) ? "TF " : "",
	       (fsr & 0x04) ? "AFF " : "",
	       (fsr & 0x08) ? "APF " : "",
	       (fsr & 0x10) ? "TLBMF " : "",
	       (fsr & 0x20) ? "HTWDEEF " : "",
	       (fsr & 0x40) ? "HTWSEEF " : "",
	       (fsr & 0x80) ? "MHF " : "",
	       (fsr & 0x10000) ? "SL " : "",
	       (fsr & 0x40000000) ? "SS " : "",
	       (fsr & 0x80000000) ? "MULTI " : "");

	pr_err("FSYNR0 = %08x    FSYNR1 = %08x\n",
	       GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
	pr_err("TTBR0  = %08x    TTBR1  = %08x\n",
	       GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
	pr_err("SCTLR  = %08x    ACTLR  = %08x\n",
	       GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
	pr_err("PRRR   = %08x    NMRR   = %08x\n",
	       GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

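/* Shared interrupt handler: scan every context bank on this IOMMU for a
 * pending fault, dump its registers and clear the fault status.
 */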
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
	struct msm_iommu_drvdata *drvdata = dev_id;
	void __iomem *base;
	unsigned int fsr = 0;
	int ncb = 0, i = 0;

	spin_lock(&msm_iommu_lock);

	if (!drvdata) {
		pr_err("Invalid device ID in context interrupt handler\n");
		goto fail;
	}

	base = drvdata->base;

	pr_err("===== WOAH! =====\n");
	pr_err("Unexpected IOMMU page fault!\n");
	pr_err("base = %p\n", base);

	ncb = GET_NCB(base)+1;
	for (i = 0; i < ncb; i++) {
		fsr = GET_FSR(base, i);
		if (fsr) {
			pr_err("Fault occurred in context %d.\n", i);
			pr_err("Interesting registers:\n");
			print_ctx_regs(base, i);
			SET_FSR(base, i, 0x4000000F);
		}
	}
fail:
	spin_unlock(&msm_iommu_lock);
	return IRQ_HANDLED;
}

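/* These callbacks are reached through the generic IOMMU API. A minimal
 * usage sketch, assuming the iommu_* API of this kernel generation and a
 * hypothetical client device "dev" already bound to a context bank:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc();
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, iova, paddr, get_order(SZ_4K), prot);
 *	...
 *	iommu_unmap(domain, iova, get_order(SZ_4K));
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */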
static struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.iova_to_phys = msm_iommu_iova_to_phys,
	.domain_has_cap = msm_iommu_domain_has_cap
};

static int __init msm_iommu_init(void)
{
	register_iommu(&msm_iommu_ops);
	return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");