/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include <plat/iopgtable.h>

#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)
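
/*
 * Usage sketch for the iterator above; this is the same pattern
 * flush_iotlb_page() and __dump_tlb_entries() follow later in this file:
 *
 *	struct cr_regs cr;
 *	int i;
 *
 *	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
 *		if (!iotlb_cr_valid(&cr))
 *			continue;
 *		...use cr...
 *	}
 */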
35
Ohad Ben-Cohenf626b522011-06-02 01:46:12 +030036/**
37 * struct omap_iommu_domain - omap iommu domain
38 * @pgtable: the page table
39 * @iommu_dev: an omap iommu device attached to this domain. only a single
40 * iommu device can be attached for now.
41 * @lock: domain lock, should be taken when attaching/detaching
42 */
43struct omap_iommu_domain {
44 u32 *pgtable;
45 struct iommu *iommu_dev;
46 spinlock_t lock;
47};
48

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * install_iommu_arch - Install architecture-specific iommu functions
 * @ops:	a pointer to architecture-specific iommu functions
 *
 * There are several kinds of iommu algorithms (tlb, pagetable) among
 * the omap series. This interface installs such an iommu algorithm.
 **/
int install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(install_iommu_arch);

/**
 * uninstall_iommu_arch - Uninstall architecture-specific iommu functions
 * @ops:	a pointer to architecture-specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(uninstall_iommu_arch);
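
/*
 * Sketch (not from this file) of how an architecture-specific module is
 * expected to install its callbacks at init time; the omap2_* names are
 * hypothetical, but the fields mirror the arch_iommu-> uses below:
 *
 *	static const struct iommu_functions omap2_iommu_ops = {
 *		.version	= IOMMU_ARCH_VERSION,
 *		.enable		= omap2_iommu_enable,
 *		.disable	= omap2_iommu_disable,
 *		.fault_isr	= omap2_iommu_fault_isr,
 *		...
 *	};
 *
 *	static int __init omap2_iommu_init(void)
 *	{
 *		return install_iommu_arch(&omap2_iommu_ops);
 *	}
 */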

/**
 * iommu_save_ctx - Save registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_save_ctx(struct iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_save_ctx);

/**
 * iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj:	target iommu
 **/
void iommu_restore_ctx(struct iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_restore_ctx);
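
/*
 * Assumed calling sequence around PM off-mode (illustrative only): save
 * the context before the power domain goes off and restore it, from the
 * save area at obj->ctx, once power is back:
 *
 *	iommu_save_ctx(obj);
 *	...off-mode transition...
 *	iommu_restore_ctx(obj);
 */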

/**
 * iommu_arch_version - Return running iommu arch version
 **/
u32 iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(iommu_arch_version);

static int iommu_enable(struct iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (!arch_iommu)
		return -ENODEV;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}

/*
 * TLB operations
 */
void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

/*
 * In the MMU_LOCK register, 'base' marks the end of the preserved
 * (locked) entries and 'vict' selects the victim entry used for the
 * next TLB load/read (see load_iotlb_entry() below).
 */
static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:	contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
static int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:	iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_range - Clear iommu tlb entries
 * @obj:	target iommu
 * @start:	iommu device virtual address (start)
 * @end:	iommu device virtual address (end)
 *
 * Clear the iommu tlb entries which include the 'start' to 'end' range.
 **/
void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
{
	u32 da = start;

	while (da < end) {
		flush_iotlb_page(obj, da);
		/* FIXME: Optimize for multiple page size */
		da += IOPTE_SIZE;
	}
}
EXPORT_SYMBOL_GPL(flush_iotlb_range);
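
/*
 * Typical (illustrative) use: after tearing down the mappings that back
 * a region, invalidate any TLB entries still covering it:
 *
 *	flush_iotlb_range(obj, da, da + bytes);
 */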

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}

/**
 * iommu_set_twl - enable/disable table walking logic
 * @obj:	target iommu
 * @on:	enable/disable
 *
 * Function used to enable/disable TWL. If one wants to work
 * exclusively with locked TLB entries and receive notifications
 * for TLB miss, then call this function to disable TWL.
 */
void iommu_set_twl(struct iommu *obj, bool on)
{
	clk_enable(obj->clk);
	arch_iommu->set_twl(obj, on);
	clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(iommu_set_twl);
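
/*
 * Illustrative flow for a locked-TLB-only setup, assuming the caller has
 * prepared a preserved iotlb_entry 'e': load it, then turn the table
 * walker off so that every miss raises a fault instead of a walk:
 *
 *	e.prsvd = true;
 *	load_iotlb_entry(obj, &e);
 *	iommu_set_twl(obj, false);
 */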

#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);

static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(dump_tlb_entries);

int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 * H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		/* clean the D-cache line holding this pgd entry */
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		/* clean the D-cache line holding this pte entry */
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* the pte table has already been allocated */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	/* a 16MB supersection occupies 16 consecutive pgd entries */
	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	/* a 64KB large page occupies 16 consecutive pte entries */
	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
	if (!err)
		load_iotlb_entry(obj, e);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(iopgtable_store_entry);
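
/*
 * Usage sketch, mirroring the in-tree caller omap_iommu_map() below:
 * fill an iotlb_entry and store it; page size and protection bits are
 * combined into the flags argument of iotlb_init_entry():
 *
 *	struct iotlb_entry e;
 *
 *	iotlb_init_entry(&e, da, pa, MMU_CAM_PGSZ_4K | prot);
 *	err = iopgtable_store_entry(obj, &e);
 */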

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:	iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		/* note: pointer arithmetic already scales by sizeof(*iopte) */
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:	iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct iommu *obj = data;

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
			"*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
		iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * iommu_set_da_range - Set a valid device address range
 * @obj:	target iommu
 * @start:	Start of valid range
 * @end:	End of valid range
 **/
int iommu_set_da_range(struct iommu *obj, u32 start, u32 end)
{
	if (!obj)
		return -EFAULT;

	if (end < start || !IS_ALIGNED(start | end, PAGE_SIZE))
		return -EINVAL;

	obj->da_start = start;
	obj->da_end = end;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_set_da_range);
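
/*
 * Example with made-up addresses: constrain device addresses to a 256MB
 * window (both ends must be page aligned):
 *
 *	err = iommu_set_da_range(obj, 0x10000000, 0x20000000);
 */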

/**
 * omap_find_iommu_device() - find an omap iommu device by name
 * @name:	name of the iommu device
 *
 * The generic iommu API requires the caller to provide the device
 * he wishes to attach to a certain iommu domain.
 *
 * Drivers generally should not bother with this as it should just
 * be taken care of by the DMA-API using dev_archdata.
 *
 * This function is provided as an interim solution until the latter
 * materializes, and omap3isp is fully migrated to the DMA-API.
 */
struct device *omap_find_iommu_device(const char *name)
{
	return driver_find_device(&omap_iommu_driver.driver, NULL,
				  (void *)name,
				  device_match_by_alias);
}
EXPORT_SYMBOL_GPL(omap_find_iommu_device);
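
/*
 * Interim usage sketch; the name comes from platform data ("isp" in the
 * omap3isp case):
 *
 *	struct device *dev = omap_find_iommu_device("isp");
 */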

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @dev:	target omap iommu device
 * @iopgd:	page table
 **/
static struct iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
{
	int err = -ENOMEM;
	struct iommu *obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner))
		goto err_module;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

int iommu_set_isr(const char *name,
		  int (*isr)(struct iommu *obj, u32 da, u32 iommu_errs,
			     void *priv),
		  void *isr_priv)
{
	struct device *dev;
	struct iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return -ENODEV;

	obj = to_iommu(dev);
	spin_lock(&obj->iommu_lock);
	if (obj->refcount != 0) {
		spin_unlock(&obj->iommu_lock);
		return -EBUSY;
	}
	obj->isr = isr;
	obj->isr_priv = isr_priv;
	spin_unlock(&obj->iommu_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_set_isr);
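
/*
 * Sketch of a fault hook (hypothetical handler): returning 0 tells
 * iommu_fault_handler() above that the fault has been consumed:
 *
 *	static int my_isr(struct iommu *obj, u32 da, u32 errs, void *priv)
 *	{
 *		...handle or dynamically load a mapping for 'da'...
 *		return 0;
 *	}
 *
 *	err = iommu_set_isr("isp", my_isr, NULL);
 */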

/*
 * OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, int order, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 flags;
	int ret;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%zx\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = iopgtable_store_entry(oiommu, &e);
	if (ret) {
		dev_err(dev, "iopgtable_store_entry failed: %d\n", ret);
		return ret;
	}

	return 0;
}

static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			    int order)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	size_t bytes = PAGE_SIZE << order;
	size_t ret;

	dev_dbg(dev, "unmapping da 0x%lx size 0x%zx\n", da, bytes);

	ret = iopgtable_clear_entry(oiommu, da);
	if (ret != bytes) {
		dev_err(dev, "entry @ 0x%lx was %zu; not %zu\n", da, ret, bytes);
		return -EINVAL;
	}

	return 0;
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = oiommu;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = to_iommu(dev);

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		goto out;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = NULL;

out:
	spin_unlock(&omap_domain->lock);
}

static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}

/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x", *pte);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x", *pgd);
	}

	return ret;
}

static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
};
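
/*
 * Sketch of a client driving these ops through the generic iommu API of
 * this kernel generation (hypothetical caller, error handling omitted):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc();
 *	struct device *dev = omap_find_iommu_device("isp");
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, da, pa, 0, IOMMU_READ | IOMMU_WRITE);
 */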

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	register_iommu(&omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");