blob: b278458d58168e81e92144f450b7b89650383846 [file] [log] [blame]
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
18
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +020019#define pr_fmt(fmt) "%s: " fmt, __func__
20
Joerg Roedel905d66c2011-09-06 16:03:26 +020021#include <linux/device.h>
Ohad Ben-Cohen40998182011-09-02 13:32:32 -040022#include <linux/kernel.h>
Joerg Roedelfc2100e2008-11-26 17:21:24 +010023#include <linux/bug.h>
24#include <linux/types.h>
Andrew Morton60db4022009-05-06 16:03:07 -070025#include <linux/module.h>
26#include <linux/slab.h>
Joerg Roedelfc2100e2008-11-26 17:21:24 +010027#include <linux/errno.h>
28#include <linux/iommu.h>
29
/*
 * Placeholder for bus-specific IOMMU setup; intentionally empty for now.
 * Called once from bus_set_iommu() after the ops have been installed on
 * the bus.
 */
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
}
33
Joerg Roedelff217762011-08-26 16:48:26 +020034/**
35 * bus_set_iommu - set iommu-callbacks for the bus
36 * @bus: bus.
37 * @ops: the callbacks provided by the iommu-driver
38 *
39 * This function is called by an iommu driver to set the iommu methods
40 * used for a particular bus. Drivers for devices on that bus can use
41 * the iommu-api after these ops are registered.
42 * This special function is needed because IOMMUs are usually devices on
43 * the bus itself, so the iommu drivers are not initialized when the bus
44 * is set up. With this function the iommu-driver can set the iommu-ops
45 * afterwards.
46 */
47int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
Joerg Roedelfc2100e2008-11-26 17:21:24 +010048{
Joerg Roedelff217762011-08-26 16:48:26 +020049 if (bus->iommu_ops != NULL)
50 return -EBUSY;
Joerg Roedelfc2100e2008-11-26 17:21:24 +010051
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +020052 /*
53 * Set the default pgsize values, which retain the existing
54 * IOMMU API behavior: drivers will be called to map
55 * regions that are sized/aligned to order of 4KiB pages.
56 *
57 * This will be removed once all drivers are migrated.
58 */
59 if (!ops->pgsize_bitmap)
60 ops->pgsize_bitmap = ~0xFFFUL;
61
Joerg Roedelff217762011-08-26 16:48:26 +020062 bus->iommu_ops = ops;
63
64 /* Do IOMMU specific setup for this bus-type */
65 iommu_bus_init(bus, ops);
66
67 return 0;
Joerg Roedelfc2100e2008-11-26 17:21:24 +010068}
Joerg Roedelff217762011-08-26 16:48:26 +020069EXPORT_SYMBOL_GPL(bus_set_iommu);
70
Joerg Roedela1b60c12011-09-06 18:46:34 +020071bool iommu_present(struct bus_type *bus)
Joerg Roedelfc2100e2008-11-26 17:21:24 +010072{
Joerg Roedel94441c32011-09-06 18:58:54 +020073 return bus->iommu_ops != NULL;
Joerg Roedelfc2100e2008-11-26 17:21:24 +010074}
Joerg Roedela1b60c12011-09-06 18:46:34 +020075EXPORT_SYMBOL_GPL(iommu_present);
Joerg Roedelfc2100e2008-11-26 17:21:24 +010076
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -040077/**
78 * iommu_set_fault_handler() - set a fault handler for an iommu domain
79 * @domain: iommu domain
80 * @handler: fault handler
Ohad Ben-Cohen0ed6d2d2011-09-27 07:36:40 -040081 *
82 * This function should be used by IOMMU users which want to be notified
83 * whenever an IOMMU fault happens.
84 *
85 * The fault handler itself should return 0 on success, and an appropriate
86 * error code otherwise.
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -040087 */
88void iommu_set_fault_handler(struct iommu_domain *domain,
89 iommu_fault_handler_t handler)
90{
91 BUG_ON(!domain);
92
93 domain->handler = handler;
94}
Ohad Ben-Cohen30bd9182011-09-26 09:11:46 -040095EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
Ohad Ben-Cohen4f3f8d92011-09-13 15:25:23 -040096
Joerg Roedel905d66c2011-09-06 16:03:26 +020097struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
Joerg Roedelfc2100e2008-11-26 17:21:24 +010098{
99 struct iommu_domain *domain;
100 int ret;
101
Joerg Roedel94441c32011-09-06 18:58:54 +0200102 if (bus == NULL || bus->iommu_ops == NULL)
Joerg Roedel905d66c2011-09-06 16:03:26 +0200103 return NULL;
104
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100105 domain = kmalloc(sizeof(*domain), GFP_KERNEL);
106 if (!domain)
107 return NULL;
108
Joerg Roedel94441c32011-09-06 18:58:54 +0200109 domain->ops = bus->iommu_ops;
Joerg Roedel905d66c2011-09-06 16:03:26 +0200110
Joerg Roedel94441c32011-09-06 18:58:54 +0200111 ret = domain->ops->domain_init(domain);
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100112 if (ret)
113 goto out_free;
114
115 return domain;
116
117out_free:
118 kfree(domain);
119
120 return NULL;
121}
122EXPORT_SYMBOL_GPL(iommu_domain_alloc);
123
124void iommu_domain_free(struct iommu_domain *domain)
125{
Joerg Roedele5aa7f02011-09-06 16:44:29 +0200126 if (likely(domain->ops->domain_destroy != NULL))
127 domain->ops->domain_destroy(domain);
128
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100129 kfree(domain);
130}
131EXPORT_SYMBOL_GPL(iommu_domain_free);
132
133int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
134{
Joerg Roedele5aa7f02011-09-06 16:44:29 +0200135 if (unlikely(domain->ops->attach_dev == NULL))
136 return -ENODEV;
137
138 return domain->ops->attach_dev(domain, dev);
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100139}
140EXPORT_SYMBOL_GPL(iommu_attach_device);
141
142void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
143{
Joerg Roedele5aa7f02011-09-06 16:44:29 +0200144 if (unlikely(domain->ops->detach_dev == NULL))
145 return;
146
147 domain->ops->detach_dev(domain, dev);
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100148}
149EXPORT_SYMBOL_GPL(iommu_detach_device);
150
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100151phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
152 unsigned long iova)
153{
Joerg Roedele5aa7f02011-09-06 16:44:29 +0200154 if (unlikely(domain->ops->iova_to_phys == NULL))
155 return 0;
156
157 return domain->ops->iova_to_phys(domain, iova);
Joerg Roedelfc2100e2008-11-26 17:21:24 +0100158}
159EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
Sheng Yangdbb9fd82009-03-18 15:33:06 +0800160
161int iommu_domain_has_cap(struct iommu_domain *domain,
162 unsigned long cap)
163{
Joerg Roedele5aa7f02011-09-06 16:44:29 +0200164 if (unlikely(domain->ops->domain_has_cap == NULL))
165 return 0;
166
167 return domain->ops->domain_has_cap(domain, cap);
Sheng Yangdbb9fd82009-03-18 15:33:06 +0800168}
169EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
Joerg Roedelcefc53c2010-01-08 13:35:09 +0100170
/*
 * iommu_map - map a physically contiguous region into the domain
 * @domain: iommu domain
 * @iova: io virtual address to map at
 * @paddr: physical address to map
 * @size: size of the region in bytes
 * @prot: protection flags passed through to the driver
 *
 * iova, paddr and size must all be aligned to the smallest page size the
 * hardware supports (per ops->pgsize_bitmap).  The region is mapped in
 * chunks, each chunk using the largest hardware page size that fits the
 * remaining size and the current iova/paddr alignment.  On driver failure
 * the already-mapped prefix is unmapped again.
 *
 * Returns 0 on success, -ENODEV/-EINVAL or a driver error code otherwise.
 */
int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;	/* kept for the error-unroll below */
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
			"0x%x\n", iova, (unsigned long)paddr,
			(unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
				(unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements ? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/*
		 * make sure we're still sane; the alignment check above
		 * guarantees at least the minimum page size survives
		 */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
					(unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
247
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +0200248size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
Joerg Roedelcefc53c2010-01-08 13:35:09 +0100249{
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +0200250 size_t unmapped_page, unmapped = 0;
251 unsigned int min_pagesz;
Joerg Roedelcefc53c2010-01-08 13:35:09 +0100252
Joerg Roedele5aa7f02011-09-06 16:44:29 +0200253 if (unlikely(domain->ops->unmap == NULL))
254 return -ENODEV;
Joerg Roedelcefc53c2010-01-08 13:35:09 +0100255
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +0200256 /* find out the minimum page size supported */
257 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
Joerg Roedelcefc53c2010-01-08 13:35:09 +0100258
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +0200259 /*
260 * The virtual address, as well as the size of the mapping, must be
261 * aligned (at least) to the size of the smallest page supported
262 * by the hardware
263 */
264 if (!IS_ALIGNED(iova | size, min_pagesz)) {
265 pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
266 iova, (unsigned long)size, min_pagesz);
267 return -EINVAL;
268 }
Joerg Roedelcefc53c2010-01-08 13:35:09 +0100269
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +0200270 pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
271 (unsigned long)size);
Ohad Ben-Cohen50090652011-11-10 11:32:25 +0200272
Ohad Ben-Cohen7d3002c2011-11-10 11:32:26 +0200273 /*
274 * Keep iterating until we either unmap 'size' bytes (or more)
275 * or we hit an area that isn't mapped.
276 */
277 while (unmapped < size) {
278 size_t left = size - unmapped;
279
280 unmapped_page = domain->ops->unmap(domain, iova, left);
281 if (!unmapped_page)
282 break;
283
284 pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
285 (unsigned long)unmapped_page);
286
287 iova += unmapped_page;
288 unmapped += unmapped_page;
289 }
290
291 return unmapped;
Joerg Roedelcefc53c2010-01-08 13:35:09 +0100292}
293EXPORT_SYMBOL_GPL(iommu_unmap);