/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>

static ssize_t show_iommu_group(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	unsigned int groupid;

	if (iommu_device_group(dev, &groupid))
		return 0;

	return sprintf(buf, "%u", groupid);
}
static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);

static int add_iommu_group(struct device *dev, void *data)
{
	unsigned int groupid;

	if (iommu_device_group(dev, &groupid) == 0)
		return device_create_file(dev, &dev_attr_iommu_group);

	return 0;
}

static int remove_iommu_group(struct device *dev)
{
	unsigned int groupid;

	if (iommu_device_group(dev, &groupid) == 0)
		device_remove_file(dev, &dev_attr_iommu_group);

	return 0;
}

static int iommu_device_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE)
		return add_iommu_group(dev, NULL);
	else if (action == BUS_NOTIFY_DEL_DEVICE)
		return remove_iommu_group(dev);

	return 0;
}

static struct notifier_block iommu_device_nb = {
	.notifier_call = iommu_device_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
	bus_register_notifier(bus, &iommu_device_nb);
	bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: the bus for which the iommu-ops are registered
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	iommu_bus_init(bus, ops);

	return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
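
/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * IOMMU driver would typically register its callbacks from its own init
 * code. All my_* names below are made up; pci_bus_type is the real PCI
 * bus_type, and the pgsize_bitmap entries are just plausible values:
 *
 *	static struct iommu_ops my_iommu_ops = {
 *		.domain_init	= my_domain_init,
 *		.attach_dev	= my_attach_dev,
 *		.detach_dev	= my_detach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 *
 *	static int __init my_iommu_init(void)
 *	{
 *		return bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 *	}
 *
 * A second caller for the same bus gets -EBUSY, as seen above.
 */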

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users that want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
					iommu_fault_handler_t handler,
					void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
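
/*
 * Example (sketch; all my_* names are hypothetical): an IOMMU user that
 * wants to handle faults itself instead of relying on the driver's default
 * reporting. The handler signature follows iommu_fault_handler_t:
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		struct my_ctx *ctx = token;
 *
 *		dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *		return -ENOSYS;	/* not handled, let the core report it */
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, ctx);
 */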

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	struct iommu_domain *domain;
	int ret;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;

	ret = domain->ops->domain_init(domain);
	if (ret)
		goto out_free;

	return domain;

out_free:
	kfree(domain);

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	if (likely(domain->ops->domain_destroy != NULL))
		domain->ops->domain_destroy(domain);

	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
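
/*
 * Example (sketch, error handling trimmed; my_dev is a hypothetical
 * struct device): the usual life cycle of a domain as seen by an IOMMU
 * API user such as KVM device assignment or VFIO:
 *
 *	struct iommu_domain *domain;
 *
 *	if (!iommu_present(my_dev->bus))
 *		return -ENODEV;
 *
 *	domain = iommu_domain_alloc(my_dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	ret = iommu_attach_device(domain, my_dev);
 *	...
 *	(use iommu_map()/iommu_unmap() on the domain)
 *	...
 *	iommu_detach_device(domain, my_dev);
 *	iommu_domain_free(domain);
 */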

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
			       unsigned long iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
			 unsigned long cap)
{
	if (unlikely(domain->ops->domain_has_cap == NULL))
		return 0;

	return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz 0x%x\n",
		       iova, (unsigned long)paddr,
		       (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
		 (unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/* make sure we're still sane */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
			 (unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
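
/*
 * Worked example of the page size selection in iommu_map() above (the
 * numbers are illustrative): assume pgsize_bitmap = SZ_4K | SZ_2M
 * (0x201000), iova = 0x200000, paddr = 0x600000 and size = 0x210000.
 *
 *   - addr_merge = 0x200000 | 0x600000 = 0x600000; __ffs() gives index 21,
 *     so alignment allows up to a 2MB page, and __fls(size) also gives 21.
 *   - The first iteration therefore maps one 2MB page, leaving
 *     size = 0x10000.
 *   - For the remainder __fls(0x10000) = 16, which is below the 2MB bit,
 *     so the last 64KB is mapped as sixteen 4KB pages.
 */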

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	if (unlikely(domain->ops->unmap == NULL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
		       iova, (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
		 (unsigned long)size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t left = size - unmapped;

		unmapped_page = domain->ops->unmap(domain, iova, left);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%lx\n", iova,
			 (unsigned long)unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
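
/*
 * Example (sketch; the domain, the IOVA and my_pages are made up): mapping
 * 1MB of physically contiguous memory for a device and tearing it down
 * again:
 *
 *	ret = iommu_map(domain, 0x100000, page_to_phys(my_pages),
 *			SZ_1M, IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		return ret;
 *	...
 *	unmapped = iommu_unmap(domain, 0x100000, SZ_1M);
 *
 * Note that iommu_unmap() returns the number of bytes actually unmapped,
 * which may be more than requested if the area was mapped with larger
 * pages (see the loop comment above).
 */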

int iommu_device_group(struct device *dev, unsigned int *groupid)
{
	if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
		return dev->bus->iommu_ops->device_group(dev, groupid);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_device_group);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	int ret = 0;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
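
/*
 * Example (sketch): querying the addressable IOVA range of a domain via
 * the generic geometry attribute handled above. The casts assume
 * dma_addr_t may be narrower than unsigned long long:
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo) &&
 *	    geo.force_aperture)
 *		pr_info("iova window: 0x%llx-0x%llx\n",
 *			(unsigned long long)geo.aperture_start,
 *			(unsigned long long)geo.aperture_end);
 */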

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	if (!domain->ops->domain_set_attr)
		return -EINVAL;

	return domain->ops->domain_set_attr(domain, attr, data);
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);