/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>

static struct kset *iommu_group_kset;
static struct ida iommu_group_ida;
static struct mutex iommu_group_mutex;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
};

struct iommu_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)	\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	mutex_lock(&iommu_group_mutex);
	ida_remove(&iommu_group_ida, group->id);
	mutex_unlock(&iommu_group_mutex);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the returned
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	mutex_lock(&iommu_group_mutex);

again:
	if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
		kfree(group);
		mutex_unlock(&iommu_group_mutex);
		return ERR_PTR(-ENOMEM);
	}

	if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
		goto again;

	mutex_unlock(&iommu_group_mutex);

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		mutex_lock(&iommu_group_mutex);
		ida_remove(&iommu_group_ida, group->id);
		mutex_unlock(&iommu_group_mutex);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
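
/*
 * Example (hypothetical caller, not part of this file): an iommu
 * driver typically allocates a group, populates it and then drops
 * its initial reference; the group then lives as long as its devices:
 *
 *	struct iommu_group *group = iommu_group_alloc();
 *
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *
 *	ret = iommu_group_add_device(group, dev);
 *	iommu_group_put(group);
 */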

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
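
/*
 * Sketch (hypothetical names): a driver can hang per-group state off
 * the group and have it released together with the group; my_data and
 * my_data_release() stand in for driver-defined pieces:
 *
 *	iommu_group_set_iommudata(group, my_data, my_data_release);
 *	...
 *	struct my_data_type *data = iommu_group_get_iommudata(group);
 */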

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct iommu_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret) {
		kfree(device);
		return ret;
	}

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return -ENOMEM;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		kfree(device->name);
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}

		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return ret;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	mutex_unlock(&group->mutex);

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
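
/*
 * Sketch of a driver's ->add_device() callback (all my_* names are
 * hypothetical): reuse the group of an alias device if the hardware
 * cannot isolate the two, otherwise allocate a fresh group:
 *
 *	static int my_add_device(struct device *dev)
 *	{
 *		struct iommu_group *group;
 *		int ret;
 *
 *		group = my_find_alias_group(dev);
 *		if (!group)
 *			group = iommu_group_alloc();
 *		if (IS_ERR(group))
 *			return PTR_ERR(group);
 *
 *		ret = iommu_group_add_device(group, dev);
 *		iommu_group_put(group);
 *		return ret;
 *	}
 */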

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_device *tmp_device, *device = NULL;

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	struct iommu_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
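
/*
 * Example (hypothetical): count the devices in a group.  A callback
 * returning nonzero would terminate the walk early:
 *
 *	static int count_device(struct device *dev, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, count_device);
 */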

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the
 * group reference is incremented; otherwise NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
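
/*
 * Sketch (hypothetical listener): a group user can watch membership
 * and driver-binding changes like this:
 *
 *	static int my_group_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "joined the group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notify,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 */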

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_ops *ops = data;

	if (!ops->add_device)
		return -ENODEV;

	WARN_ON(dev->iommu_group);

	ops->add_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static struct notifier_block iommu_bus_nb = {
	.notifier_call = iommu_bus_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
	bus_register_notifier(bus, &iommu_bus_nb);
	bus_for_each_dev(bus, NULL, ops, add_iommu_group);
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: the bus on which to register the iommu callbacks
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus.  Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up.  With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	iommu_bus_init(bus, ops);

	return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
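
/*
 * Example (hypothetical driver registration): once its hardware is
 * initialized, an iommu driver publishes its ops for a whole bus type;
 * all my_* symbols are placeholders for driver code:
 *
 *	static struct iommu_ops my_iommu_ops = {
 *		.domain_init	= my_domain_init,
 *		.domain_destroy	= my_domain_destroy,
 *		.attach_dev	= my_attach_dev,
 *		.detach_dev	= my_detach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *	};
 *
 *	bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 */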

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users that want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
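
/*
 * Sketch (hypothetical handler): a user that wants IOMMU faults
 * reported on its own terms might install:
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		struct my_ctx *ctx = token;
 *
 *		dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *		return -ENOSYS;	(not handled, let the driver report it)
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, ctx);
 */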

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	struct iommu_domain *domain;
	int ret;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;

	ret = domain->ops->domain_init(domain);
	if (ret)
		goto out_free;

	return domain;

out_free:
	kfree(domain);

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	if (likely(domain->ops->domain_destroy != NULL))
		domain->ops->domain_destroy(domain);

	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
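
/*
 * Typical usage (hypothetical caller): allocate a domain for the bus a
 * device sits on, attach the device, map/unmap, then tear down:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENODEV;
 *	ret = iommu_attach_device(domain, dev);
 *	...
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */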

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return iommu_attach_device(domain, dev);
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	return iommu_group_for_each_dev(group, domain,
					iommu_group_do_attach_device);
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	iommu_detach_device(domain, dev);

	return 0;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);
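
/*
 * Sketch (hypothetical, in the style of group-based users like VFIO):
 * attach a whole group rather than an individual device, so no member
 * of the isolation unit is left out:
 *
 *	struct iommu_group *group = iommu_group_get(dev);
 *
 *	if (!group)
 *		return -ENODEV;
 *	ret = iommu_attach_group(domain, group);
 *	...
 *	iommu_detach_group(domain, group);
 *	iommu_group_put(group);
 */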

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
			       unsigned long iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
			 unsigned long cap)
{
	if (unlikely(domain->ops->domain_has_cap == NULL))
		return 0;

	return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
		       "0x%x\n", iova, (unsigned long)paddr,
		       (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
		 (unsigned long)paddr, (unsigned long)size);

	while (size) {
		unsigned long pgsize, addr_merge = iova | paddr;
		unsigned int pgsize_idx;

		/* Max page size that still fits into 'size' */
		pgsize_idx = __fls(size);

		/* need to consider alignment requirements ? */
		if (likely(addr_merge)) {
			/* Max page size allowed by both iova and paddr */
			unsigned int align_pgsize_idx = __ffs(addr_merge);

			pgsize_idx = min(pgsize_idx, align_pgsize_idx);
		}

		/* build a mask of acceptable page sizes */
		pgsize = (1UL << (pgsize_idx + 1)) - 1;

		/* throw away page sizes not supported by the hardware */
		pgsize &= domain->ops->pgsize_bitmap;

		/* make sure we're still sane */
		BUG_ON(!pgsize);

		/* pick the biggest page */
		pgsize_idx = __fls(pgsize);
		pgsize = 1UL << pgsize_idx;

		pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
			 (unsigned long)paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
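
/*
 * Example (hypothetical values): map 2MB of physically contiguous
 * memory read/write at a fixed IOVA; iommu_map() splits the range into
 * the largest pages both the alignment and pgsize_bitmap permit:
 *
 *	ret = iommu_map(domain, 0x10000000, paddr, SZ_2M,
 *			IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		return ret;
 *	...
 *	unmapped = iommu_unmap(domain, 0x10000000, SZ_2M);
 */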

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
		       iova, (unsigned long)size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
		 (unsigned long)size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t left = size - unmapped;

		unmapped_page = domain->ops->unmap(domain, iova, left);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
			 (unsigned long)unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
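
/*
 * Sketch (hypothetical, for window-based hardware such as PAMU): the
 * number of windows is configured through the DOMAIN_ATTR_WINDOWS
 * attribute before individual windows are enabled:
 *
 *	u32 windows = 4;
 *
 *	iommu_domain_set_attr(domain, DOMAIN_ATTR_WINDOWS, &windows);
 *	ret = iommu_domain_window_enable(domain, 0, paddr, SZ_1M);
 *	...
 *	iommu_domain_window_disable(domain, 0);
 */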

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	ida_init(&iommu_group_ida);
	mutex_init(&iommu_group_mutex);

	BUG_ON(!iommu_group_kset);

	return 0;
}
arch_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging = data;
		*paging = (domain->ops->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
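
/*
 * Example (hypothetical caller): query the addressable aperture of a
 * domain through the generic attribute interface:
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *		pr_info("aperture: 0x%llx - 0x%llx\n",
 *			(unsigned long long)geo.aperture_start,
 *			(unsigned long long)geo.aperture_end);
 */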

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);