/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static struct ida iommu_group_ida;
static struct mutex iommu_group_mutex;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
};

struct iommu_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =	\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	mutex_lock(&iommu_group_mutex);
	ida_remove(&iommu_group_ida, group->id);
	mutex_unlock(&iommu_group_mutex);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 * @name: Optional name to associate with group, visible in sysfs
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	mutex_lock(&iommu_group_mutex);

again:
	if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
		kfree(group);
		mutex_unlock(&iommu_group_mutex);
		return ERR_PTR(-ENOMEM);
	}

	if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
		goto again;

	mutex_unlock(&iommu_group_mutex);

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		mutex_lock(&iommu_group_mutex);
		ida_remove(&iommu_group_ida, group->id);
		mutex_unlock(&iommu_group_mutex);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
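
/*
 * Usage sketch (illustrative only, not part of the original file): an IOMMU
 * driver that has decided a device needs a new group would typically pair
 * the calls above roughly as follows, dropping its allocation reference once
 * the device has been added (error handling omitted; "dev" is assumed to be
 * the device being set up):
 *
 *	group = iommu_group_alloc();
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	ret = iommu_group_add_device(group, dev);
 *	iommu_group_put(group);
 */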

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct iommu_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret) {
		kfree(device);
		return ret;
	}

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return -ENOMEM;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		kfree(device->name);
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and append an instance to the name.
			 */
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}

		sysfs_remove_link(&dev->kobj, "iommu_group");
		kfree(device);
		return ret;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	mutex_unlock(&group->mutex);

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_device *tmp_device, *device = NULL;

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	struct iommu_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
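
/*
 * Illustrative sketch (not part of the original file): a group user could
 * count the devices in a group with iommu_group_for_each_dev() using a
 * callback of the required signature.  The helper below is hypothetical.
 *
 *	static int count_one(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	iommu_group_for_each_dev(group, &count, count_one);
 */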

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned with its
 * reference count incremented; otherwise NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
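
/*
 * Illustrative sketch (not from the original file): a group user might watch
 * for device additions with a notifier block.  The callback name is
 * hypothetical; the action values are the IOMMU_GROUP_NOTIFY_* constants
 * republished by this file.
 *
 *	static int my_group_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "joined the watched group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_group_notify };
 *	iommu_group_register_notifier(group, &my_nb);
 */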

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
{
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	struct pci_dev *tmp;

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Next we need to consider DMA alias quirks.  If one device aliases
	 * to another, they should be grouped together.  It's theoretically
	 * possible that aliases could create chains of devices where each
	 * device aliases another device.  If we then factor in multifunction
	 * ACS grouping requirements, each alias could incorporate a new slot
	 * with multiple functions, each with aliases.  This is all extremely
	 * unlikely as DMA alias quirks are typically only used for PCIe
	 * devices where we usually have a single slot per bus.  Furthermore,
	 * the alias quirk is usually to another function within the slot
	 * (and ACS multifunction is not supported) or to a different slot
	 * that doesn't physically exist.  The likely scenario is therefore
	 * that everything on the bus gets grouped together.  To reduce the
	 * problem space, share the IOMMU group for all devices on the bus
	 * if a DMA alias quirk is present on the bus.
	 */
	tmp = NULL;
	for_each_pci_dev(tmp) {
		if (tmp->bus != pdev->bus ||
		    !(tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN))
			continue;

		pci_dev_put(tmp);
		tmp = NULL;

		/* We have an alias quirk, search for an existing group */
		for_each_pci_dev(tmp) {
			struct iommu_group *group_tmp;

			if (tmp->bus != pdev->bus)
				continue;

			group_tmp = iommu_group_get(&tmp->dev);
			if (!group) {
				group = group_tmp;
				continue;
			}

			if (group_tmp) {
				WARN_ON(group != group_tmp);
				iommu_group_put(group_tmp);
			}
		}

		return group ? group : iommu_group_alloc();
	}

	/*
	 * Non-multifunction devices or multifunction devices supporting
	 * ACS get their own group.
	 */
	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return iommu_group_alloc();

	/*
	 * Multifunction devices not supporting ACS share a group with other
	 * similar devices in the same slot.
	 */
	tmp = NULL;
	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = iommu_group_get(&tmp->dev);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-EIO);
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (dev_is_pci(dev))
		group = iommu_group_get_for_pci_dev(to_pci_dev(dev));

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}
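
/*
 * Illustrative sketch (not from the original file): a PCI IOMMU driver's
 * add_device callback would typically rely on iommu_group_get_for_dev() to
 * apply the topology/ACS policy above and then drop its reference.  The
 * callback name is hypothetical.
 *
 *	static int my_iommu_add_device(struct device *dev)
 *	{
 *		struct iommu_group *group;
 *
 *		group = iommu_group_get_for_dev(dev);
 *		if (IS_ERR(group))
 *			return PTR_ERR(group);
 *
 *		iommu_group_put(group);
 *		return 0;
 *	}
 */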

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_ops *ops = data;

	if (!ops->add_device)
		return -ENODEV;

	WARN_ON(dev->iommu_group);

	ops->add_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (ops->add_device)
			return ops->add_device(dev);
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		if (ops->remove_device && dev->iommu_group) {
			ops->remove_device(dev);
			return 0;
		}
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static struct notifier_block iommu_bus_nb = {
	.notifier_call = iommu_bus_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
	bus_register_notifier(bus, &iommu_bus_nb);
	bus_for_each_dev(bus, NULL, ops, add_iommu_group);
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus.  Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up.  With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	iommu_bus_init(bus, ops);

	return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
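
/*
 * Illustrative sketch (not from the original file): an IOMMU driver for PCI
 * devices would typically register its callbacks at init time.  The ops
 * structure and init function shown here are hypothetical; only the calls
 * into this file and the ops field names it dereferences are real.
 *
 *	static struct iommu_ops my_iommu_ops = {
 *		.domain_init	= my_domain_init,
 *		.attach_dev	= my_attach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 *
 *	static int __init my_iommu_init(void)
 *	{
 *		return bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 *	}
 */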

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
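
/*
 * Illustrative sketch (not from the original file): an IOMMU user can install
 * a fault handler on its domain.  The handler name and message are
 * hypothetical; the signature follows iommu_fault_handler_t.
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */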

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	struct iommu_domain *domain;
	int ret;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->ops = bus->iommu_ops;

	ret = domain->ops->domain_init(domain);
	if (ret)
		goto out_free;

	return domain;

out_free:
	kfree(domain);

	return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	if (likely(domain->ops->domain_destroy != NULL))
		domain->ops->domain_destroy(domain);

	kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return iommu_attach_device(domain, dev);
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	return iommu_group_for_each_dev(group, domain,
					iommu_group_do_attach_device);
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	iommu_detach_device(domain, dev);

	return 0;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
			 unsigned long cap)
{
	if (unlikely(domain->ops->domain_has_cap == NULL))
		return 0;

	return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);
		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->ops->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}
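
/*
 * Worked example (illustrative, not from the original file): with a
 * pgsize_bitmap of 4K | 2M | 1G, mapping iova 0x201000 to paddr 0x401000
 * with size 0x10000 gives addr_merge = 0x601000, so the address alignment
 * limits the chosen page size to 4K even though 0x10000 bytes remain;
 * iommu_map() below would therefore issue sixteen 4K mappings for that range.
 */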

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	int ret = 0;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(iova, paddr, size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	unsigned int min_pagesz;

	if (unlikely(domain->ops->unmap == NULL ||
		     domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(iova, 0, size);
	return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
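
/*
 * Illustrative sketch (not from the original file): a typical caller maps a
 * physically contiguous region into its domain and later tears it down.
 * Addresses and sizes are made up; both must be aligned to the smallest page
 * size advertised in the driver's pgsize_bitmap.
 *
 *	ret = iommu_map(domain, 0x100000, page_to_phys(page), SZ_2M,
 *			IOMMU_READ | IOMMU_WRITE);
 *	...
 *	unmapped = iommu_unmap(domain, 0x100000, SZ_2M);
 */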

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	ida_init(&iommu_group_ida);
	mutex_init(&iommu_group_mutex);

	BUG_ON(!iommu_group_kset);

	return 0;
}
arch_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging = data;
		*paging = (domain->ops->pgsize_bitmap != 0UL);
		break;
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_get_windows != NULL)
			*count = domain->ops->domain_get_windows(domain);
		else
			ret = -ENODEV;

		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
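
/*
 * Illustrative sketch (not from the original file): a domain user can query
 * the addressable range before choosing IOVAs; the "geo" variable is local
 * to the example.
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo) &&
 *	    geo.force_aperture)
 *		pr_info("usable iova range: %pad - %pad\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */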

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;
	u32 *count;

	switch (attr) {
	case DOMAIN_ATTR_WINDOWS:
		count = data;

		if (domain->ops->domain_set_windows != NULL)
			ret = domain->ops->domain_set_windows(domain, *count);
		else
			ret = -ENODEV;

		break;
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);