/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/dma.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define LOOP_TIMEOUT	100000
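/*
 * The polling loops below pair LOOP_TIMEOUT with udelay(1), so hitting the
 * timeout corresponds to roughly 100ms of busy-waiting.
 */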

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/* List of all available dev_data structures */
static LIST_HEAD(dev_data_list);
static DEFINE_SPINLOCK(dev_data_list_lock);

/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt passed on kernel cmd line.
 */
static struct protection_domain *pt_domain;

static struct iommu_ops amd_iommu_ops;

static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

static void update_domain(struct protection_domain *domain);
static int __init alloc_passthrough_domain(void);

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/

static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;

	dev_data->devid = devid;
	atomic_set(&dev_data->bind, 0);

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	return dev_data;
}

static void free_dev_data(struct iommu_dev_data *dev_data)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_del(&dev_data->dev_data_list);
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	kfree(dev_data);
}

static struct iommu_dev_data *search_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
		if (dev_data->devid == devid)
			goto out_unlock;
	}

	dev_data = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	return dev_data;
}

static struct iommu_dev_data *find_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;

	dev_data = search_dev_data(devid);

	if (dev_data == NULL)
		dev_data = alloc_dev_data(devid);

	return dev_data;
}

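/* The 16-bit device ID is the PCI bus/devfn pair: (bus << 8) | devfn. */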
static inline u16 get_device_id(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return calc_devid(pdev->bus->number, pdev->devfn);
}

static struct iommu_dev_data *get_dev_data(struct device *dev)
{
	return dev->archdata.iommu;
}

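/*
 * A device can be handled by the IOMMUv2 paging code only if it exposes all
 * three of the ATS, PRI and PASID PCIe extended capabilities.
 */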
static bool pci_iommuv2_capable(struct pci_dev *pdev)
{
	static const int caps[] = {
		PCI_EXT_CAP_ID_ATS,
		PCI_PRI_CAP,
		PCI_PASID_CAP,
	};
	int i, pos;

	for (i = 0; i < 3; ++i) {
		pos = pci_find_ext_capability(pdev, caps[i]);
		if (pos == 0)
			return false;
	}

	return true;
}

/*
 * In this function the list of preallocated protection domains is traversed to
 * find the domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
	struct dma_ops_domain *entry, *ret = NULL;
	unsigned long flags;
	u16 alias = amd_iommu_alias_table[devid];

	if (list_empty(&iommu_pd_list))
		return NULL;

	spin_lock_irqsave(&iommu_pd_list_lock, flags);

	list_for_each_entry(entry, &iommu_pd_list, list) {
		if (entry->target_dev == devid ||
		    entry->target_dev == alias) {
			ret = entry;
			break;
		}
	}

	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

	return ret;
}

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	u16 devid;

	if (!dev || !dev->dma_mask)
		return false;

	/* No device or no PCI device */
	if (dev->bus != &pci_bus_type)
		return false;

	devid = get_device_id(dev);

	/* Out of our scope? */
	if (devid > amd_iommu_last_bdf)
		return false;

	if (amd_iommu_rlookup_table[devid] == NULL)
		return false;

	return true;
}

static int iommu_init_device(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct iommu_dev_data *dev_data;
	u16 alias;

	if (dev->archdata.iommu)
		return 0;

	dev_data = find_dev_data(get_device_id(dev));
	if (!dev_data)
		return -ENOMEM;

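	/*
	 * Requests from this device may also arrive tagged with an alias ID
	 * (e.g. when routed through a PCIe-to-PCI bridge); keep a link to the
	 * alias' dev_data in that case.
	 */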
	alias = amd_iommu_alias_table[dev_data->devid];
	if (alias != dev_data->devid) {
		struct iommu_dev_data *alias_data;

		alias_data = find_dev_data(alias);
		if (alias_data == NULL) {
			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
					dev_name(dev));
			free_dev_data(dev_data);
			return -ENOTSUPP;
		}
		dev_data->alias_data = alias_data;
	}

	if (pci_iommuv2_capable(pdev)) {
		struct amd_iommu *iommu;

		iommu = amd_iommu_rlookup_table[dev_data->devid];
		dev_data->iommu_v2 = iommu->is_iommu_v2;
	}

	dev->archdata.iommu = dev_data;

	return 0;
}

static void iommu_ignore_device(struct device *dev)
{
	u16 devid, alias;

	devid = get_device_id(dev);
	alias = amd_iommu_alias_table[devid];

	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));

	amd_iommu_rlookup_table[devid] = NULL;
	amd_iommu_rlookup_table[alias] = NULL;
}

static void iommu_uninit_device(struct device *dev)
{
	/*
	 * Nothing to do here - we keep dev_data around for unplugged devices
	 * and reuse it when the device is re-plugged - not doing so would
	 * introduce a ton of races.
	 */
}

void __init amd_iommu_uninit_devices(void)
{
	struct iommu_dev_data *dev_data, *n;
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev) {

		if (!check_device(&pdev->dev))
			continue;

		iommu_uninit_device(&pdev->dev);
	}

	/* Free all of our dev_data structures */
	list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
		free_dev_data(dev_data);
}

int __init amd_iommu_init_devices(void)
{
	struct pci_dev *pdev = NULL;
	int ret = 0;

	for_each_pci_dev(pdev) {

		if (!check_device(&pdev->dev))
			continue;

		ret = iommu_init_device(&pdev->dev);
		if (ret == -ENOTSUPP)
			iommu_ignore_device(&pdev->dev);
		else if (ret)
			goto out_free;
	}

	return 0;

out_free:

	amd_iommu_uninit_devices();

	return ret;
}
#ifdef CONFIG_AMD_IOMMU_STATS

/*
 * Initialization code for statistics collection
 */

DECLARE_STATS_COUNTER(compl_wait);
DECLARE_STATS_COUNTER(cnt_map_single);
DECLARE_STATS_COUNTER(cnt_unmap_single);
DECLARE_STATS_COUNTER(cnt_map_sg);
DECLARE_STATS_COUNTER(cnt_unmap_sg);
DECLARE_STATS_COUNTER(cnt_alloc_coherent);
DECLARE_STATS_COUNTER(cnt_free_coherent);
DECLARE_STATS_COUNTER(cross_page);
DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);

static struct dentry *stats_dir;
static struct dentry *de_fflush;

static void amd_iommu_stats_add(struct __iommu_counter *cnt)
{
	if (stats_dir == NULL)
		return;

	cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
				       &cnt->value);
}

static void amd_iommu_stats_init(void)
{
	stats_dir = debugfs_create_dir("amd-iommu", NULL);
	if (stats_dir == NULL)
		return;

	de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
					(u32 *)&amd_iommu_unmap_flush);

	amd_iommu_stats_add(&compl_wait);
	amd_iommu_stats_add(&cnt_map_single);
	amd_iommu_stats_add(&cnt_unmap_single);
	amd_iommu_stats_add(&cnt_map_sg);
	amd_iommu_stats_add(&cnt_unmap_sg);
	amd_iommu_stats_add(&cnt_alloc_coherent);
	amd_iommu_stats_add(&cnt_free_coherent);
	amd_iommu_stats_add(&cross_page);
	amd_iommu_stats_add(&domain_flush_single);
	amd_iommu_stats_add(&domain_flush_all);
	amd_iommu_stats_add(&alloced_io_mem);
	amd_iommu_stats_add(&total_map_requests);
}

#endif

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(u16 devid)
{
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
			amd_iommu_dev_table[devid].data[i]);
}

static void dump_command(unsigned long phys_addr)
{
	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
}

static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	u32 *event = __evt;
	int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	u64 address = (u64)(((u64)event[3]) << 32) | event[2];

	printk(KERN_ERR "AMD-Vi: Event logged [");

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		dump_dte_entry(devid);
		break;
	case EVENT_TYPE_IO_FAULT:
		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		dump_command(address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
	}
}

static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

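/*
 * PPR (Peripheral Page Request) log entries are generated by PRI-capable
 * devices. Each valid entry is turned into a struct amd_iommu_fault and
 * forwarded to the consumers registered on ppr_notifier.
 */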
static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
{
	struct amd_iommu_fault fault;
	volatile u64 *raw;
	int i;

	raw = (u64 *)(iommu->ppr_log + head);

	/*
	 * Hardware bug: Interrupt may arrive before the entry is written to
	 * memory. If this happens we need to wait for the entry to arrive.
	 */
	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		if (PPR_REQ_TYPE(raw[0]) != 0)
			break;
		udelay(1);
	}

	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
		return;
	}

	fault.address = raw[1];
	fault.pasid = PPR_PASID(raw[0]);
	fault.device_id = PPR_DEVID(raw[0]);
	fault.tag = PPR_TAG(raw[0]);
	fault.flags = PPR_FLAGS(raw[0]);

	/*
	 * To detect the hardware bug we need to clear the entry
	 * back to zero.
	 */
	raw[0] = raw[1] = 0;

	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}

static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
	unsigned long flags;
	u32 head, tail;

	if (iommu->ppr_log == NULL)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	while (head != tail) {

		/* Handle PPR entry */
		iommu_handle_ppr_entry(iommu, head);

		/* Update and refresh ring-buffer state */
		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
	}

	/* enable ppr interrupts again */
	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

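/*
 * Threaded half of the IOMMU interrupt: drain the event log and the PPR log
 * of every IOMMU in the system.
 */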
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_poll_events(iommu);
		iommu_poll_ppr_log(iommu);
	}

	return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

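/*
 * A COMPLETION_WAIT command with the store bit set makes the IOMMU write a
 * non-zero value to a given memory location once all commands queued before
 * it have completed. wait_on_sem() polls that location from the CPU side.
 */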
static int wait_on_sem(volatile u64 *sem)
{
	int i = 0;

	while (*sem == 0 && i < LOOP_TIMEOUT) {
		udelay(1);
		i += 1;
	}

	if (i == LOOP_TIMEOUT) {
		pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
		return -EIO;
	}

	return 0;
}

static void copy_cmd_to_buffer(struct amd_iommu *iommu,
			       struct iommu_cmd *cmd,
			       u32 tail)
{
	u8 *target;

	target = iommu->cmd_buf + tail;
	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;

	/* Copy command to buffer */
	memcpy(target, cmd, sizeof(*cmd));

	/* Tell the IOMMU about it */
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}

static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
{
	WARN_ON(address & 0x7ULL);

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
	cmd->data[1] = upper_32_bits(__pa(address));
	cmd->data[2] = 1;
	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}

static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid, int pde)
{
	u64 pages;
	int s;

	pages = iommu_num_pages(address, size, PAGE_SIZE);
	s = 0;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	address &= PAGE_MASK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[1] |= domid;
	cmd->data[2] = lower_32_bits(address);
	cmd->data[3] = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
				  u64 address, size_t size)
{
	u64 pages;
	int s;

	pages = iommu_num_pages(address, size, PAGE_SIZE);
	s = 0;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	address &= PAGE_MASK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1] = devid;
	cmd->data[2] = lower_32_bits(address);
	cmd->data[3] = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
	if (s)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}

static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
				  u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0] = pasid & PASID_MASK;
	cmd->data[1] = domid;
	cmd->data[2] = lower_32_bits(address);
	cmd->data[3] = upper_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
				  int qdep, u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

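	/* The PASID is split: bits 7:0 go into data[0], bits 19:8 into data[1]. */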
	cmd->data[0] = devid;
	cmd->data[0] |= (pasid & 0xff) << 16;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1] = devid;
	cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
	cmd->data[2] = lower_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	cmd->data[3] = upper_32_bits(address);
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}

static void build_inv_all(struct iommu_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	CMD_SET_TYPE(cmd, CMD_INV_ALL);
}

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command.
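 *
 * If the command ring is (almost) full, a completion-wait is queued first
 * and the CPU spins until the IOMMU has drained the buffer before retrying.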
 */
static int iommu_queue_command_sync(struct amd_iommu *iommu,
				    struct iommu_cmd *cmd,
				    bool sync)
{
	u32 left, tail, head, next_tail;
	unsigned long flags;

	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);

again:
	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
	left = (head - next_tail) % iommu->cmd_buf_size;

	if (left <= 2) {
		struct iommu_cmd sync_cmd;
		volatile u64 sem = 0;
		int ret;

		build_completion_wait(&sync_cmd, (u64)&sem);
		copy_cmd_to_buffer(iommu, &sync_cmd, tail);

		spin_unlock_irqrestore(&iommu->lock, flags);

		if ((ret = wait_on_sem(&sem)) != 0)
			return ret;

		goto again;
	}

	copy_cmd_to_buffer(iommu, cmd, tail);

	/* We need to sync now to make sure all commands are processed */
	iommu->need_sync = sync;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	return iommu_queue_command_sync(iommu, cmd, true);
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;
	volatile u64 sem = 0;
	int ret;

	if (!iommu->need_sync)
		return 0;

	build_completion_wait(&cmd, (u64)&sem);

	ret = iommu_queue_command_sync(iommu, &cmd, false);
	if (ret)
		return ret;

	return wait_on_sem(&sem);
}

static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_dte(&cmd, devid);

	return iommu_queue_command(iommu, &cmd);
}

static void iommu_flush_dte_all(struct amd_iommu *iommu)
{
	u32 devid;

	for (devid = 0; devid <= 0xffff; ++devid)
		iommu_flush_dte(iommu, devid);

	iommu_completion_wait(iommu);
}

/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
 */
static void iommu_flush_tlb_all(struct amd_iommu *iommu)
{
	u32 dom_id;

	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
		struct iommu_cmd cmd;
		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
				      dom_id, 1);
		iommu_queue_command(iommu, &cmd);
	}

	iommu_completion_wait(iommu);
}

static void iommu_flush_all(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;

	build_inv_all(&cmd);

	iommu_queue_command(iommu, &cmd);
	iommu_completion_wait(iommu);
}

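/*
 * Flush everything on one IOMMU: use a single INVALIDATE_ALL command when the
 * hardware advertises it (FEATURE_IA), otherwise flush all device table
 * entries and all domain TLBs individually.
 */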
void iommu_flush_all_caches(struct amd_iommu *iommu)
{
	if (iommu_feature(iommu, FEATURE_IA)) {
		iommu_flush_all(iommu);
	} else {
		iommu_flush_dte_all(iommu);
		iommu_flush_tlb_all(iommu);
	}
}

/*
 * Command send function for flushing on-device TLB
 */
static int device_flush_iotlb(struct iommu_dev_data *dev_data,
			      u64 address, size_t size)
{
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;
	int qdep;

	qdep = dev_data->ats.qdep;
	iommu = amd_iommu_rlookup_table[dev_data->devid];

	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

	return iommu_queue_command(iommu, &cmd);
}

/*
 * Command send function for invalidating a device table entry
 */
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu;
	int ret;

	iommu = amd_iommu_rlookup_table[dev_data->devid];

	ret = iommu_flush_dte(iommu, dev_data->devid);
	if (ret)
		return ret;

	if (dev_data->ats.enabled)
		ret = device_flush_iotlb(dev_data, 0, ~0UL);

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static void __domain_flush_pages(struct protection_domain *domain,
				 u64 address, size_t size, int pde)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int ret = 0, i;

	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);

	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need a TLB flush
		 */
		ret |= iommu_queue_command(amd_iommus[i], &cmd);
	}

	list_for_each_entry(dev_data, &domain->dev_list, list) {

		if (!dev_data->ats.enabled)
			continue;

		ret |= device_flush_iotlb(dev_data, address, size);
	}

	WARN_ON(ret);
}

static void domain_flush_pages(struct protection_domain *domain,
			       u64 address, size_t size)
{
	__domain_flush_pages(domain, address, size, 0);
}

/* Flush the whole IO/TLB for a given protection domain */
static void domain_flush_tlb(struct protection_domain *domain)
{
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void domain_flush_tlb_pde(struct protection_domain *domain)
{
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}

static void domain_flush_complete(struct protection_domain *domain)
{
	int i;

	for (i = 0; i < amd_iommus_present; ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need to wait for completion of all commands.
		 */
		iommu_completion_wait(amd_iommus[i]);
	}
}


/*
 * This function flushes the DTEs for all devices in domain
 */
static void domain_flush_devices(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	list_for_each_entry(dev_data, &domain->dev_list, list)
		device_flush_dte(dev_data);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
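 * A three-level table, for example, covers 3 * 9 + 12 = 39 bits of address
 * space; adding a fourth level extends that to 48 bits.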
 */
static bool increase_address_space(struct protection_domain *domain,
				   gfp_t gfp)
{
	u64 *pte;

	if (domain->mode == PAGE_MODE_6_LEVEL)
		/* address space already 64 bit large */
		return false;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		return false;

	*pte = PM_LEVEL_PDE(domain->mode,
			    virt_to_phys(domain->pt_root));
	domain->pt_root = pte;
	domain->mode += 1;
	domain->updated = true;

	return true;
}

static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp)
{
	int level, end_lvl;
	u64 *pte, *page;

	BUG_ON(!is_power_of_2(page_size));

	while (address > PM_LEVEL_SIZE(domain->mode))
		increase_address_space(domain, gfp);

	level = domain->mode - 1;
	pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		if (!IOMMU_PTE_PRESENT(*pte)) {
			page = (u64 *)get_zeroed_page(gfp);
			if (!page)
				return NULL;
			*pte = PM_LEVEL_PDE(level, virt_to_phys(page));
		}

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(*pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
{
	int level;
	u64 *pte;

	if (address > PM_LEVEL_SIZE(domain->mode))
		return NULL;

	level = domain->mode - 1;
	pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];

	while (level > 0) {

		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == 0x07) {
			unsigned long pte_mask, __pte;

			/*
			 * If we have a series of large PTEs, make
			 * sure to return a pointer to the first one.
			 */
			pte_mask = PTE_PAGE_SIZE(*pte);
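			/*
			 * Each PTE is 8 bytes, so shifting the entry count
			 * left by 3 gives the byte span of the series; the
			 * mask rounds the pointer down to its first entry.
			 */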
			pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
			__pte = ((unsigned long)pte) & pte_mask;

			return (u64 *)__pte;
		}

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte = IOMMU_PTE_PAGE(*pte);
		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map_page(struct protection_domain *dom,
			  unsigned long bus_addr,
			  unsigned long phys_addr,
			  int prot,
			  unsigned long page_size)
{
	u64 __pte, *pte;
	int i, count;

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	bus_addr = PAGE_ALIGN(bus_addr);
	phys_addr = PAGE_ALIGN(phys_addr);
	count = PAGE_SIZE_PTE_COUNT(page_size);
	pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);

	for (i = 0; i < count; ++i)
		if (IOMMU_PTE_PRESENT(pte[i]))
			return -EBUSY;

	if (page_size > PAGE_SIZE) {
		__pte = PAGE_SIZE_PTE(phys_addr, page_size);
		__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
	} else
		__pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	for (i = 0; i < count; ++i)
		pte[i] = __pte;

	update_domain(dom);

	return 0;
}

static unsigned long iommu_unmap_page(struct protection_domain *dom,
				      unsigned long bus_addr,
				      unsigned long page_size)
{
	unsigned long long unmap_size, unmapped;
	u64 *pte;

	BUG_ON(!is_power_of_2(page_size));

	unmapped = 0;

	while (unmapped < page_size) {

		pte = fetch_pte(dom, bus_addr);

		if (!pte) {
			/*
			 * No PTE for this address
			 * move forward in 4kb steps
			 */
			unmap_size = PAGE_SIZE;
		} else if (PM_PTE_LEVEL(*pte) == 0) {
			/* 4kb PTE found for this address */
			unmap_size = PAGE_SIZE;
			*pte = 0ULL;
		} else {
			int count, i;

			/* Large PTE found which maps this address */
			unmap_size = PTE_PAGE_SIZE(*pte);
			count = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				pte[i] = 0ULL;
		}

		bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	BUG_ON(!is_power_of_2(unmapped));

	return unmapped;
}

/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
			       struct unity_map_entry *entry)
{
	u16 bdf, i;

	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
		bdf = amd_iommu_alias_table[i];
		if (amd_iommu_rlookup_table[bdf] == iommu)
			return 1;
	}

	return 0;
}

/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e)
{
	u64 addr;
	int ret;

	for (addr = e->address_start; addr < e->address_end;
	     addr += PAGE_SIZE) {
		ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
				     PAGE_SIZE);
		if (ret)
			return ret;
		/*
		 * if unity mapping is in aperture range mark the page
		 * as allocated in the aperture
		 */
		if (addr < dma_dom->aperture_size)
			__set_bit(addr >> PAGE_SHIFT,
				  dma_dom->aperture[0]->bitmap);
	}

	return 0;
}

/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default domain DMA of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Inits the unity mappings required for a specific device
 */
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
					  u16 devid)
{
	struct unity_map_entry *e;
	int ret;

	list_for_each_entry(e, &amd_iommu_unity_map, list) {
		if (!(devid >= e->devid_start && devid <= e->devid_end))
			continue;
		ret = dma_ops_unity_map(dma_dom, e);
		if (ret)
			return ret;
	}

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/

/*
 * The address allocator core functions.
 *
 * called with domain->lock held
 */

/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;

	if (start_page + pages > last_page)
		pages = last_page - start_page;

	for (i = start_page; i < start_page + pages; ++i) {
		int index = i / APERTURE_RANGE_PAGES;
		int page = i % APERTURE_RANGE_PAGES;
		__set_bit(page, dom->aperture[index]->bitmap);
	}
}

/*
 * This function is used to add a new aperture range to an existing
 * aperture in case of dma_ops domain allocation or address allocation
 * failure.
 */
static int alloc_new_range(struct dma_ops_domain *dma_dom,
			   bool populate, gfp_t gfp)
{
	int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
	struct amd_iommu *iommu;
	unsigned long i, old_size;

#ifdef CONFIG_IOMMU_STRESS
	populate = false;
#endif

	if (index >= APERTURE_MAX_RANGES)
		return -ENOMEM;

	dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
	if (!dma_dom->aperture[index])
		return -ENOMEM;

	dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
	if (!dma_dom->aperture[index]->bitmap)
		goto out_free;

	dma_dom->aperture[index]->offset = dma_dom->aperture_size;

	if (populate) {
		unsigned long address = dma_dom->aperture_size;
		int i, num_ptes = APERTURE_RANGE_PAGES / 512;
		u64 *pte, *pte_page;

		for (i = 0; i < num_ptes; ++i) {
			pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
					&pte_page, gfp);
			if (!pte)
				goto out_free;

			dma_dom->aperture[index]->pte_pages[i] = pte_page;

			address += APERTURE_RANGE_SIZE / 64;
		}
	}

	old_size = dma_dom->aperture_size;
	dma_dom->aperture_size += APERTURE_RANGE_SIZE;

	/* Reserve address range used for MSI messages */
	if (old_size < MSI_ADDR_BASE_LO &&
	    dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
		unsigned long spage;
		int pages;

		pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
		spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;

		dma_ops_reserve_addresses(dma_dom, spage, pages);
	}

	/* Initialize the exclusion range if necessary */
	for_each_iommu(iommu) {
		if (iommu->exclusion_start &&
		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
		    && iommu->exclusion_start < dma_dom->aperture_size) {
			unsigned long startpage;
			int pages = iommu_num_pages(iommu->exclusion_start,
						    iommu->exclusion_length,
						    PAGE_SIZE);
			startpage = iommu->exclusion_start >> PAGE_SHIFT;
			dma_ops_reserve_addresses(dma_dom, startpage, pages);
		}
	}

	/*
	 * Check for areas already mapped as present in the new aperture
	 * range and mark those pages as reserved in the allocator. Such
	 * mappings may already exist as a result of requested unity
	 * mappings for devices.
	 */
	for (i = dma_dom->aperture[index]->offset;
	     i < dma_dom->aperture_size;
	     i += PAGE_SIZE) {
		u64 *pte = fetch_pte(&dma_dom->domain, i);
		if (!pte || !IOMMU_PTE_PRESENT(*pte))
			continue;

		dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
	}

	update_domain(&dma_dom->domain);

	return 0;

out_free:
	update_domain(&dma_dom->domain);

	free_page((unsigned long)dma_dom->aperture[index]->bitmap);

	kfree(dma_dom->aperture[index]);
	dma_dom->aperture[index] = NULL;

	return -ENOMEM;
}

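/*
 * Scan the aperture ranges for a free block of @pages pages, starting the
 * search at @start and honoring the device's DMA mask and segment boundary.
 */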
static unsigned long dma_ops_area_alloc(struct device *dev,
					struct dma_ops_domain *dom,
					unsigned int pages,
					unsigned long align_mask,
					u64 dma_mask,
					unsigned long start)
{
	unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
	int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
	int i = start >> APERTURE_RANGE_SHIFT;
	unsigned long boundary_size;
	unsigned long address = -1;
	unsigned long limit;

	next_bit >>= PAGE_SHIFT;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	for (; i < max_index; ++i) {
		unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;

		if (dom->aperture[i]->offset >= dma_mask)
			break;

		limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
					       dma_mask >> PAGE_SHIFT);

		address = iommu_area_alloc(dom->aperture[i]->bitmap,
					   limit, next_bit, pages, 0,
					   boundary_size, align_mask);
		if (address != -1) {
			address = dom->aperture[i]->offset +
				  (address << PAGE_SHIFT);
			dom->next_address = address + (pages << PAGE_SHIFT);
			break;
		}

		next_bit = 0;
	}

	return address;
}
1488
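/*
 * Allocate a range of DMA addresses. The search starts at next_address;
 * if that fails the allocator wraps around to the start of the aperture
 * and sets need_flush so stale IO/TLB entries are flushed before the
 * addresses are reused. Returns DMA_ERROR_CODE when nothing is free.
 */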
Joerg Roedeld3086442008-06-26 21:27:57 +02001489static unsigned long dma_ops_alloc_addresses(struct device *dev,
1490 struct dma_ops_domain *dom,
Joerg Roedel6d4f3432008-09-04 19:18:02 +02001491 unsigned int pages,
Joerg Roedel832a90c2008-09-18 15:54:23 +02001492 unsigned long align_mask,
1493 u64 dma_mask)
Joerg Roedeld3086442008-06-26 21:27:57 +02001494{
Joerg Roedeld3086442008-06-26 21:27:57 +02001495 unsigned long address;
Joerg Roedeld3086442008-06-26 21:27:57 +02001496
Joerg Roedelfe16f082009-05-22 12:27:53 +02001497#ifdef CONFIG_IOMMU_STRESS
1498 dom->next_address = 0;
1499 dom->need_flush = true;
1500#endif
Joerg Roedeld3086442008-06-26 21:27:57 +02001501
Joerg Roedel384de722009-05-15 12:30:05 +02001502 address = dma_ops_area_alloc(dev, dom, pages, align_mask,
Joerg Roedel803b8cb42009-05-18 15:32:48 +02001503 dma_mask, dom->next_address);
Joerg Roedeld3086442008-06-26 21:27:57 +02001504
Joerg Roedel1c655772008-09-04 18:40:05 +02001505 if (address == -1) {
Joerg Roedel803b8cb42009-05-18 15:32:48 +02001506 dom->next_address = 0;
Joerg Roedel384de722009-05-15 12:30:05 +02001507 address = dma_ops_area_alloc(dev, dom, pages, align_mask,
1508 dma_mask, 0);
Joerg Roedel1c655772008-09-04 18:40:05 +02001509 dom->need_flush = true;
1510 }
Joerg Roedeld3086442008-06-26 21:27:57 +02001511
Joerg Roedel384de722009-05-15 12:30:05 +02001512 if (unlikely(address == -1))
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09001513 address = DMA_ERROR_CODE;
Joerg Roedeld3086442008-06-26 21:27:57 +02001514
1515 WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
1516
1517 return address;
1518}
1519
Joerg Roedel431b2a22008-07-11 17:14:22 +02001520/*
1521 * The address free function.
1522 *
1523 * called with domain->lock held
1524 */
Joerg Roedeld3086442008-06-26 21:27:57 +02001525static void dma_ops_free_addresses(struct dma_ops_domain *dom,
1526 unsigned long address,
1527 unsigned int pages)
1528{
Joerg Roedel384de722009-05-15 12:30:05 +02001529 unsigned i = address >> APERTURE_RANGE_SHIFT;
1530 struct aperture_range *range = dom->aperture[i];
Joerg Roedel80be3082008-11-06 14:59:05 +01001531
Joerg Roedel384de722009-05-15 12:30:05 +02001532 BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
1533
Joerg Roedel47bccd62009-05-22 12:40:54 +02001534#ifdef CONFIG_IOMMU_STRESS
1535 if (i < 4)
1536 return;
1537#endif
1538
Joerg Roedel803b8cb42009-05-18 15:32:48 +02001539 if (address >= dom->next_address)
Joerg Roedel80be3082008-11-06 14:59:05 +01001540 dom->need_flush = true;
Joerg Roedel384de722009-05-15 12:30:05 +02001541
1542 address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
Joerg Roedel803b8cb42009-05-18 15:32:48 +02001543
Akinobu Mitaa66022c2009-12-15 16:48:28 -08001544 bitmap_clear(range->bitmap, address, pages);
Joerg Roedel384de722009-05-15 12:30:05 +02001545
Joerg Roedeld3086442008-06-26 21:27:57 +02001546}
1547
Joerg Roedel431b2a22008-07-11 17:14:22 +02001548/****************************************************************************
1549 *
1550 * The next functions belong to the domain allocation. A domain is
1551 * allocated for every IOMMU as the default domain. If device isolation
 1552 * is enabled, every device gets its own domain. The most important thing
1553 * about domains is the page table mapping the DMA address space they
1554 * contain.
1555 *
1556 ****************************************************************************/
1557
Joerg Roedelaeb26f52009-11-20 16:44:01 +01001558/*
1559 * This function adds a protection domain to the global protection domain list
1560 */
1561static void add_domain_to_list(struct protection_domain *domain)
1562{
1563 unsigned long flags;
1564
1565 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1566 list_add(&domain->list, &amd_iommu_pd_list);
1567 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1568}
1569
1570/*
 1571 * This function removes a protection domain from the global
1572 * protection domain list
1573 */
1574static void del_domain_from_list(struct protection_domain *domain)
1575{
1576 unsigned long flags;
1577
1578 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1579 list_del(&domain->list);
1580 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1581}
1582
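/*
 * Allocate the next free protection domain id. Id 0 is reserved as the
 * invalid id, so 0 is returned when all ids are in use.
 */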
Joerg Roedelec487d12008-06-26 21:27:58 +02001583static u16 domain_id_alloc(void)
1584{
1585 unsigned long flags;
1586 int id;
1587
1588 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1589 id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1590 BUG_ON(id == 0);
1591 if (id > 0 && id < MAX_DOMAIN_ID)
1592 __set_bit(id, amd_iommu_pd_alloc_bitmap);
1593 else
1594 id = 0;
1595 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1596
1597 return id;
1598}
1599
Joerg Roedela2acfb72008-12-02 18:28:53 +01001600static void domain_id_free(int id)
1601{
1602 unsigned long flags;
1603
1604 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1605 if (id > 0 && id < MAX_DOMAIN_ID)
1606 __clear_bit(id, amd_iommu_pd_alloc_bitmap);
1607 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1608}
Joerg Roedela2acfb72008-12-02 18:28:53 +01001609
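/*
 * Free the page table of a protection domain. Walks the upper two
 * levels of the three-level page table and frees every present page
 * table page, including the root.
 */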
Joerg Roedel86db2e52008-12-02 18:20:21 +01001610static void free_pagetable(struct protection_domain *domain)
Joerg Roedelec487d12008-06-26 21:27:58 +02001611{
1612 int i, j;
1613 u64 *p1, *p2, *p3;
1614
Joerg Roedel86db2e52008-12-02 18:20:21 +01001615 p1 = domain->pt_root;
Joerg Roedelec487d12008-06-26 21:27:58 +02001616
1617 if (!p1)
1618 return;
1619
1620 for (i = 0; i < 512; ++i) {
1621 if (!IOMMU_PTE_PRESENT(p1[i]))
1622 continue;
1623
1624 p2 = IOMMU_PTE_PAGE(p1[i]);
Joerg Roedel3cc3d842008-12-04 16:44:31 +01001625 for (j = 0; j < 512; ++j) {
Joerg Roedelec487d12008-06-26 21:27:58 +02001626 if (!IOMMU_PTE_PRESENT(p2[j]))
1627 continue;
1628 p3 = IOMMU_PTE_PAGE(p2[j]);
1629 free_page((unsigned long)p3);
1630 }
1631
1632 free_page((unsigned long)p2);
1633 }
1634
1635 free_page((unsigned long)p1);
Joerg Roedel86db2e52008-12-02 18:20:21 +01001636
1637 domain->pt_root = NULL;
Joerg Roedelec487d12008-06-26 21:27:58 +02001638}
1639
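/* Free the root page of the GCR3 table of an IOMMUv2 domain */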
Joerg Roedel52815b72011-11-17 17:24:28 +01001640static void free_gcr3_table(struct protection_domain *domain)
1641{
1642 free_page((unsigned long)domain->gcr3_tbl);
1643}
1644
Joerg Roedel431b2a22008-07-11 17:14:22 +02001645/*
1646 * Free a domain, only used if something went wrong in the
1647 * allocation path and we need to free an already allocated page table
1648 */
Joerg Roedelec487d12008-06-26 21:27:58 +02001649static void dma_ops_domain_free(struct dma_ops_domain *dom)
1650{
Joerg Roedel384de722009-05-15 12:30:05 +02001651 int i;
1652
Joerg Roedelec487d12008-06-26 21:27:58 +02001653 if (!dom)
1654 return;
1655
Joerg Roedelaeb26f52009-11-20 16:44:01 +01001656 del_domain_from_list(&dom->domain);
1657
Joerg Roedel86db2e52008-12-02 18:20:21 +01001658 free_pagetable(&dom->domain);
Joerg Roedelec487d12008-06-26 21:27:58 +02001659
Joerg Roedel384de722009-05-15 12:30:05 +02001660 for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
1661 if (!dom->aperture[i])
1662 continue;
1663 free_page((unsigned long)dom->aperture[i]->bitmap);
1664 kfree(dom->aperture[i]);
1665 }
Joerg Roedelec487d12008-06-26 21:27:58 +02001666
1667 kfree(dom);
1668}
1669
Joerg Roedel431b2a22008-07-11 17:14:22 +02001670/*
1671 * Allocates a new protection domain usable for the dma_ops functions.
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04001672 * It also initializes the page table and the address allocator data
Joerg Roedel431b2a22008-07-11 17:14:22 +02001673 * structures required for the dma_ops interface
1674 */
Joerg Roedel87a64d52009-11-24 17:26:43 +01001675static struct dma_ops_domain *dma_ops_domain_alloc(void)
Joerg Roedelec487d12008-06-26 21:27:58 +02001676{
1677 struct dma_ops_domain *dma_dom;
Joerg Roedelec487d12008-06-26 21:27:58 +02001678
1679 dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
1680 if (!dma_dom)
1681 return NULL;
1682
1683 spin_lock_init(&dma_dom->domain.lock);
1684
1685 dma_dom->domain.id = domain_id_alloc();
1686 if (dma_dom->domain.id == 0)
1687 goto free_dma_dom;
Joerg Roedel7c392cb2009-11-26 11:13:32 +01001688 INIT_LIST_HEAD(&dma_dom->domain.dev_list);
Joerg Roedel8f7a0172009-09-02 16:55:24 +02001689 dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
Joerg Roedelec487d12008-06-26 21:27:58 +02001690 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
Joerg Roedel9fdb19d2008-12-02 17:46:25 +01001691 dma_dom->domain.flags = PD_DMA_OPS_MASK;
Joerg Roedelec487d12008-06-26 21:27:58 +02001692 dma_dom->domain.priv = dma_dom;
1693 if (!dma_dom->domain.pt_root)
1694 goto free_dma_dom;
Joerg Roedelec487d12008-06-26 21:27:58 +02001695
Joerg Roedel1c655772008-09-04 18:40:05 +02001696 dma_dom->need_flush = false;
Joerg Roedelbd60b732008-09-11 10:24:48 +02001697 dma_dom->target_dev = 0xffff;
Joerg Roedel1c655772008-09-04 18:40:05 +02001698
Joerg Roedelaeb26f52009-11-20 16:44:01 +01001699 add_domain_to_list(&dma_dom->domain);
1700
Joerg Roedel576175c2009-11-23 19:08:46 +01001701 if (alloc_new_range(dma_dom, true, GFP_KERNEL))
Joerg Roedelec487d12008-06-26 21:27:58 +02001702 goto free_dma_dom;
Joerg Roedelec487d12008-06-26 21:27:58 +02001703
Joerg Roedel431b2a22008-07-11 17:14:22 +02001704 /*
Joerg Roedelec487d12008-06-26 21:27:58 +02001705	 * Mark the first page as allocated so we never return 0 as
	1706	 * a valid dma-address and can use 0 as the error value.
Joerg Roedel431b2a22008-07-11 17:14:22 +02001707 */
Joerg Roedel384de722009-05-15 12:30:05 +02001708 dma_dom->aperture[0]->bitmap[0] = 1;
Joerg Roedel803b8cb42009-05-18 15:32:48 +02001709 dma_dom->next_address = 0;
Joerg Roedelec487d12008-06-26 21:27:58 +02001710
Joerg Roedelec487d12008-06-26 21:27:58 +02001711
1712 return dma_dom;
1713
1714free_dma_dom:
1715 dma_ops_domain_free(dma_dom);
1716
1717 return NULL;
1718}
1719
Joerg Roedel431b2a22008-07-11 17:14:22 +02001720/*
Joerg Roedel5b28df62008-12-02 17:49:42 +01001721 * little helper function to check whether a given protection domain is a
1722 * dma_ops domain
1723 */
1724static bool dma_ops_domain(struct protection_domain *domain)
1725{
1726 return domain->flags & PD_DMA_OPS_MASK;
1727}
1728
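/*
 * Build and install the device table entry for @devid: page table root
 * and translation mode, the IOTLB enable bit for ATS capable devices
 * and, for IOMMUv2 domains, the GCR3 table root encoded into the entry.
 */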
Joerg Roedelfd7b5532011-04-05 15:31:08 +02001729static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02001730{
Joerg Roedel132bd682011-11-17 14:18:46 +01001731 u64 pte_root = 0;
Joerg Roedelee6c2862011-11-09 12:06:03 +01001732 u64 flags = 0;
Joerg Roedel863c74e2008-12-02 17:56:36 +01001733
Joerg Roedel132bd682011-11-17 14:18:46 +01001734 if (domain->mode != PAGE_MODE_NONE)
1735 pte_root = virt_to_phys(domain->pt_root);
1736
Joerg Roedel38ddf412008-09-11 10:38:32 +02001737 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
1738 << DEV_ENTRY_MODE_SHIFT;
1739 pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02001740
Joerg Roedelee6c2862011-11-09 12:06:03 +01001741 flags = amd_iommu_dev_table[devid].data[1];
1742
Joerg Roedelfd7b5532011-04-05 15:31:08 +02001743 if (ats)
1744 flags |= DTE_FLAG_IOTLB;
1745
Joerg Roedel52815b72011-11-17 17:24:28 +01001746 if (domain->flags & PD_IOMMUV2_MASK) {
1747 u64 gcr3 = __pa(domain->gcr3_tbl);
1748 u64 glx = domain->glx;
1749 u64 tmp;
1750
1751 pte_root |= DTE_FLAG_GV;
1752 pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
1753
1754 /* First mask out possible old values for GCR3 table */
1755 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1756 flags &= ~tmp;
1757
1758 tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1759 flags &= ~tmp;
1760
1761 /* Encode GCR3 table into DTE */
1762 tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
1763 pte_root |= tmp;
1764
1765 tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
1766 flags |= tmp;
1767
1768 tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
1769 flags |= tmp;
1770 }
1771
Joerg Roedelee6c2862011-11-09 12:06:03 +01001772 flags &= ~(0xffffUL);
1773 flags |= domain->id;
1774
1775 amd_iommu_dev_table[devid].data[1] = flags;
1776 amd_iommu_dev_table[devid].data[0] = pte_root;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02001777}
1778
Joerg Roedel15898bb2009-11-24 15:39:42 +01001779static void clear_dte_entry(u16 devid)
Joerg Roedel355bf552008-12-08 12:02:41 +01001780{
Joerg Roedel355bf552008-12-08 12:02:41 +01001781 /* remove entry from the device table seen by the hardware */
1782 amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
1783 amd_iommu_dev_table[devid].data[1] = 0;
Joerg Roedel355bf552008-12-08 12:02:41 +01001784
Joerg Roedelc5cca142009-10-09 18:31:20 +02001785 amd_iommu_apply_erratum_63(devid);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001786}
1787
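/* Attach a device to a protection domain; the domain lock must be held */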
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001788static void do_attach(struct iommu_dev_data *dev_data,
1789 struct protection_domain *domain)
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001790{
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001791 struct amd_iommu *iommu;
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001792 bool ats;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001793
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001794 iommu = amd_iommu_rlookup_table[dev_data->devid];
1795 ats = dev_data->ats.enabled;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001796
1797 /* Update data structures */
1798 dev_data->domain = domain;
1799 list_add(&dev_data->list, &domain->dev_list);
Joerg Roedelf62dda62011-06-09 12:55:35 +02001800 set_dte_entry(dev_data->devid, domain, ats);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001801
1802 /* Do reference counting */
1803 domain->dev_iommu[iommu->index] += 1;
1804 domain->dev_cnt += 1;
1805
1806 /* Flush the DTE entry */
Joerg Roedel6c542042011-06-09 17:07:31 +02001807 device_flush_dte(dev_data);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001808}
1809
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001810static void do_detach(struct iommu_dev_data *dev_data)
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001811{
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001812 struct amd_iommu *iommu;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001813
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001814 iommu = amd_iommu_rlookup_table[dev_data->devid];
Joerg Roedelc5cca142009-10-09 18:31:20 +02001815
Joerg Roedelc4596112009-11-20 14:57:32 +01001816 /* decrease reference counters */
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001817 dev_data->domain->dev_iommu[iommu->index] -= 1;
1818 dev_data->domain->dev_cnt -= 1;
Joerg Roedel355bf552008-12-08 12:02:41 +01001819
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001820 /* Update data structures */
1821 dev_data->domain = NULL;
1822 list_del(&dev_data->list);
Joerg Roedelf62dda62011-06-09 12:55:35 +02001823 clear_dte_entry(dev_data->devid);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001824
1825 /* Flush the DTE entry */
Joerg Roedel6c542042011-06-09 17:07:31 +02001826 device_flush_dte(dev_data);
Joerg Roedel15898bb2009-11-24 15:39:42 +01001827}
1828
1829/*
 1830 * If a device is not yet associated with a domain, this function
 1831 * associates it with the given domain and makes it visible to the hardware
1832 */
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001833static int __attach_device(struct iommu_dev_data *dev_data,
Joerg Roedel15898bb2009-11-24 15:39:42 +01001834 struct protection_domain *domain)
1835{
Julia Lawall84fe6c12010-05-27 12:31:51 +02001836 int ret;
Joerg Roedel657cbb62009-11-23 15:26:46 +01001837
Joerg Roedel15898bb2009-11-24 15:39:42 +01001838 /* lock domain */
1839 spin_lock(&domain->lock);
1840
Joerg Roedel71f77582011-06-09 19:03:15 +02001841 if (dev_data->alias_data != NULL) {
1842 struct iommu_dev_data *alias_data = dev_data->alias_data;
Joerg Roedel15898bb2009-11-24 15:39:42 +01001843
Joerg Roedel2b02b092011-06-09 17:48:39 +02001844 /* Some sanity checks */
1845 ret = -EBUSY;
1846 if (alias_data->domain != NULL &&
1847 alias_data->domain != domain)
1848 goto out_unlock;
Joerg Roedel15898bb2009-11-24 15:39:42 +01001849
Joerg Roedel2b02b092011-06-09 17:48:39 +02001850 if (dev_data->domain != NULL &&
1851 dev_data->domain != domain)
1852 goto out_unlock;
1853
1854 /* Do real assignment */
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001855 if (alias_data->domain == NULL)
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001856 do_attach(alias_data, domain);
Joerg Roedel24100052009-11-25 15:59:57 +01001857
1858 atomic_inc(&alias_data->bind);
Joerg Roedel657cbb62009-11-23 15:26:46 +01001859 }
Joerg Roedel15898bb2009-11-24 15:39:42 +01001860
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001861 if (dev_data->domain == NULL)
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001862 do_attach(dev_data, domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01001863
Joerg Roedel24100052009-11-25 15:59:57 +01001864 atomic_inc(&dev_data->bind);
1865
Julia Lawall84fe6c12010-05-27 12:31:51 +02001866 ret = 0;
1867
1868out_unlock:
1869
Joerg Roedel355bf552008-12-08 12:02:41 +01001870 /* ready */
1871 spin_unlock(&domain->lock);
Joerg Roedel21129f72009-09-01 11:59:42 +02001872
Julia Lawall84fe6c12010-05-27 12:31:51 +02001873 return ret;
Joerg Roedel15898bb2009-11-24 15:39:42 +01001874}
1875
Joerg Roedel52815b72011-11-17 17:24:28 +01001876
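/* Disable the ATS, PRI and PASID capabilities of a PCI device again */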
1877static void pdev_iommuv2_disable(struct pci_dev *pdev)
1878{
1879 pci_disable_ats(pdev);
1880 pci_disable_pri(pdev);
1881 pci_disable_pasid(pdev);
1882}
1883
1884static int pdev_iommuv2_enable(struct pci_dev *pdev)
1885{
1886 int ret;
1887
1888 /* Only allow access to user-accessible pages */
1889 ret = pci_enable_pasid(pdev, 0);
1890 if (ret)
1891 goto out_err;
1892
1893 /* First reset the PRI state of the device */
1894 ret = pci_reset_pri(pdev);
1895 if (ret)
1896 goto out_err;
1897
1898 /* FIXME: Hardcode number of outstanding requests for now */
1899 ret = pci_enable_pri(pdev, 32);
1900 if (ret)
1901 goto out_err;
1902
1903 ret = pci_enable_ats(pdev, PAGE_SHIFT);
1904 if (ret)
1905 goto out_err;
1906
1907 return 0;
1908
1909out_err:
1910 pci_disable_pri(pdev);
1911 pci_disable_pasid(pdev);
1912
1913 return ret;
1914}
1915
Joerg Roedel15898bb2009-11-24 15:39:42 +01001916/*
 1917 * If a device is not yet associated with a domain, this function
 1918 * associates it with the given domain and makes it visible to the hardware
1919 */
1920static int attach_device(struct device *dev,
1921 struct protection_domain *domain)
1922{
Joerg Roedelfd7b5532011-04-05 15:31:08 +02001923 struct pci_dev *pdev = to_pci_dev(dev);
Joerg Roedelea61cdd2011-06-09 12:56:30 +02001924 struct iommu_dev_data *dev_data;
Joerg Roedel15898bb2009-11-24 15:39:42 +01001925 unsigned long flags;
1926 int ret;
1927
Joerg Roedelea61cdd2011-06-09 12:56:30 +02001928 dev_data = get_dev_data(dev);
1929
Joerg Roedel52815b72011-11-17 17:24:28 +01001930 if (domain->flags & PD_IOMMUV2_MASK) {
1931 if (!dev_data->iommu_v2 || !dev_data->passthrough)
1932 return -EINVAL;
1933
1934 if (pdev_iommuv2_enable(pdev) != 0)
1935 return -EINVAL;
1936
1937 dev_data->ats.enabled = true;
1938 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
1939 } else if (amd_iommu_iotlb_sup &&
1940 pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
Joerg Roedelea61cdd2011-06-09 12:56:30 +02001941 dev_data->ats.enabled = true;
1942 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
1943 }
Joerg Roedelfd7b5532011-04-05 15:31:08 +02001944
Joerg Roedel15898bb2009-11-24 15:39:42 +01001945 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001946 ret = __attach_device(dev_data, domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01001947 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1948
1949 /*
1950 * We might boot into a crash-kernel here. The crashed kernel
1951 * left the caches in the IOMMU dirty. So we have to flush
1952 * here to evict all dirty stuff.
1953 */
Joerg Roedel17b124b2011-04-06 18:01:35 +02001954 domain_flush_tlb_pde(domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01001955
1956 return ret;
1957}
1958
1959/*
1960 * Removes a device from a protection domain (unlocked)
1961 */
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001962static void __detach_device(struct iommu_dev_data *dev_data)
Joerg Roedel15898bb2009-11-24 15:39:42 +01001963{
Joerg Roedel2ca76272010-01-22 16:45:31 +01001964 struct protection_domain *domain;
Joerg Roedel7c392cb2009-11-26 11:13:32 +01001965 unsigned long flags;
Joerg Roedel15898bb2009-11-24 15:39:42 +01001966
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001967 BUG_ON(!dev_data->domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01001968
Joerg Roedel2ca76272010-01-22 16:45:31 +01001969 domain = dev_data->domain;
1970
1971 spin_lock_irqsave(&domain->lock, flags);
Joerg Roedel24100052009-11-25 15:59:57 +01001972
Joerg Roedel71f77582011-06-09 19:03:15 +02001973 if (dev_data->alias_data != NULL) {
1974 struct iommu_dev_data *alias_data = dev_data->alias_data;
1975
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001976 if (atomic_dec_and_test(&alias_data->bind))
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001977 do_detach(alias_data);
Joerg Roedel24100052009-11-25 15:59:57 +01001978 }
1979
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001980 if (atomic_dec_and_test(&dev_data->bind))
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001981 do_detach(dev_data);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001982
Joerg Roedel2ca76272010-01-22 16:45:31 +01001983 spin_unlock_irqrestore(&domain->lock, flags);
Joerg Roedel15898bb2009-11-24 15:39:42 +01001984
Joerg Roedel21129f72009-09-01 11:59:42 +02001985 /*
1986 * If we run in passthrough mode the device must be assigned to the
Joerg Roedeld3ad9372010-01-22 17:55:27 +01001987 * passthrough domain if it is detached from any other domain.
1988 * Make sure we can deassign from the pt_domain itself.
Joerg Roedel21129f72009-09-01 11:59:42 +02001989 */
Joerg Roedel5abcdba2011-12-01 15:49:45 +01001990 if (dev_data->passthrough &&
Joerg Roedeld3ad9372010-01-22 17:55:27 +01001991 (dev_data->domain == NULL && domain != pt_domain))
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001992 __attach_device(dev_data, pt_domain);
Joerg Roedel355bf552008-12-08 12:02:41 +01001993}
1994
1995/*
1996 * Removes a device from a protection domain (with devtable_lock held)
1997 */
Joerg Roedel15898bb2009-11-24 15:39:42 +01001998static void detach_device(struct device *dev)
Joerg Roedel355bf552008-12-08 12:02:41 +01001999{
Joerg Roedel52815b72011-11-17 17:24:28 +01002000 struct protection_domain *domain;
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002001 struct iommu_dev_data *dev_data;
Joerg Roedel355bf552008-12-08 12:02:41 +01002002 unsigned long flags;
2003
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002004 dev_data = get_dev_data(dev);
Joerg Roedel52815b72011-11-17 17:24:28 +01002005 domain = dev_data->domain;
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002006
Joerg Roedel355bf552008-12-08 12:02:41 +01002007 /* lock device table */
2008 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002009 __detach_device(dev_data);
Joerg Roedel355bf552008-12-08 12:02:41 +01002010 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
Joerg Roedelfd7b5532011-04-05 15:31:08 +02002011
Joerg Roedel52815b72011-11-17 17:24:28 +01002012 if (domain->flags & PD_IOMMUV2_MASK)
2013 pdev_iommuv2_disable(to_pci_dev(dev));
2014 else if (dev_data->ats.enabled)
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002015 pci_disable_ats(to_pci_dev(dev));
Joerg Roedel52815b72011-11-17 17:24:28 +01002016
2017 dev_data->ats.enabled = false;
Joerg Roedel355bf552008-12-08 12:02:41 +01002018}
Joerg Roedele275a2a2008-12-10 18:27:25 +01002019
Joerg Roedel15898bb2009-11-24 15:39:42 +01002020/*
2021 * Find out the protection domain structure for a given PCI device. This
2022 * will give us the pointer to the page table root for example.
2023 */
2024static struct protection_domain *domain_for_device(struct device *dev)
2025{
Joerg Roedel71f77582011-06-09 19:03:15 +02002026 struct iommu_dev_data *dev_data;
Joerg Roedel2b02b092011-06-09 17:48:39 +02002027 struct protection_domain *dom = NULL;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002028 unsigned long flags;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002029
Joerg Roedel657cbb62009-11-23 15:26:46 +01002030 dev_data = get_dev_data(dev);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002031
Joerg Roedel2b02b092011-06-09 17:48:39 +02002032 if (dev_data->domain)
2033 return dev_data->domain;
2034
Joerg Roedel71f77582011-06-09 19:03:15 +02002035 if (dev_data->alias_data != NULL) {
2036 struct iommu_dev_data *alias_data = dev_data->alias_data;
Joerg Roedel2b02b092011-06-09 17:48:39 +02002037
2038 read_lock_irqsave(&amd_iommu_devtable_lock, flags);
2039 if (alias_data->domain != NULL) {
2040 __attach_device(dev_data, alias_data->domain);
2041 dom = alias_data->domain;
2042 }
2043 read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002044 }
2045
Joerg Roedel15898bb2009-11-24 15:39:42 +01002046 return dom;
2047}
2048
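/*
 * Bus notifier callback which keeps the IOMMU driver in sync with the
 * driver core: new devices get initialized and a preallocated dma_ops
 * domain, devices whose driver was unbound are detached from their
 * domain again.
 */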
Joerg Roedele275a2a2008-12-10 18:27:25 +01002049static int device_change_notifier(struct notifier_block *nb,
2050 unsigned long action, void *data)
2051{
Joerg Roedele275a2a2008-12-10 18:27:25 +01002052 struct dma_ops_domain *dma_domain;
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002053 struct protection_domain *domain;
2054 struct iommu_dev_data *dev_data;
2055 struct device *dev = data;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002056 struct amd_iommu *iommu;
Joerg Roedel1ac4cbb2008-12-10 19:33:26 +01002057 unsigned long flags;
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002058 u16 devid;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002059
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002060 if (!check_device(dev))
2061 return 0;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002062
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002063 devid = get_device_id(dev);
2064 iommu = amd_iommu_rlookup_table[devid];
2065 dev_data = get_dev_data(dev);
Joerg Roedele275a2a2008-12-10 18:27:25 +01002066
2067 switch (action) {
Chris Wrightc1eee672009-05-21 00:56:58 -07002068 case BUS_NOTIFY_UNBOUND_DRIVER:
Joerg Roedel657cbb62009-11-23 15:26:46 +01002069
2070 domain = domain_for_device(dev);
2071
Joerg Roedele275a2a2008-12-10 18:27:25 +01002072 if (!domain)
2073 goto out;
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002074 if (dev_data->passthrough)
Joerg Roedela1ca3312009-09-01 12:22:22 +02002075 break;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002076 detach_device(dev);
Joerg Roedele275a2a2008-12-10 18:27:25 +01002077 break;
Joerg Roedel1ac4cbb2008-12-10 19:33:26 +01002078 case BUS_NOTIFY_ADD_DEVICE:
Joerg Roedel657cbb62009-11-23 15:26:46 +01002079
2080 iommu_init_device(dev);
2081
2082 domain = domain_for_device(dev);
2083
Joerg Roedel1ac4cbb2008-12-10 19:33:26 +01002084 /* allocate a protection domain if a device is added */
2085 dma_domain = find_protection_domain(devid);
2086 if (dma_domain)
2087 goto out;
Joerg Roedel87a64d52009-11-24 17:26:43 +01002088 dma_domain = dma_ops_domain_alloc();
Joerg Roedel1ac4cbb2008-12-10 19:33:26 +01002089 if (!dma_domain)
2090 goto out;
2091 dma_domain->target_dev = devid;
2092
2093 spin_lock_irqsave(&iommu_pd_list_lock, flags);
2094 list_add_tail(&dma_domain->list, &iommu_pd_list);
2095 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
2096
2097 break;
Joerg Roedel657cbb62009-11-23 15:26:46 +01002098 case BUS_NOTIFY_DEL_DEVICE:
2099
2100 iommu_uninit_device(dev);
2101
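		/* fall through */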
Joerg Roedele275a2a2008-12-10 18:27:25 +01002102 default:
2103 goto out;
2104 }
2105
Joerg Roedele275a2a2008-12-10 18:27:25 +01002106 iommu_completion_wait(iommu);
2107
2108out:
2109 return 0;
2110}
2111
Jaswinder Singh Rajputb25ae672009-07-01 19:53:14 +05302112static struct notifier_block device_nb = {
Joerg Roedele275a2a2008-12-10 18:27:25 +01002113 .notifier_call = device_change_notifier,
2114};
Joerg Roedel355bf552008-12-08 12:02:41 +01002115
Joerg Roedel8638c492009-12-10 11:12:25 +01002116void amd_iommu_init_notifier(void)
2117{
2118 bus_register_notifier(&pci_bus_type, &device_nb);
2119}
2120
Joerg Roedel431b2a22008-07-11 17:14:22 +02002121/*****************************************************************************
2122 *
2123 * The next functions belong to the dma_ops mapping/unmapping code.
2124 *
2125 *****************************************************************************/
2126
2127/*
2128 * In the dma_ops path we only have the struct device. This function
2129 * finds the corresponding IOMMU, the protection domain and the
2130 * requestor id for a given device.
2131 * If the device is not yet associated with a domain this is also done
2132 * in this function.
2133 */
Joerg Roedel94f6d192009-11-24 16:40:02 +01002134static struct protection_domain *get_domain(struct device *dev)
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02002135{
Joerg Roedel94f6d192009-11-24 16:40:02 +01002136 struct protection_domain *domain;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02002137 struct dma_ops_domain *dma_dom;
Joerg Roedel94f6d192009-11-24 16:40:02 +01002138 u16 devid = get_device_id(dev);
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02002139
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002140 if (!check_device(dev))
Joerg Roedel94f6d192009-11-24 16:40:02 +01002141 return ERR_PTR(-EINVAL);
Joerg Roedeldbcc1122008-09-04 15:04:26 +02002142
Joerg Roedel94f6d192009-11-24 16:40:02 +01002143 domain = domain_for_device(dev);
2144 if (domain != NULL && !dma_ops_domain(domain))
2145 return ERR_PTR(-EBUSY);
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002146
Joerg Roedel94f6d192009-11-24 16:40:02 +01002147 if (domain != NULL)
2148 return domain;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02002149
Joerg Roedel15898bb2009-11-24 15:39:42 +01002150	/* Device not bound yet - bind it */
Joerg Roedel94f6d192009-11-24 16:40:02 +01002151 dma_dom = find_protection_domain(devid);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002152 if (!dma_dom)
Joerg Roedel94f6d192009-11-24 16:40:02 +01002153 dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
2154 attach_device(dev, &dma_dom->domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002155 DUMP_printk("Using protection domain %d for device %s\n",
Joerg Roedel94f6d192009-11-24 16:40:02 +01002156 dma_dom->domain.id, dev_name(dev));
Joerg Roedelf91ba192008-11-25 12:56:12 +01002157
Joerg Roedel94f6d192009-11-24 16:40:02 +01002158 return &dma_dom->domain;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02002159}
2160
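/* Re-write the device table entries of all devices attached to a domain */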
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002161static void update_device_table(struct protection_domain *domain)
2162{
Joerg Roedel492667d2009-11-27 13:25:47 +01002163 struct iommu_dev_data *dev_data;
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002164
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002165 list_for_each_entry(dev_data, &domain->dev_list, list)
2166 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002167}
2168
2169static void update_domain(struct protection_domain *domain)
2170{
2171 if (!domain->updated)
2172 return;
2173
2174 update_device_table(domain);
Joerg Roedel17b124b2011-04-06 18:01:35 +02002175
2176 domain_flush_devices(domain);
2177 domain_flush_tlb_pde(domain);
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002178
2179 domain->updated = false;
2180}
2181
Joerg Roedel431b2a22008-07-11 17:14:22 +02002182/*
Joerg Roedel8bda3092009-05-12 12:02:46 +02002183 * This function fetches the PTE for a given address in the aperture
2184 */
2185static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
2186 unsigned long address)
2187{
Joerg Roedel384de722009-05-15 12:30:05 +02002188 struct aperture_range *aperture;
Joerg Roedel8bda3092009-05-12 12:02:46 +02002189 u64 *pte, *pte_page;
2190
Joerg Roedel384de722009-05-15 12:30:05 +02002191 aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2192 if (!aperture)
2193 return NULL;
2194
2195 pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
Joerg Roedel8bda3092009-05-12 12:02:46 +02002196 if (!pte) {
Joerg Roedelcbb9d722010-01-15 14:41:15 +01002197 pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
Joerg Roedelabdc5eb2009-09-03 11:33:51 +02002198 GFP_ATOMIC);
Joerg Roedel384de722009-05-15 12:30:05 +02002199 aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
2200 } else
Joerg Roedel8c8c1432009-09-02 17:30:00 +02002201 pte += PM_LEVEL_INDEX(0, address);
Joerg Roedel8bda3092009-05-12 12:02:46 +02002202
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002203 update_domain(&dom->domain);
Joerg Roedel8bda3092009-05-12 12:02:46 +02002204
2205 return pte;
2206}
2207
2208/*
Joerg Roedel431b2a22008-07-11 17:14:22 +02002209 * This is the generic map function. It maps one 4kb page at paddr to
2210 * the given address in the DMA address space for the domain.
2211 */
Joerg Roedel680525e2009-11-23 18:44:42 +01002212static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
Joerg Roedelcb76c322008-06-26 21:28:00 +02002213 unsigned long address,
2214 phys_addr_t paddr,
2215 int direction)
2216{
2217 u64 *pte, __pte;
2218
2219 WARN_ON(address > dom->aperture_size);
2220
2221 paddr &= PAGE_MASK;
2222
Joerg Roedel8bda3092009-05-12 12:02:46 +02002223 pte = dma_ops_get_pte(dom, address);
Joerg Roedel53812c12009-05-12 12:17:38 +02002224 if (!pte)
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002225 return DMA_ERROR_CODE;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002226
2227 __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
2228
2229 if (direction == DMA_TO_DEVICE)
2230 __pte |= IOMMU_PTE_IR;
2231 else if (direction == DMA_FROM_DEVICE)
2232 __pte |= IOMMU_PTE_IW;
2233 else if (direction == DMA_BIDIRECTIONAL)
2234 __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
2235
2236 WARN_ON(*pte);
2237
2238 *pte = __pte;
2239
2240 return (dma_addr_t)address;
2241}
2242
Joerg Roedel431b2a22008-07-11 17:14:22 +02002243/*
 2244 * The generic unmapping function for one page in the DMA address space.
2245 */
Joerg Roedel680525e2009-11-23 18:44:42 +01002246static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
Joerg Roedelcb76c322008-06-26 21:28:00 +02002247 unsigned long address)
2248{
Joerg Roedel384de722009-05-15 12:30:05 +02002249 struct aperture_range *aperture;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002250 u64 *pte;
2251
2252 if (address >= dom->aperture_size)
2253 return;
2254
Joerg Roedel384de722009-05-15 12:30:05 +02002255 aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2256 if (!aperture)
2257 return;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002258
Joerg Roedel384de722009-05-15 12:30:05 +02002259 pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
2260 if (!pte)
2261 return;
2262
Joerg Roedel8c8c1432009-09-02 17:30:00 +02002263 pte += PM_LEVEL_INDEX(0, address);
Joerg Roedelcb76c322008-06-26 21:28:00 +02002264
2265 WARN_ON(!*pte);
2266
2267 *pte = 0ULL;
2268}
2269
Joerg Roedel431b2a22008-07-11 17:14:22 +02002270/*
2271 * This function contains common code for mapping of a physically
Joerg Roedel24f81162008-12-08 14:25:39 +01002272 * contiguous memory region into DMA address space. It is used by all
2273 * mapping functions provided with this IOMMU driver.
Joerg Roedel431b2a22008-07-11 17:14:22 +02002274 * Must be called with the domain lock held.
2275 */
Joerg Roedelcb76c322008-06-26 21:28:00 +02002276static dma_addr_t __map_single(struct device *dev,
Joerg Roedelcb76c322008-06-26 21:28:00 +02002277 struct dma_ops_domain *dma_dom,
2278 phys_addr_t paddr,
2279 size_t size,
Joerg Roedel6d4f3432008-09-04 19:18:02 +02002280 int dir,
Joerg Roedel832a90c2008-09-18 15:54:23 +02002281 bool align,
2282 u64 dma_mask)
Joerg Roedelcb76c322008-06-26 21:28:00 +02002283{
2284 dma_addr_t offset = paddr & ~PAGE_MASK;
Joerg Roedel53812c12009-05-12 12:17:38 +02002285 dma_addr_t address, start, ret;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002286 unsigned int pages;
Joerg Roedel6d4f3432008-09-04 19:18:02 +02002287 unsigned long align_mask = 0;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002288 int i;
2289
Joerg Roedele3c449f2008-10-15 22:02:11 -07002290 pages = iommu_num_pages(paddr, size, PAGE_SIZE);
Joerg Roedelcb76c322008-06-26 21:28:00 +02002291 paddr &= PAGE_MASK;
2292
Joerg Roedel8ecaf8f2008-12-12 16:13:04 +01002293 INC_STATS_COUNTER(total_map_requests);
2294
Joerg Roedelc1858972008-12-12 15:42:39 +01002295 if (pages > 1)
2296 INC_STATS_COUNTER(cross_page);
2297
Joerg Roedel6d4f3432008-09-04 19:18:02 +02002298 if (align)
2299 align_mask = (1UL << get_order(size)) - 1;
2300
Joerg Roedel11b83882009-05-19 10:23:15 +02002301retry:
Joerg Roedel832a90c2008-09-18 15:54:23 +02002302 address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
2303 dma_mask);
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002304 if (unlikely(address == DMA_ERROR_CODE)) {
Joerg Roedel11b83882009-05-19 10:23:15 +02002305 /*
2306 * setting next_address here will let the address
2307 * allocator only scan the new allocated range in the
2308 * first run. This is a small optimization.
2309 */
2310 dma_dom->next_address = dma_dom->aperture_size;
2311
Joerg Roedel576175c2009-11-23 19:08:46 +01002312 if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
Joerg Roedel11b83882009-05-19 10:23:15 +02002313 goto out;
2314
2315 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002316 * aperture was successfully enlarged by 128 MB, try
Joerg Roedel11b83882009-05-19 10:23:15 +02002317 * allocation again
2318 */
2319 goto retry;
2320 }
Joerg Roedelcb76c322008-06-26 21:28:00 +02002321
2322 start = address;
2323 for (i = 0; i < pages; ++i) {
Joerg Roedel680525e2009-11-23 18:44:42 +01002324 ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002325 if (ret == DMA_ERROR_CODE)
Joerg Roedel53812c12009-05-12 12:17:38 +02002326 goto out_unmap;
2327
Joerg Roedelcb76c322008-06-26 21:28:00 +02002328 paddr += PAGE_SIZE;
2329 start += PAGE_SIZE;
2330 }
2331 address += offset;
2332
Joerg Roedel5774f7c2008-12-12 15:57:30 +01002333 ADD_STATS_COUNTER(alloced_io_mem, size);
2334
FUJITA Tomonoriafa9fdc2008-09-20 01:23:30 +09002335 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
Joerg Roedel17b124b2011-04-06 18:01:35 +02002336 domain_flush_tlb(&dma_dom->domain);
Joerg Roedel1c655772008-09-04 18:40:05 +02002337 dma_dom->need_flush = false;
Joerg Roedel318afd42009-11-23 18:32:38 +01002338 } else if (unlikely(amd_iommu_np_cache))
Joerg Roedel17b124b2011-04-06 18:01:35 +02002339 domain_flush_pages(&dma_dom->domain, address, size);
Joerg Roedel270cab242008-09-04 15:49:46 +02002340
Joerg Roedelcb76c322008-06-26 21:28:00 +02002341out:
2342 return address;
Joerg Roedel53812c12009-05-12 12:17:38 +02002343
2344out_unmap:
2345
2346 for (--i; i >= 0; --i) {
2347 start -= PAGE_SIZE;
Joerg Roedel680525e2009-11-23 18:44:42 +01002348 dma_ops_domain_unmap(dma_dom, start);
Joerg Roedel53812c12009-05-12 12:17:38 +02002349 }
2350
2351 dma_ops_free_addresses(dma_dom, address, pages);
2352
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002353 return DMA_ERROR_CODE;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002354}
2355
Joerg Roedel431b2a22008-07-11 17:14:22 +02002356/*
2357 * Does the reverse of the __map_single function. Must be called with
2358 * the domain lock held too
2359 */
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002360static void __unmap_single(struct dma_ops_domain *dma_dom,
Joerg Roedelcb76c322008-06-26 21:28:00 +02002361 dma_addr_t dma_addr,
2362 size_t size,
2363 int dir)
2364{
Joerg Roedel04e04632010-09-23 16:12:48 +02002365 dma_addr_t flush_addr;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002366 dma_addr_t i, start;
2367 unsigned int pages;
2368
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002369 if ((dma_addr == DMA_ERROR_CODE) ||
Joerg Roedelb8d99052008-12-08 14:40:26 +01002370 (dma_addr + size > dma_dom->aperture_size))
Joerg Roedelcb76c322008-06-26 21:28:00 +02002371 return;
2372
Joerg Roedel04e04632010-09-23 16:12:48 +02002373 flush_addr = dma_addr;
Joerg Roedele3c449f2008-10-15 22:02:11 -07002374 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
Joerg Roedelcb76c322008-06-26 21:28:00 +02002375 dma_addr &= PAGE_MASK;
2376 start = dma_addr;
2377
2378 for (i = 0; i < pages; ++i) {
Joerg Roedel680525e2009-11-23 18:44:42 +01002379 dma_ops_domain_unmap(dma_dom, start);
Joerg Roedelcb76c322008-06-26 21:28:00 +02002380 start += PAGE_SIZE;
2381 }
2382
Joerg Roedel5774f7c2008-12-12 15:57:30 +01002383 SUB_STATS_COUNTER(alloced_io_mem, size);
2384
Joerg Roedelcb76c322008-06-26 21:28:00 +02002385 dma_ops_free_addresses(dma_dom, dma_addr, pages);
Joerg Roedel270cab242008-09-04 15:49:46 +02002386
Joerg Roedel80be3082008-11-06 14:59:05 +01002387 if (amd_iommu_unmap_flush || dma_dom->need_flush) {
Joerg Roedel17b124b2011-04-06 18:01:35 +02002388 domain_flush_pages(&dma_dom->domain, flush_addr, size);
Joerg Roedel80be3082008-11-06 14:59:05 +01002389 dma_dom->need_flush = false;
2390 }
Joerg Roedelcb76c322008-06-26 21:28:00 +02002391}
2392
Joerg Roedel431b2a22008-07-11 17:14:22 +02002393/*
 2394 * The exported map_page function for dma_ops.
2395 */
FUJITA Tomonori51491362009-01-05 23:47:25 +09002396static dma_addr_t map_page(struct device *dev, struct page *page,
2397 unsigned long offset, size_t size,
2398 enum dma_data_direction dir,
2399 struct dma_attrs *attrs)
Joerg Roedel4da70b92008-06-26 21:28:01 +02002400{
2401 unsigned long flags;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002402 struct protection_domain *domain;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002403 dma_addr_t addr;
Joerg Roedel832a90c2008-09-18 15:54:23 +02002404 u64 dma_mask;
FUJITA Tomonori51491362009-01-05 23:47:25 +09002405 phys_addr_t paddr = page_to_phys(page) + offset;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002406
Joerg Roedel0f2a86f2008-12-12 15:05:16 +01002407 INC_STATS_COUNTER(cnt_map_single);
2408
Joerg Roedel94f6d192009-11-24 16:40:02 +01002409 domain = get_domain(dev);
2410 if (PTR_ERR(domain) == -EINVAL)
Joerg Roedel4da70b92008-06-26 21:28:01 +02002411 return (dma_addr_t)paddr;
Joerg Roedel94f6d192009-11-24 16:40:02 +01002412 else if (IS_ERR(domain))
2413 return DMA_ERROR_CODE;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002414
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002415 dma_mask = *dev->dma_mask;
2416
Joerg Roedel4da70b92008-06-26 21:28:01 +02002417 spin_lock_irqsave(&domain->lock, flags);
Joerg Roedel94f6d192009-11-24 16:40:02 +01002418
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002419 addr = __map_single(dev, domain->priv, paddr, size, dir, false,
Joerg Roedel832a90c2008-09-18 15:54:23 +02002420 dma_mask);
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002421 if (addr == DMA_ERROR_CODE)
Joerg Roedel4da70b92008-06-26 21:28:01 +02002422 goto out;
2423
Joerg Roedel17b124b2011-04-06 18:01:35 +02002424 domain_flush_complete(domain);
Joerg Roedel4da70b92008-06-26 21:28:01 +02002425
2426out:
2427 spin_unlock_irqrestore(&domain->lock, flags);
2428
2429 return addr;
2430}
2431
Joerg Roedel431b2a22008-07-11 17:14:22 +02002432/*
 2433 * The exported unmap_page function for dma_ops.
2434 */
FUJITA Tomonori51491362009-01-05 23:47:25 +09002435static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
2436 enum dma_data_direction dir, struct dma_attrs *attrs)
Joerg Roedel4da70b92008-06-26 21:28:01 +02002437{
2438 unsigned long flags;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002439 struct protection_domain *domain;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002440
Joerg Roedel146a6912008-12-12 15:07:12 +01002441 INC_STATS_COUNTER(cnt_unmap_single);
2442
Joerg Roedel94f6d192009-11-24 16:40:02 +01002443 domain = get_domain(dev);
2444 if (IS_ERR(domain))
Joerg Roedel5b28df62008-12-02 17:49:42 +01002445 return;
2446
Joerg Roedel4da70b92008-06-26 21:28:01 +02002447 spin_lock_irqsave(&domain->lock, flags);
2448
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002449 __unmap_single(domain->priv, dma_addr, size, dir);
Joerg Roedel4da70b92008-06-26 21:28:01 +02002450
Joerg Roedel17b124b2011-04-06 18:01:35 +02002451 domain_flush_complete(domain);
Joerg Roedel4da70b92008-06-26 21:28:01 +02002452
2453 spin_unlock_irqrestore(&domain->lock, flags);
2454}
2455
Joerg Roedel431b2a22008-07-11 17:14:22 +02002456/*
 2457 * This is a special map_sg function which is used if we have to map a
 2458 * device which is not handled by an AMD IOMMU in the system.
2459 */
Joerg Roedel65b050a2008-06-26 21:28:02 +02002460static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
2461 int nelems, int dir)
2462{
2463 struct scatterlist *s;
2464 int i;
2465
2466 for_each_sg(sglist, s, nelems, i) {
2467 s->dma_address = (dma_addr_t)sg_phys(s);
2468 s->dma_length = s->length;
2469 }
2470
2471 return nelems;
2472}
2473
Joerg Roedel431b2a22008-07-11 17:14:22 +02002474/*
2475 * The exported map_sg function for dma_ops (handles scatter-gather
2476 * lists).
2477 */
Joerg Roedel65b050a2008-06-26 21:28:02 +02002478static int map_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002479 int nelems, enum dma_data_direction dir,
2480 struct dma_attrs *attrs)
Joerg Roedel65b050a2008-06-26 21:28:02 +02002481{
2482 unsigned long flags;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002483 struct protection_domain *domain;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002484 int i;
2485 struct scatterlist *s;
2486 phys_addr_t paddr;
2487 int mapped_elems = 0;
Joerg Roedel832a90c2008-09-18 15:54:23 +02002488 u64 dma_mask;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002489
Joerg Roedeld03f067a2008-12-12 15:09:48 +01002490 INC_STATS_COUNTER(cnt_map_sg);
2491
Joerg Roedel94f6d192009-11-24 16:40:02 +01002492 domain = get_domain(dev);
2493 if (PTR_ERR(domain) == -EINVAL)
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002494 return map_sg_no_iommu(dev, sglist, nelems, dir);
Joerg Roedel94f6d192009-11-24 16:40:02 +01002495 else if (IS_ERR(domain))
2496 return 0;
Joerg Roedeldbcc1122008-09-04 15:04:26 +02002497
Joerg Roedel832a90c2008-09-18 15:54:23 +02002498 dma_mask = *dev->dma_mask;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002499
Joerg Roedel65b050a2008-06-26 21:28:02 +02002500 spin_lock_irqsave(&domain->lock, flags);
2501
2502 for_each_sg(sglist, s, nelems, i) {
2503 paddr = sg_phys(s);
2504
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002505 s->dma_address = __map_single(dev, domain->priv,
Joerg Roedel832a90c2008-09-18 15:54:23 +02002506 paddr, s->length, dir, false,
2507 dma_mask);
Joerg Roedel65b050a2008-06-26 21:28:02 +02002508
2509 if (s->dma_address) {
2510 s->dma_length = s->length;
2511 mapped_elems++;
2512 } else
2513 goto unmap;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002514 }
2515
Joerg Roedel17b124b2011-04-06 18:01:35 +02002516 domain_flush_complete(domain);
Joerg Roedel65b050a2008-06-26 21:28:02 +02002517
2518out:
2519 spin_unlock_irqrestore(&domain->lock, flags);
2520
2521 return mapped_elems;
2522unmap:
2523 for_each_sg(sglist, s, mapped_elems, i) {
2524 if (s->dma_address)
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002525 __unmap_single(domain->priv, s->dma_address,
Joerg Roedel65b050a2008-06-26 21:28:02 +02002526 s->dma_length, dir);
2527 s->dma_address = s->dma_length = 0;
2528 }
2529
2530 mapped_elems = 0;
2531
2532 goto out;
2533}
2534
Joerg Roedel431b2a22008-07-11 17:14:22 +02002535/*
 2536 * The exported unmap_sg function for dma_ops (handles scatter-gather
2537 * lists).
2538 */
Joerg Roedel65b050a2008-06-26 21:28:02 +02002539static void unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002540 int nelems, enum dma_data_direction dir,
2541 struct dma_attrs *attrs)
Joerg Roedel65b050a2008-06-26 21:28:02 +02002542{
2543 unsigned long flags;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002544 struct protection_domain *domain;
2545 struct scatterlist *s;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002546 int i;
2547
Joerg Roedel55877a62008-12-12 15:12:14 +01002548 INC_STATS_COUNTER(cnt_unmap_sg);
2549
Joerg Roedel94f6d192009-11-24 16:40:02 +01002550 domain = get_domain(dev);
2551 if (IS_ERR(domain))
Joerg Roedel5b28df62008-12-02 17:49:42 +01002552 return;
2553
Joerg Roedel65b050a2008-06-26 21:28:02 +02002554 spin_lock_irqsave(&domain->lock, flags);
2555
2556 for_each_sg(sglist, s, nelems, i) {
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002557 __unmap_single(domain->priv, s->dma_address,
Joerg Roedel65b050a2008-06-26 21:28:02 +02002558 s->dma_length, dir);
Joerg Roedel65b050a2008-06-26 21:28:02 +02002559 s->dma_address = s->dma_length = 0;
2560 }
2561
Joerg Roedel17b124b2011-04-06 18:01:35 +02002562 domain_flush_complete(domain);
Joerg Roedel65b050a2008-06-26 21:28:02 +02002563
2564 spin_unlock_irqrestore(&domain->lock, flags);
2565}
2566
Joerg Roedel431b2a22008-07-11 17:14:22 +02002567/*
2568 * The exported alloc_coherent function for dma_ops.
2569 */
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002570static void *alloc_coherent(struct device *dev, size_t size,
2571 dma_addr_t *dma_addr, gfp_t flag)
2572{
2573 unsigned long flags;
2574 void *virt_addr;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002575 struct protection_domain *domain;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002576 phys_addr_t paddr;
Joerg Roedel832a90c2008-09-18 15:54:23 +02002577 u64 dma_mask = dev->coherent_dma_mask;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002578
Joerg Roedelc8f0fb32008-12-12 15:14:21 +01002579 INC_STATS_COUNTER(cnt_alloc_coherent);
2580
Joerg Roedel94f6d192009-11-24 16:40:02 +01002581 domain = get_domain(dev);
2582 if (PTR_ERR(domain) == -EINVAL) {
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002583 virt_addr = (void *)__get_free_pages(flag, get_order(size));
2584 *dma_addr = __pa(virt_addr);
2585 return virt_addr;
Joerg Roedel94f6d192009-11-24 16:40:02 +01002586 } else if (IS_ERR(domain))
2587 return NULL;
Joerg Roedeldbcc1122008-09-04 15:04:26 +02002588
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002589 dma_mask = dev->coherent_dma_mask;
2590 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2591 flag |= __GFP_ZERO;
FUJITA Tomonori13d9fea2008-09-10 20:19:40 +09002592
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002593 virt_addr = (void *)__get_free_pages(flag, get_order(size));
2594 if (!virt_addr)
Jaswinder Singh Rajputb25ae672009-07-01 19:53:14 +05302595 return NULL;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002596
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002597 paddr = virt_to_phys(virt_addr);
2598
Joerg Roedel832a90c2008-09-18 15:54:23 +02002599 if (!dma_mask)
2600 dma_mask = *dev->dma_mask;
2601
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002602 spin_lock_irqsave(&domain->lock, flags);
2603
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002604 *dma_addr = __map_single(dev, domain->priv, paddr,
Joerg Roedel832a90c2008-09-18 15:54:23 +02002605 size, DMA_BIDIRECTIONAL, true, dma_mask);
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002606
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002607 if (*dma_addr == DMA_ERROR_CODE) {
Jiri Slaby367d04c2009-05-28 09:54:48 +02002608 spin_unlock_irqrestore(&domain->lock, flags);
Joerg Roedel5b28df62008-12-02 17:49:42 +01002609 goto out_free;
Jiri Slaby367d04c2009-05-28 09:54:48 +02002610 }
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002611
Joerg Roedel17b124b2011-04-06 18:01:35 +02002612 domain_flush_complete(domain);
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002613
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002614 spin_unlock_irqrestore(&domain->lock, flags);
2615
2616 return virt_addr;
Joerg Roedel5b28df62008-12-02 17:49:42 +01002617
2618out_free:
2619
2620 free_pages((unsigned long)virt_addr, get_order(size));
2621
2622 return NULL;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002623}
2624
Joerg Roedel431b2a22008-07-11 17:14:22 +02002625/*
2626 * The exported free_coherent function for dma_ops.
Joerg Roedel431b2a22008-07-11 17:14:22 +02002627 */
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002628static void free_coherent(struct device *dev, size_t size,
2629 void *virt_addr, dma_addr_t dma_addr)
2630{
2631 unsigned long flags;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002632 struct protection_domain *domain;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002633
Joerg Roedel5d31ee72008-12-12 15:16:38 +01002634 INC_STATS_COUNTER(cnt_free_coherent);
2635
Joerg Roedel94f6d192009-11-24 16:40:02 +01002636 domain = get_domain(dev);
2637 if (IS_ERR(domain))
Joerg Roedel5b28df62008-12-02 17:49:42 +01002638 goto free_mem;
2639
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002640 spin_lock_irqsave(&domain->lock, flags);
2641
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002642 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002643
Joerg Roedel17b124b2011-04-06 18:01:35 +02002644 domain_flush_complete(domain);
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002645
2646 spin_unlock_irqrestore(&domain->lock, flags);
2647
2648free_mem:
2649 free_pages((unsigned long)virt_addr, get_order(size));
2650}
2651
Joerg Roedelc432f3d2008-06-26 21:28:04 +02002652/*
Joerg Roedelb39ba6a2008-09-09 18:40:46 +02002653 * This function is called by the DMA layer to find out if we can handle a
2654 * particular device. It is part of the dma_ops.
2655 */
2656static int amd_iommu_dma_supported(struct device *dev, u64 mask)
2657{
Joerg Roedel420aef82009-11-23 16:14:57 +01002658 return check_device(dev);
Joerg Roedelb39ba6a2008-09-09 18:40:46 +02002659}
2660
2661/*
Joerg Roedel431b2a22008-07-11 17:14:22 +02002662 * The function for pre-allocating protection domains.
2663 *
Joerg Roedelc432f3d2008-06-26 21:28:04 +02002664 * If the driver core informed the DMA layer when a driver grabs a
 2665 * device we would not need to preallocate the protection domains
 2666 * anymore. For now we have to.
2667 */
Jaswinder Singh Rajput0e93dd82008-12-29 21:45:22 +05302668static void prealloc_protection_domains(void)
Joerg Roedelc432f3d2008-06-26 21:28:04 +02002669{
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002670 struct iommu_dev_data *dev_data;
Joerg Roedelc432f3d2008-06-26 21:28:04 +02002671 struct dma_ops_domain *dma_dom;
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002672 struct pci_dev *dev = NULL;
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002673 u16 devid;
Joerg Roedelc432f3d2008-06-26 21:28:04 +02002674
Chris Wrightd18c69d2010-04-02 18:27:55 -07002675 for_each_pci_dev(dev) {
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002676
2677 /* Do we handle this device? */
2678 if (!check_device(&dev->dev))
Joerg Roedelc432f3d2008-06-26 21:28:04 +02002679 continue;
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002680
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002681 dev_data = get_dev_data(&dev->dev);
2682 if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
2683 /* Make sure passthrough domain is allocated */
2684 alloc_passthrough_domain();
2685 dev_data->passthrough = true;
2686 attach_device(&dev->dev, pt_domain);
2687 pr_info("AMD-Vi: Using passthough domain for device %s\n",
2688 dev_name(&dev->dev));
2689 }
2690
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002691 /* Is there already any domain for it? */
Joerg Roedel15898bb2009-11-24 15:39:42 +01002692 if (domain_for_device(&dev->dev))
Joerg Roedelc432f3d2008-06-26 21:28:04 +02002693 continue;
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002694
2695 devid = get_device_id(&dev->dev);
2696
Joerg Roedel87a64d52009-11-24 17:26:43 +01002697 dma_dom = dma_ops_domain_alloc();
Joerg Roedelc432f3d2008-06-26 21:28:04 +02002698 if (!dma_dom)
2699 continue;
2700 init_unity_mappings_for_device(dma_dom, devid);
Joerg Roedelbd60b732008-09-11 10:24:48 +02002701 dma_dom->target_dev = devid;
2702
Joerg Roedel15898bb2009-11-24 15:39:42 +01002703 attach_device(&dev->dev, &dma_dom->domain);
Joerg Roedelbe831292009-11-23 12:50:00 +01002704
Joerg Roedelbd60b732008-09-11 10:24:48 +02002705 list_add_tail(&dma_dom->list, &iommu_pd_list);
Joerg Roedelc432f3d2008-06-26 21:28:04 +02002706 }
2707}
2708
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002709static struct dma_map_ops amd_iommu_dma_ops = {
Joerg Roedel6631ee92008-06-26 21:28:05 +02002710 .alloc_coherent = alloc_coherent,
2711 .free_coherent = free_coherent,
FUJITA Tomonori51491362009-01-05 23:47:25 +09002712 .map_page = map_page,
2713 .unmap_page = unmap_page,
Joerg Roedel6631ee92008-06-26 21:28:05 +02002714 .map_sg = map_sg,
2715 .unmap_sg = unmap_sg,
Joerg Roedelb39ba6a2008-09-09 18:40:46 +02002716 .dma_supported = amd_iommu_dma_supported,
Joerg Roedel6631ee92008-06-26 21:28:05 +02002717};
2718
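/*
 * Select the dma_ops for every PCI device: translated devices get the
 * IOMMU dma_ops, passthrough devices the nommu ops. Returns the number
 * of devices not handled by any IOMMU so the caller can decide whether
 * swiotlb is still needed for them.
 */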
Joerg Roedel27c21272011-05-30 15:56:24 +02002719static unsigned device_dma_ops_init(void)
2720{
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002721 struct iommu_dev_data *dev_data;
Joerg Roedel27c21272011-05-30 15:56:24 +02002722 struct pci_dev *pdev = NULL;
2723 unsigned unhandled = 0;
2724
2725 for_each_pci_dev(pdev) {
2726 if (!check_device(&pdev->dev)) {
2727 unhandled += 1;
2728 continue;
2729 }
2730
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002731 dev_data = get_dev_data(&pdev->dev);
2732
2733 if (!dev_data->passthrough)
2734 pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
2735 else
2736 pdev->dev.archdata.dma_ops = &nommu_dma_ops;
Joerg Roedel27c21272011-05-30 15:56:24 +02002737 }
2738
2739 return unhandled;
2740}
2741
Joerg Roedel431b2a22008-07-11 17:14:22 +02002742/*
2743 * The function which hooks the AMD IOMMU driver into the dma_ops interface.
2744 */
Joerg Roedelf5325092010-01-22 17:44:35 +01002745
2746void __init amd_iommu_init_api(void)
2747{
Joerg Roedel2cc21c42011-09-06 17:56:07 +02002748 bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
Joerg Roedelf5325092010-01-22 17:44:35 +01002749}
2750
Joerg Roedel6631ee92008-06-26 21:28:05 +02002751int __init amd_iommu_init_dma_ops(void)
2752{
2753 struct amd_iommu *iommu;
Joerg Roedel27c21272011-05-30 15:56:24 +02002754 int ret, unhandled;
Joerg Roedel6631ee92008-06-26 21:28:05 +02002755
Joerg Roedel431b2a22008-07-11 17:14:22 +02002756 /*
2757 * first allocate a default protection domain for every IOMMU we
2758 * found in the system. Devices not assigned to any other
2759 * protection domain will be assigned to the default one.
2760 */
Joerg Roedel3bd22172009-05-04 15:06:20 +02002761 for_each_iommu(iommu) {
Joerg Roedel87a64d52009-11-24 17:26:43 +01002762 iommu->default_dom = dma_ops_domain_alloc();
Joerg Roedel6631ee92008-06-26 21:28:05 +02002763 if (iommu->default_dom == NULL)
2764 return -ENOMEM;
Joerg Roedele2dc14a2008-12-10 18:48:59 +01002765 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
Joerg Roedel6631ee92008-06-26 21:28:05 +02002766 ret = iommu_init_unity_mappings(iommu);
2767 if (ret)
2768 goto free_domains;
2769 }
2770
Joerg Roedel431b2a22008-07-11 17:14:22 +02002771 /*
Joerg Roedel8793abe2009-11-27 11:40:33 +01002772	 * Pre-allocate a protection domain for each device.
Joerg Roedel431b2a22008-07-11 17:14:22 +02002773 */
Joerg Roedel8793abe2009-11-27 11:40:33 +01002774 prealloc_protection_domains();
Joerg Roedel6631ee92008-06-26 21:28:05 +02002775
2776 iommu_detected = 1;
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09002777 swiotlb = 0;
Joerg Roedel6631ee92008-06-26 21:28:05 +02002778
Joerg Roedel431b2a22008-07-11 17:14:22 +02002779 /* Make the driver finally visible to the drivers */
Joerg Roedel27c21272011-05-30 15:56:24 +02002780 unhandled = device_dma_ops_init();
2781 if (unhandled && max_pfn > MAX_DMA32_PFN) {
2782 /* There are unhandled devices - initialize swiotlb for them */
2783 swiotlb = 1;
2784 }
Joerg Roedel6631ee92008-06-26 21:28:05 +02002785
Joerg Roedel7f265082008-12-12 13:50:21 +01002786 amd_iommu_stats_init();
2787
Joerg Roedel6631ee92008-06-26 21:28:05 +02002788 return 0;
2789
2790free_domains:
2791
Joerg Roedel3bd22172009-05-04 15:06:20 +02002792 for_each_iommu(iommu) {
Joerg Roedel6631ee92008-06-26 21:28:05 +02002793 if (iommu->default_dom)
2794 dma_ops_domain_free(iommu->default_dom);
2795 }
2796
2797 return ret;
2798}
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002799
2800/*****************************************************************************
2801 *
2802 * The following functions belong to the exported interface of AMD IOMMU
2803 *
2804 * This interface allows access to lower level functions of the IOMMU
2805 * like protection domain handling and assignment of devices to domains,
2806 * which is not possible with the dma_ops interface.
2807 *
2808 *****************************************************************************/
2809
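/*
 * Illustrative sketch only (editor's addition): how an external user,
 * device assignment in KVM for instance, might drive the interface
 * implemented below through the generic IOMMU API. Function and
 * parameter names are hypothetical; the order-based iommu_map()
 * prototype assumed here matches amd_iommu_map() in this file.
 */
#if 0
static int example_assign_device(struct device *dev, phys_addr_t paddr)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc(&pci_bus_type); /* -> amd_iommu_domain_init() */
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, dev);	/* -> amd_iommu_attach_device() */
	if (ret)
		goto out_free;

	/* Map one 4k page at IOVA 0 (order 0) -> amd_iommu_map() */
	ret = iommu_map(dom, 0, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	return 0;

out_detach:
	iommu_detach_device(dom, dev);		/* -> amd_iommu_detach_device() */
out_free:
	iommu_domain_free(dom);			/* -> amd_iommu_domain_destroy() */
	return ret;
}
#endif
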
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002810static void cleanup_domain(struct protection_domain *domain)
2811{
Joerg Roedel492667d2009-11-27 13:25:47 +01002812 struct iommu_dev_data *dev_data, *next;
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002813 unsigned long flags;
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002814
2815 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2816
Joerg Roedel492667d2009-11-27 13:25:47 +01002817 list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002818 __detach_device(dev_data);
Joerg Roedel492667d2009-11-27 13:25:47 +01002819 atomic_set(&dev_data->bind, 0);
2820 }
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002821
2822 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2823}
2824
Joerg Roedel26508152009-08-26 16:52:40 +02002825static void protection_domain_free(struct protection_domain *domain)
2826{
2827 if (!domain)
2828 return;
2829
Joerg Roedelaeb26f52009-11-20 16:44:01 +01002830 del_domain_from_list(domain);
2831
Joerg Roedel26508152009-08-26 16:52:40 +02002832 if (domain->id)
2833 domain_id_free(domain->id);
2834
2835 kfree(domain);
2836}
2837
2838static struct protection_domain *protection_domain_alloc(void)
Joerg Roedelc156e342008-12-02 18:13:27 +01002839{
2840 struct protection_domain *domain;
2841
2842 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2843 if (!domain)
Joerg Roedel26508152009-08-26 16:52:40 +02002844 return NULL;
Joerg Roedelc156e342008-12-02 18:13:27 +01002845
2846 spin_lock_init(&domain->lock);
Joerg Roedel5d214fe2010-02-08 14:44:49 +01002847 mutex_init(&domain->api_lock);
Joerg Roedelc156e342008-12-02 18:13:27 +01002848 domain->id = domain_id_alloc();
2849 if (!domain->id)
Joerg Roedel26508152009-08-26 16:52:40 +02002850 goto out_err;
Joerg Roedel7c392cb2009-11-26 11:13:32 +01002851 INIT_LIST_HEAD(&domain->dev_list);
Joerg Roedel26508152009-08-26 16:52:40 +02002852
Joerg Roedelaeb26f52009-11-20 16:44:01 +01002853 add_domain_to_list(domain);
2854
Joerg Roedel26508152009-08-26 16:52:40 +02002855 return domain;
2856
2857out_err:
2858 kfree(domain);
2859
2860 return NULL;
2861}
2862
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002863static int __init alloc_passthrough_domain(void)
2864{
2865 if (pt_domain != NULL)
2866 return 0;
2867
2868 /* allocate passthrough domain */
2869 pt_domain = protection_domain_alloc();
2870 if (!pt_domain)
2871 return -ENOMEM;
2872
2873 pt_domain->mode = PAGE_MODE_NONE;
2874
2875 return 0;
2876}

Joerg Roedel26508152009-08-26 16:52:40 +02002877static int amd_iommu_domain_init(struct iommu_domain *dom)
2878{
2879 struct protection_domain *domain;
2880
2881 domain = protection_domain_alloc();
2882 if (!domain)
Joerg Roedelc156e342008-12-02 18:13:27 +01002883 goto out_free;
Joerg Roedel26508152009-08-26 16:52:40 +02002884
2885 domain->mode = PAGE_MODE_3_LEVEL;
Joerg Roedelc156e342008-12-02 18:13:27 +01002886 domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2887 if (!domain->pt_root)
2888 goto out_free;
2889
2890 dom->priv = domain;
2891
2892 return 0;
2893
2894out_free:
Joerg Roedel26508152009-08-26 16:52:40 +02002895 protection_domain_free(domain);
Joerg Roedelc156e342008-12-02 18:13:27 +01002896
2897 return -ENOMEM;
2898}
2899
Joerg Roedel98383fc2008-12-02 18:34:12 +01002900static void amd_iommu_domain_destroy(struct iommu_domain *dom)
2901{
2902 struct protection_domain *domain = dom->priv;
2903
2904 if (!domain)
2905 return;
2906
2907 if (domain->dev_cnt > 0)
2908 cleanup_domain(domain);
2909
2910 BUG_ON(domain->dev_cnt != 0);
2911
Joerg Roedel132bd682011-11-17 14:18:46 +01002912 if (domain->mode != PAGE_MODE_NONE)
2913 free_pagetable(domain);
Joerg Roedel98383fc2008-12-02 18:34:12 +01002914
Joerg Roedel52815b72011-11-17 17:24:28 +01002915 if (domain->flags & PD_IOMMUV2_MASK)
2916 free_gcr3_table(domain);
2917
Joerg Roedel8b408fe2010-03-08 14:20:07 +01002918 protection_domain_free(domain);
Joerg Roedel98383fc2008-12-02 18:34:12 +01002919
2920 dom->priv = NULL;
2921}
2922
Joerg Roedel684f2882008-12-08 12:07:44 +01002923static void amd_iommu_detach_device(struct iommu_domain *dom,
2924 struct device *dev)
2925{
Joerg Roedel657cbb62009-11-23 15:26:46 +01002926 struct iommu_dev_data *dev_data = dev->archdata.iommu;
Joerg Roedel684f2882008-12-08 12:07:44 +01002927 struct amd_iommu *iommu;
Joerg Roedel684f2882008-12-08 12:07:44 +01002928 u16 devid;
2929
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002930 if (!check_device(dev))
Joerg Roedel684f2882008-12-08 12:07:44 +01002931 return;
2932
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002933 devid = get_device_id(dev);
Joerg Roedel684f2882008-12-08 12:07:44 +01002934
Joerg Roedel657cbb62009-11-23 15:26:46 +01002935 if (dev_data->domain != NULL)
Joerg Roedel15898bb2009-11-24 15:39:42 +01002936 detach_device(dev);
Joerg Roedel684f2882008-12-08 12:07:44 +01002937
2938 iommu = amd_iommu_rlookup_table[devid];
2939 if (!iommu)
2940 return;
2941
Joerg Roedel684f2882008-12-08 12:07:44 +01002942 iommu_completion_wait(iommu);
2943}
2944
Joerg Roedel01106062008-12-02 19:34:11 +01002945static int amd_iommu_attach_device(struct iommu_domain *dom,
2946 struct device *dev)
2947{
2948 struct protection_domain *domain = dom->priv;
Joerg Roedel657cbb62009-11-23 15:26:46 +01002949 struct iommu_dev_data *dev_data;
Joerg Roedel01106062008-12-02 19:34:11 +01002950 struct amd_iommu *iommu;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002951 int ret;
Joerg Roedel01106062008-12-02 19:34:11 +01002952
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002953 if (!check_device(dev))
Joerg Roedel01106062008-12-02 19:34:11 +01002954 return -EINVAL;
2955
Joerg Roedel657cbb62009-11-23 15:26:46 +01002956 dev_data = dev->archdata.iommu;
2957
Joerg Roedelf62dda62011-06-09 12:55:35 +02002958 iommu = amd_iommu_rlookup_table[dev_data->devid];
Joerg Roedel01106062008-12-02 19:34:11 +01002959 if (!iommu)
2960 return -EINVAL;
2961
Joerg Roedel657cbb62009-11-23 15:26:46 +01002962 if (dev_data->domain)
Joerg Roedel15898bb2009-11-24 15:39:42 +01002963 detach_device(dev);
Joerg Roedel01106062008-12-02 19:34:11 +01002964
Joerg Roedel15898bb2009-11-24 15:39:42 +01002965 ret = attach_device(dev, domain);
Joerg Roedel01106062008-12-02 19:34:11 +01002966
2967 iommu_completion_wait(iommu);
2968
Joerg Roedel15898bb2009-11-24 15:39:42 +01002969 return ret;
Joerg Roedel01106062008-12-02 19:34:11 +01002970}
2971
Joerg Roedel468e2362010-01-21 16:37:36 +01002972static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
2973 phys_addr_t paddr, int gfp_order, int iommu_prot)
Joerg Roedelc6229ca2008-12-02 19:48:43 +01002974{
Joerg Roedel468e2362010-01-21 16:37:36 +01002975 unsigned long page_size = 0x1000UL << gfp_order;
Joerg Roedelc6229ca2008-12-02 19:48:43 +01002976 struct protection_domain *domain = dom->priv;
Joerg Roedelc6229ca2008-12-02 19:48:43 +01002977 int prot = 0;
2978 int ret;
2979
Joerg Roedel132bd682011-11-17 14:18:46 +01002980 if (domain->mode == PAGE_MODE_NONE)
2981 return -EINVAL;
2982
Joerg Roedelc6229ca2008-12-02 19:48:43 +01002983 if (iommu_prot & IOMMU_READ)
2984 prot |= IOMMU_PROT_IR;
2985 if (iommu_prot & IOMMU_WRITE)
2986 prot |= IOMMU_PROT_IW;
2987
Joerg Roedel5d214fe2010-02-08 14:44:49 +01002988 mutex_lock(&domain->api_lock);
Joerg Roedel795e74f72010-05-11 17:40:57 +02002989 ret = iommu_map_page(domain, iova, paddr, prot, page_size);
Joerg Roedel5d214fe2010-02-08 14:44:49 +01002990 mutex_unlock(&domain->api_lock);
2991
Joerg Roedel795e74f72010-05-11 17:40:57 +02002992 return ret;
Joerg Roedelc6229ca2008-12-02 19:48:43 +01002993}
2994
Joerg Roedel468e2362010-01-21 16:37:36 +01002995static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
2996 int gfp_order)
Joerg Roedeleb74ff62008-12-02 19:59:10 +01002997{
Joerg Roedeleb74ff62008-12-02 19:59:10 +01002998 struct protection_domain *domain = dom->priv;
Joerg Roedel468e2362010-01-21 16:37:36 +01002999 unsigned long page_size, unmap_size;
Joerg Roedeleb74ff62008-12-02 19:59:10 +01003000
Joerg Roedel132bd682011-11-17 14:18:46 +01003001 if (domain->mode == PAGE_MODE_NONE)
3002 return -EINVAL;
3003
Joerg Roedel468e2362010-01-21 16:37:36 +01003004 page_size = 0x1000UL << gfp_order;
Joerg Roedeleb74ff62008-12-02 19:59:10 +01003005
Joerg Roedel5d214fe2010-02-08 14:44:49 +01003006 mutex_lock(&domain->api_lock);
Joerg Roedel468e2362010-01-21 16:37:36 +01003007 unmap_size = iommu_unmap_page(domain, iova, page_size);
Joerg Roedel795e74f72010-05-11 17:40:57 +02003008 mutex_unlock(&domain->api_lock);
Joerg Roedeleb74ff62008-12-02 19:59:10 +01003009
Joerg Roedel17b124b2011-04-06 18:01:35 +02003010 domain_flush_tlb_pde(domain);
Joerg Roedel5d214fe2010-02-08 14:44:49 +01003011
Joerg Roedel468e2362010-01-21 16:37:36 +01003012 return get_order(unmap_size);
Joerg Roedeleb74ff62008-12-02 19:59:10 +01003013}
3014
Joerg Roedel645c4c82008-12-02 20:05:50 +01003015static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
3016 unsigned long iova)
3017{
3018 struct protection_domain *domain = dom->priv;
Joerg Roedelf03152b2010-01-21 16:15:24 +01003019 unsigned long offset_mask;
Joerg Roedel645c4c82008-12-02 20:05:50 +01003020 phys_addr_t paddr;
Joerg Roedelf03152b2010-01-21 16:15:24 +01003021 u64 *pte, __pte;
Joerg Roedel645c4c82008-12-02 20:05:50 +01003022
Joerg Roedel132bd682011-11-17 14:18:46 +01003023 if (domain->mode == PAGE_MODE_NONE)
3024 return iova;
3025
Joerg Roedel24cd7722010-01-19 17:27:39 +01003026 pte = fetch_pte(domain, iova);
Joerg Roedel645c4c82008-12-02 20:05:50 +01003027
Joerg Roedela6d41a42009-09-02 17:08:55 +02003028 if (!pte || !IOMMU_PTE_PRESENT(*pte))
Joerg Roedel645c4c82008-12-02 20:05:50 +01003029 return 0;
3030
Joerg Roedelf03152b2010-01-21 16:15:24 +01003031 if (PM_PTE_LEVEL(*pte) == 0)
3032 offset_mask = PAGE_SIZE - 1;
3033 else
3034 offset_mask = PTE_PAGE_SIZE(*pte) - 1;
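	/*
	 * Illustration (editor's note): a 4k leaf PTE yields an
	 * offset_mask of 0xfff, a 2M PTE one of 0x1fffff; the low IOVA
	 * bits covered by the mask pass through unchanged into the
	 * physical address computed below.
	 */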
3035
3036 __pte = *pte & PM_ADDR_MASK;
3037 paddr = (__pte & ~offset_mask) | (iova & offset_mask);
Joerg Roedel645c4c82008-12-02 20:05:50 +01003038
3039 return paddr;
3040}
3041
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003042static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
3043 unsigned long cap)
3044{
Joerg Roedel80a506b2010-07-27 17:14:24 +02003045 switch (cap) {
3046 case IOMMU_CAP_CACHE_COHERENCY:
3047 return 1;
3048 }
3049
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003050 return 0;
3051}
3052
Joerg Roedel26961ef2008-12-03 17:00:17 +01003053static struct iommu_ops amd_iommu_ops = {
3054 .domain_init = amd_iommu_domain_init,
3055 .domain_destroy = amd_iommu_domain_destroy,
3056 .attach_dev = amd_iommu_attach_device,
3057 .detach_dev = amd_iommu_detach_device,
Joerg Roedel468e2362010-01-21 16:37:36 +01003058 .map = amd_iommu_map,
3059 .unmap = amd_iommu_unmap,
Joerg Roedel26961ef2008-12-03 17:00:17 +01003060 .iova_to_phys = amd_iommu_iova_to_phys,
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003061 .domain_has_cap = amd_iommu_domain_has_cap,
Joerg Roedel26961ef2008-12-03 17:00:17 +01003062};
3063
Joerg Roedel0feae532009-08-26 15:26:30 +02003064/*****************************************************************************
3065 *
3066 * The next functions do a basic initialization of the IOMMU for passthrough
3067 * mode.
3068 *
3069 * In passthrough mode the IOMMU is initialized and enabled but not used for
3070 * DMA-API translation.
3071 *
3072 *****************************************************************************/
3073
3074int __init amd_iommu_init_passthrough(void)
3075{
Joerg Roedel5abcdba2011-12-01 15:49:45 +01003076 struct iommu_dev_data *dev_data;
Joerg Roedel0feae532009-08-26 15:26:30 +02003077 struct pci_dev *dev = NULL;
Joerg Roedel5abcdba2011-12-01 15:49:45 +01003078 struct amd_iommu *iommu;
Joerg Roedel15898bb2009-11-24 15:39:42 +01003079 u16 devid;
Joerg Roedel5abcdba2011-12-01 15:49:45 +01003080 int ret;
Joerg Roedel0feae532009-08-26 15:26:30 +02003081
Joerg Roedel5abcdba2011-12-01 15:49:45 +01003082 ret = alloc_passthrough_domain();
3083 if (ret)
3084 return ret;
Joerg Roedel0feae532009-08-26 15:26:30 +02003085
Kulikov Vasiliy6c54aab2010-07-03 12:03:51 -04003086 for_each_pci_dev(dev) {
Joerg Roedel98fc5a62009-11-24 17:19:23 +01003087 if (!check_device(&dev->dev))
Joerg Roedel0feae532009-08-26 15:26:30 +02003088 continue;
3089
Joerg Roedel5abcdba2011-12-01 15:49:45 +01003090 dev_data = get_dev_data(&dev->dev);
3091 dev_data->passthrough = true;
3092
Joerg Roedel98fc5a62009-11-24 17:19:23 +01003093 devid = get_device_id(&dev->dev);
3094
Joerg Roedel15898bb2009-11-24 15:39:42 +01003095 iommu = amd_iommu_rlookup_table[devid];
Joerg Roedel0feae532009-08-26 15:26:30 +02003096 if (!iommu)
3097 continue;
3098
Joerg Roedel15898bb2009-11-24 15:39:42 +01003099 attach_device(&dev->dev, pt_domain);
Joerg Roedel0feae532009-08-26 15:26:30 +02003100 }
3101
3102 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
3103
3104 return 0;
3105}
Joerg Roedel72e1dcc2011-11-10 19:13:51 +01003106
3107/* IOMMUv2 specific functions */
3108int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3109{
3110 return atomic_notifier_chain_register(&ppr_notifier, nb);
3111}
3112EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
3113
3114int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
3115{
3116 return atomic_notifier_chain_unregister(&ppr_notifier, nb);
3117}
3118EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
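
/*
 * Illustrative sketch only (editor's addition): the registration
 * pattern an IOMMUv2 consumer might use for the PPR (Peripheral Page
 * Request) notifier chain exported above. The handler name and its
 * behaviour are hypothetical; since this is an atomic notifier chain,
 * the handler must not sleep.
 */
#if 0
static int example_ppr_notify(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	/* 'data' describes the peripheral page request to be serviced */
	return NOTIFY_DONE;
}

static struct notifier_block example_ppr_nb = {
	.notifier_call	= example_ppr_notify,
};

/* at init time: amd_iommu_register_ppr_notifier(&example_ppr_nb); */
#endif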
Joerg Roedel132bd682011-11-17 14:18:46 +01003119
3120void amd_iommu_domain_direct_map(struct iommu_domain *dom)
3121{
3122 struct protection_domain *domain = dom->priv;
3123 unsigned long flags;
3124
3125 spin_lock_irqsave(&domain->lock, flags);
3126
3127 /* Update data structure */
3128 domain->mode = PAGE_MODE_NONE;
3129 domain->updated = true;
3130
3131 /* Make changes visible to IOMMUs */
3132 update_domain(domain);
3133
3134 /* Page-table is not visible to IOMMU anymore, so free it */
3135 free_pagetable(domain);
3136
3137 spin_unlock_irqrestore(&domain->lock, flags);
3138}
3139EXPORT_SYMBOL(amd_iommu_domain_direct_map);
Joerg Roedel52815b72011-11-17 17:24:28 +01003140
3141int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
3142{
3143 struct protection_domain *domain = dom->priv;
3144 unsigned long flags;
3145 int levels, ret;
3146
3147 if (pasids <= 0 || pasids > (PASID_MASK + 1))
3148 return -EINVAL;
3149
3150 /* Number of GCR3 table levels required */
3151 for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
3152 levels += 1;
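	/*
	 * Worked example (editor's illustration): each GCR3 level
	 * resolves 9 PASID bits, so pasids == 512 leaves levels == 0
	 * (a single-level GCR3 table) while pasids == 65536 yields
	 * levels == 1 (a two-level table).
	 */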
3153
3154 if (levels > amd_iommu_max_glx_val)
3155 return -EINVAL;
3156
3157 spin_lock_irqsave(&domain->lock, flags);
3158
3159 /*
3160	 * Rather than sanity-checking whether every device already in
3161	 * the domain supports IOMMUv2, simply require that the domain
3162	 * has no devices attached when it is switched into IOMMUv2 mode.
3163 */
3164 ret = -EBUSY;
3165 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
3166 goto out;
3167
3168 ret = -ENOMEM;
3169 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
3170 if (domain->gcr3_tbl == NULL)
3171 goto out;
3172
3173 domain->glx = levels;
3174 domain->flags |= PD_IOMMUV2_MASK;
3175 domain->updated = true;
3176
3177 update_domain(domain);
3178
3179 ret = 0;
3180
3181out:
3182 spin_unlock_irqrestore(&domain->lock, flags);
3183
3184 return ret;
3185}
3186EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
Joerg Roedel22e266c2011-11-21 15:59:08 +01003187
3188static int __flush_pasid(struct protection_domain *domain, int pasid,
3189 u64 address, bool size)
3190{
3191 struct iommu_dev_data *dev_data;
3192 struct iommu_cmd cmd;
3193 int i, ret;
3194
3195 if (!(domain->flags & PD_IOMMUV2_MASK))
3196 return -EINVAL;
3197
3198 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
3199
3200 /*
3201	 * The IOMMU TLB needs to be flushed before the device TLBs so
3202	 * that a device-TLB refill cannot pick up stale IOMMU-TLB entries
3203 */
3204 for (i = 0; i < amd_iommus_present; ++i) {
3205 if (domain->dev_iommu[i] == 0)
3206 continue;
3207
3208 ret = iommu_queue_command(amd_iommus[i], &cmd);
3209 if (ret != 0)
3210 goto out;
3211 }
3212
3213 /* Wait until IOMMU TLB flushes are complete */
3214 domain_flush_complete(domain);
3215
3216 /* Now flush device TLBs */
3217 list_for_each_entry(dev_data, &domain->dev_list, list) {
3218 struct amd_iommu *iommu;
3219 int qdep;
3220
3221 BUG_ON(!dev_data->ats.enabled);
3222
3223 qdep = dev_data->ats.qdep;
3224 iommu = amd_iommu_rlookup_table[dev_data->devid];
3225
3226 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
3227 qdep, address, size);
3228
3229 ret = iommu_queue_command(iommu, &cmd);
3230 if (ret != 0)
3231 goto out;
3232 }
3233
3234 /* Wait until all device TLBs are flushed */
3235 domain_flush_complete(domain);
3236
3237 ret = 0;
3238
3239out:
3240
3241 return ret;
3242}
3243
3244static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
3245 u64 address)
3246{
3247 return __flush_pasid(domain, pasid, address, false);
3248}
3249
3250int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
3251 u64 address)
3252{
3253 struct protection_domain *domain = dom->priv;
3254 unsigned long flags;
3255 int ret;
3256
3257 spin_lock_irqsave(&domain->lock, flags);
3258 ret = __amd_iommu_flush_page(domain, pasid, address);
3259 spin_unlock_irqrestore(&domain->lock, flags);
3260
3261 return ret;
3262}
3263EXPORT_SYMBOL(amd_iommu_flush_page);
3264
3265static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
3266{
3267 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
3268 true);
3269}
3270
3271int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
3272{
3273 struct protection_domain *domain = dom->priv;
3274 unsigned long flags;
3275 int ret;
3276
3277 spin_lock_irqsave(&domain->lock, flags);
3278 ret = __amd_iommu_flush_tlb(domain, pasid);
3279 spin_unlock_irqrestore(&domain->lock, flags);
3280
3281 return ret;
3282}
3283EXPORT_SYMBOL(amd_iommu_flush_tlb);
3284
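/*
 * Illustrative sketch only (editor's addition): the calling pattern a
 * consumer such as the amd_iommu_v2 driver might follow to put a domain
 * into IOMMUv2/PASID mode using the exports above. The function name is
 * hypothetical and error handling is abbreviated.
 */
#if 0
static int example_enable_v2(struct iommu_domain *dom)
{
	int ret;

	/* Typically the host page-table is dropped first */
	amd_iommu_domain_direct_map(dom);

	/* Allow up to 2^16 PASIDs; this allocates the GCR3 table */
	ret = amd_iommu_domain_enable_v2(dom, 1 << 16);
	if (ret)
		return ret;

	/* After changing mappings for a PASID, flush its TLB */
	return amd_iommu_flush_tlb(dom, 1);
}
#endif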