/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define EXIT_LOOP_COUNT 10000000

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e);

/* returns !0 if the IOMMU is caching non-present entries in its TLB */
static int iommu_has_npcache(struct amd_iommu *iommu)
{
	return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
}

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

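/*
 * Decode one entry from the hardware event log and print its contents
 * (event type, device id, domain, address and flags) to the kernel log.
 */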
static void iommu_print_event(void *__evt)
{
	u32 *event = __evt;
	int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	u64 address = (u64)(((u64)event[3]) << 32) | event[2];

	printk(KERN_ERR "AMD IOMMU: Event logged [");

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_IO_FAULT:
		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
	}
}

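/*
 * Walk the event log ring buffer from head to tail, print every entry
 * and write the new head pointer back to the hardware.
 */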
static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

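/*
 * Interrupt handler for the AMD IOMMU: polls the event logs of all
 * IOMMUs in the system.
 */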
irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	struct amd_iommu *iommu;

	list_for_each_entry(iommu, &amd_iommu_list, list)
		iommu_poll_events(iommu);

	return IRQ_HANDLED;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command. Must be called with iommu->lock held.
 */
static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	u32 tail, head;
	u8 *target;

	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	target = iommu->cmd_buf + tail;
	memcpy_toio(target, cmd, sizeof(*cmd));
	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	if (tail == head)
		return -ENOMEM;
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	return 0;
}

/*
 * General queuing function for commands. Takes iommu->lock and calls
 * __iommu_queue_command().
 */
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	if (!ret)
		iommu->need_sync = 1;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

/*
 * This function is called whenever we need to ensure that the IOMMU has
 * completed execution of all commands we sent. It sends a
 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
 * us about that by writing a value to a physical address we pass with
 * the command.
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	int ret = 0, ready = 0;
	unsigned status = 0;
	struct iommu_cmd cmd;
	unsigned long flags, i = 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

	spin_lock_irqsave(&iommu->lock, flags);

	if (!iommu->need_sync)
		goto out;

	iommu->need_sync = 0;

	ret = __iommu_queue_command(iommu, &cmd);

	if (ret)
		goto out;

	while (!ready && (i < EXIT_LOOP_COUNT)) {
		++i;
		/* wait for the bit to become one */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
	}

	/* set bit back to zero */
	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);

	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
out:
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/*
 * Command send function for invalidating a device table entry
 */
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;
	int ret;

	BUG_ON(iommu == NULL);

	memset(&cmd, 0, sizeof(cmd));
	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
	cmd.data[0] = devid;

	ret = iommu_queue_command(iommu, &cmd);

	return ret;
}

/*
 * Generic command send function for invalidating TLB entries
 */
static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
		u64 address, u16 domid, int pde, int s)
{
	struct iommu_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	address &= PAGE_MASK;
	CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
	cmd.data[1] |= domid;
	cmd.data[2] = lower_32_bits(address);
	cmd.data[3] = upper_32_bits(address);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;

	ret = iommu_queue_command(iommu, &cmd);

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
		u64 address, size_t size)
{
	int s = 0;
	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);

	address &= PAGE_MASK;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);

	return 0;
}

/* Flush the whole IO/TLB for a given protection domain */
static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
{
	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;

	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map(struct protection_domain *dom,
		     unsigned long bus_addr,
		     unsigned long phys_addr,
		     int prot)
{
	u64 __pte, *pte, *page;

	bus_addr  = PAGE_ALIGN(bus_addr);
	phys_addr = PAGE_ALIGN(phys_addr);

	/* only support 512GB address spaces for now */
	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];

	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		*pte = IOMMU_L2_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];

	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		*pte = IOMMU_L1_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];

	if (IOMMU_PTE_PRESENT(*pte))
		return -EBUSY;

	__pte = phys_addr | IOMMU_PTE_P;
	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	*pte = __pte;

	return 0;
}

/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
			       struct unity_map_entry *entry)
{
	u16 bdf, i;

	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
		bdf = amd_iommu_alias_table[i];
		if (amd_iommu_rlookup_table[bdf] == iommu)
			return 1;
	}

	return 0;
}

/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default DMA domain of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e)
{
	u64 addr;
	int ret;

	for (addr = e->address_start; addr < e->address_end;
	     addr += PAGE_SIZE) {
		ret = iommu_map(&dma_dom->domain, addr, addr, e->prot);
		if (ret)
			return ret;
		/*
		 * if unity mapping is in aperture range mark the page
		 * as allocated in the aperture
		 */
		if (addr < dma_dom->aperture_size)
			__set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap);
	}

	return 0;
}

/*
 * Inits the unity mappings required for a specific device
 */
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
					  u16 devid)
{
	struct unity_map_entry *e;
	int ret;

	list_for_each_entry(e, &amd_iommu_unity_map, list) {
		if (!(devid >= e->devid_start && devid <= e->devid_end))
			continue;
		ret = dma_ops_unity_map(dma_dom, e);
		if (ret)
			return ret;
	}

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/

/*
 * The address allocator core function.
 *
 * called with domain->lock held
 */
static unsigned long dma_ops_alloc_addresses(struct device *dev,
					     struct dma_ops_domain *dom,
					     unsigned int pages,
					     unsigned long align_mask,
					     u64 dma_mask)
{
	unsigned long limit;
	unsigned long address;
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			PAGE_SIZE) >> PAGE_SHIFT;
	limit = iommu_device_max_index(dom->aperture_size >> PAGE_SHIFT, 0,
				       dma_mask >> PAGE_SHIFT);

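	/*
	 * When the next-fit pointer wraps around to the start of the
	 * aperture, previously freed addresses may be handed out again,
	 * so the IO/TLB has to be flushed before they are reused.
	 */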
	if (dom->next_bit >= limit) {
		dom->next_bit = 0;
		dom->need_flush = true;
	}

	address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
				   0, boundary_size, align_mask);
	if (address == -1) {
		address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
				0, boundary_size, align_mask);
		dom->need_flush = true;
	}

	if (likely(address != -1)) {
		dom->next_bit = address + pages;
		address <<= PAGE_SHIFT;
	} else
		address = bad_dma_address;

	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

	return address;
}

/*
 * The address free function.
 *
 * called with domain->lock held
 */
static void dma_ops_free_addresses(struct dma_ops_domain *dom,
				   unsigned long address,
				   unsigned int pages)
{
	address >>= PAGE_SHIFT;
	iommu_area_free(dom->bitmap, address, pages);

	if (address >= dom->next_bit)
		dom->need_flush = true;
}

/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/

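/*
 * Allocate a protection domain id from the global domain id bitmap.
 * Returns 0 if no free id is left.
 */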
static u16 domain_id_alloc(void)
{
	unsigned long flags;
	int id;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	BUG_ON(id == 0);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);
	else
		id = 0;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return id;
}

/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	unsigned int last_page = dom->aperture_size >> PAGE_SHIFT;

	if (start_page + pages > last_page)
		pages = last_page - start_page;

	iommu_area_reserve(dom->bitmap, start_page, pages);
}

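/*
 * Walk the 3-level page table of a dma_ops domain and free all
 * page table pages allocated for it.
 */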
static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
{
	int i, j;
	u64 *p1, *p2, *p3;

	p1 = dma_dom->domain.pt_root;

	if (!p1)
		return;

	for (i = 0; i < 512; ++i) {
		if (!IOMMU_PTE_PRESENT(p1[i]))
			continue;

		p2 = IOMMU_PTE_PAGE(p1[i]);
		for (j = 0; j < 512; ++j) {
			if (!IOMMU_PTE_PRESENT(p2[j]))
				continue;
			p3 = IOMMU_PTE_PAGE(p2[j]);
			free_page((unsigned long)p3);
		}

		free_page((unsigned long)p2);
	}

	free_page((unsigned long)p1);
}

/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
 */
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
	if (!dom)
		return;

	dma_ops_free_pagetable(dom);

	kfree(dom->pte_pages);

	kfree(dom->bitmap);

	kfree(dom);
}

/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
						   unsigned order)
{
	struct dma_ops_domain *dma_dom;
	unsigned i, num_pte_pages;
	u64 *l2_pde;
	u64 address;

	/*
	 * Currently the DMA aperture must be between 32 MB and 1GB in size
	 */
	if ((order < 25) || (order > 30))
		return NULL;

	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
	if (!dma_dom)
		return NULL;

	spin_lock_init(&dma_dom->domain.lock);

	dma_dom->domain.id = domain_id_alloc();
	if (dma_dom->domain.id == 0)
		goto free_dma_dom;
	dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	dma_dom->domain.priv = dma_dom;
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;
	dma_dom->aperture_size = (1ULL << order);
	dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8),
				  GFP_KERNEL);
	if (!dma_dom->bitmap)
		goto free_dma_dom;
	/*
	 * mark the first page as allocated so we never return 0 as
	 * a valid dma-address. So we can use 0 as error value
	 */
	dma_dom->bitmap[0] = 1;
	dma_dom->next_bit = 0;

	dma_dom->need_flush = false;
	dma_dom->target_dev = 0xffff;

	/* Initialize the exclusion range if necessary */
	if (iommu->exclusion_start &&
	    iommu->exclusion_start < dma_dom->aperture_size) {
		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
		int pages = iommu_num_pages(iommu->exclusion_start,
					    iommu->exclusion_length,
					    PAGE_SIZE);
		dma_ops_reserve_addresses(dma_dom, startpage, pages);
	}

	/*
	 * At the last step, build the page tables so we don't need to
	 * allocate page table pages in the dma_ops mapping/unmapping
	 * path.
	 */
	num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
	dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
			GFP_KERNEL);
	if (!dma_dom->pte_pages)
		goto free_dma_dom;

	l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (l2_pde == NULL)
		goto free_dma_dom;

	dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));

	for (i = 0; i < num_pte_pages; ++i) {
		dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!dma_dom->pte_pages[i])
			goto free_dma_dom;
		address = virt_to_phys(dma_dom->pte_pages[i]);
		l2_pde[i] = IOMMU_L1_PDE(address);
	}

	return dma_dom;

free_dma_dom:
	dma_ops_domain_free(dma_dom);

	return NULL;
}

/*
 * Find out the protection domain structure for a given PCI device. This
 * will give us the pointer to the page table root for example.
 */
static struct protection_domain *domain_for_device(u16 devid)
{
	struct protection_domain *dom;
	unsigned long flags;

	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
	dom = amd_iommu_pd_table[devid];
	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return dom;
}

/*
 * If a device is not yet associated with a domain, this function
 * associates it with one and makes the association visible to the hardware.
 */
static void set_device_domain(struct amd_iommu *iommu,
			      struct protection_domain *domain,
			      u16 devid)
{
	unsigned long flags;

	u64 pte_root = virt_to_phys(domain->pt_root);

	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[2] = domain->id;

	amd_iommu_pd_table[devid] = domain;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	iommu_queue_inv_dev_entry(iommu, devid);
}

/*****************************************************************************
 *
 * The next functions belong to the dma_ops mapping/unmapping code.
 *
 *****************************************************************************/

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	if (!dev || !dev->dma_mask)
		return false;

	return true;
}

/*
 * In this function the list of preallocated protection domains is traversed to
 * find the domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
	struct dma_ops_domain *entry, *ret = NULL;
	unsigned long flags;

	if (list_empty(&iommu_pd_list))
		return NULL;

	spin_lock_irqsave(&iommu_pd_list_lock, flags);

	list_for_each_entry(entry, &iommu_pd_list, list) {
		if (entry->target_dev == devid) {
			ret = entry;
			list_del(&ret->list);
			break;
		}
	}

	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

	return ret;
}

/*
 * In the dma_ops path we only have the struct device. This function
 * finds the corresponding IOMMU, the protection domain and the
 * requestor id for a given device.
 * If the device is not yet associated with a domain this is also done
 * in this function.
 */
static int get_device_resources(struct device *dev,
				struct amd_iommu **iommu,
				struct protection_domain **domain,
				u16 *bdf)
{
	struct dma_ops_domain *dma_dom;
	struct pci_dev *pcidev;
	u16 _bdf;

	*iommu = NULL;
	*domain = NULL;
	*bdf = 0xffff;

	if (dev->bus != &pci_bus_type)
		return 0;

	pcidev = to_pci_dev(dev);
	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

	/* device not translated by any IOMMU in the system? */
	if (_bdf > amd_iommu_last_bdf)
		return 0;

	*bdf = amd_iommu_alias_table[_bdf];

	*iommu = amd_iommu_rlookup_table[*bdf];
	if (*iommu == NULL)
		return 0;
	*domain = domain_for_device(*bdf);
	if (*domain == NULL) {
		dma_dom = find_protection_domain(*bdf);
		if (!dma_dom)
			dma_dom = (*iommu)->default_dom;
		*domain = &dma_dom->domain;
		set_device_domain(*iommu, *domain, *bdf);
		printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
		       "device ", (*domain)->id);
		print_devid(_bdf, 1);
	}

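	/*
	 * If the actual device id (as opposed to its alias) is not yet
	 * attached to a domain, attach it to the same domain as well.
	 */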
	if (domain_for_device(_bdf) == NULL)
		set_device_domain(*iommu, *domain, _bdf);

	return 1;
}

/*
 * This is the generic map function. It maps one 4kb page at paddr to
 * the given address in the DMA address space for the domain.
 */
static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
				     struct dma_ops_domain *dom,
				     unsigned long address,
				     phys_addr_t paddr,
				     int direction)
{
	u64 *pte, __pte;

	WARN_ON(address > dom->aperture_size);

	paddr &= PAGE_MASK;

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);

	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (direction == DMA_TO_DEVICE)
		__pte |= IOMMU_PTE_IR;
	else if (direction == DMA_FROM_DEVICE)
		__pte |= IOMMU_PTE_IW;
	else if (direction == DMA_BIDIRECTIONAL)
		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

	WARN_ON(*pte);

	*pte = __pte;

	return (dma_addr_t)address;
}

/*
 * The generic unmapping function for one page in the DMA address space.
 */
static void dma_ops_domain_unmap(struct amd_iommu *iommu,
				 struct dma_ops_domain *dom,
				 unsigned long address)
{
	u64 *pte;

	if (address >= dom->aperture_size)
		return;

	WARN_ON(address & 0xfffULL || address > dom->aperture_size);

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);

	WARN_ON(!*pte);

	*pte = 0ULL;
}

/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided with this IOMMU driver.
 * Must be called with the domain lock held.
 */
static dma_addr_t __map_single(struct device *dev,
			       struct amd_iommu *iommu,
			       struct dma_ops_domain *dma_dom,
			       phys_addr_t paddr,
			       size_t size,
			       int dir,
			       bool align,
			       u64 dma_mask)
{
	dma_addr_t offset = paddr & ~PAGE_MASK;
	dma_addr_t address, start;
	unsigned int pages;
	unsigned long align_mask = 0;
	int i;

	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
	paddr &= PAGE_MASK;

	if (align)
		align_mask = (1UL << get_order(size)) - 1;

	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
					  dma_mask);
	if (unlikely(address == bad_dma_address))
		goto out;

	start = address;
	for (i = 0; i < pages; ++i) {
		dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
		paddr += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	address += offset;

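	/*
	 * If the address allocator reused addresses (need_flush) and we do
	 * not flush on unmap, flush the whole domain IO/TLB here. Otherwise
	 * only flush the new range if the IOMMU caches non-present entries.
	 */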
	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
		iommu_flush_tlb(iommu, dma_dom->domain.id);
		dma_dom->need_flush = false;
	} else if (unlikely(iommu_has_npcache(iommu)))
		iommu_flush_pages(iommu, dma_dom->domain.id, address, size);

out:
	return address;
}

/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too
 */
static void __unmap_single(struct amd_iommu *iommu,
			   struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir)
{
	dma_addr_t i, start;
	unsigned int pages;

	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
		return;

	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr &= PAGE_MASK;
	start = dma_addr;

	for (i = 0; i < pages; ++i) {
		dma_ops_domain_unmap(iommu, dma_dom, start);
		start += PAGE_SIZE;
	}

	dma_ops_free_addresses(dma_dom, dma_addr, pages);

	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
		dma_dom->need_flush = false;
	}
}

/*
 * The exported map_single function for dma_ops.
 */
static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
			     size_t size, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	dma_addr_t addr;
	u64 dma_mask;

	if (!check_device(dev))
		return bad_dma_address;

	dma_mask = *dev->dma_mask;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (iommu == NULL || domain == NULL)
		/* device not handled by any AMD IOMMU */
		return (dma_addr_t)paddr;

	spin_lock_irqsave(&domain->lock, flags);
	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
			    dma_mask);
	if (addr == bad_dma_address)
		goto out;

	iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}

/*
 * The exported unmap_single function for dma_ops.
 */
static void unmap_single(struct device *dev, dma_addr_t dma_addr,
			 size_t size, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	if (!check_device(dev) ||
	    !get_device_resources(dev, &iommu, &domain, &devid))
		/* device not handled by any AMD IOMMU */
		return;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, dir);

	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * This is a special map_sg function which is used if we should map a
 * device which is not handled by an AMD IOMMU in the system.
 */
static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
			   int nelems, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		s->dma_address = (dma_addr_t)sg_phys(s);
		s->dma_length  = s->length;
	}

	return nelems;
}

/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;
	u64 dma_mask;

	if (!check_device(dev))
		return 0;

	dma_mask = *dev->dma_mask;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		return map_sg_no_iommu(dev, sglist, nelems, dir);

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, iommu, domain->priv,
					      paddr, s->length, dir, false,
					      dma_mask);

		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
	}

	iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;
unmap:
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(iommu, domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}

/*
 * The exported unmap_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	struct scatterlist *s;
	u16 devid;
	int i;

	if (!check_device(dev) ||
	    !get_device_resources(dev, &iommu, &domain, &devid))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		__unmap_single(iommu, domain->priv, s->dma_address,
			       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long flags;
	void *virt_addr;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	phys_addr_t paddr;
	u64 dma_mask = dev->coherent_dma_mask;

	if (!check_device(dev))
		return NULL;

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	flag |= __GFP_ZERO;
	virt_addr = (void *)__get_free_pages(flag, get_order(size));
	if (!virt_addr)
		return 0;

	paddr = virt_to_phys(virt_addr);

	if (!iommu || !domain) {
		*dma_addr = (dma_addr_t)paddr;
		return virt_addr;
	}

	if (!dma_mask)
		dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
				 size, DMA_BIDIRECTIONAL, true, dma_mask);

	if (*dma_addr == bad_dma_address) {
		free_pages((unsigned long)virt_addr, get_order(size));
		virt_addr = NULL;
		goto out;
	}

	iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return virt_addr;
}

/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	if (!check_device(dev))
		return;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		goto free_mem;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

	iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
	free_pages((unsigned long)virt_addr, get_order(size));
}

/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
	u16 bdf;
	struct pci_dev *pcidev;

	/* No device or no PCI device */
	if (!dev || dev->bus != &pci_bus_type)
		return 0;

	pcidev = to_pci_dev(dev);

	bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

	/* Out of our scope? */
	if (bdf > amd_iommu_last_bdf)
		return 0;

	return 1;
}

/*
 * The function for pre-allocating protection domains.
 *
 * If the driver core would inform the DMA layer when a driver grabs a
 * device we would not need to preallocate the protection domains anymore.
 * For now we have to.
 */
void prealloc_protection_domains(void)
{
	struct pci_dev *dev = NULL;
	struct dma_ops_domain *dma_dom;
	struct amd_iommu *iommu;
	int order = amd_iommu_aperture_order;
	u16 devid;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		devid = (dev->bus->number << 8) | dev->devfn;
		if (devid > amd_iommu_last_bdf)
			continue;
		devid = amd_iommu_alias_table[devid];
		if (domain_for_device(devid))
			continue;
		iommu = amd_iommu_rlookup_table[devid];
		if (!iommu)
			continue;
		dma_dom = dma_ops_domain_alloc(iommu, order);
		if (!dma_dom)
			continue;
		init_unity_mappings_for_device(dma_dom, devid);
		dma_dom->target_dev = devid;

		list_add_tail(&dma_dom->list, &iommu_pd_list);
	}
}

static struct dma_mapping_ops amd_iommu_dma_ops = {
	.alloc_coherent = alloc_coherent,
	.free_coherent = free_coherent,
	.map_single = map_single,
	.unmap_single = unmap_single,
	.map_sg = map_sg,
	.unmap_sg = unmap_sg,
	.dma_supported = amd_iommu_dma_supported,
};

/*
 * The function which glues the AMD IOMMU driver into dma_ops.
 */
int __init amd_iommu_init_dma_ops(void)
{
	struct amd_iommu *iommu;
	int order = amd_iommu_aperture_order;
	int ret;

	/*
	 * first allocate a default protection domain for every IOMMU we
	 * found in the system. Devices not assigned to any other
	 * protection domain will be assigned to the default one.
	 */
	list_for_each_entry(iommu, &amd_iommu_list, list) {
		iommu->default_dom = dma_ops_domain_alloc(iommu, order);
		if (iommu->default_dom == NULL)
			return -ENOMEM;
		ret = iommu_init_unity_mappings(iommu);
		if (ret)
			goto free_domains;
	}

	/*
	 * If device isolation is enabled, pre-allocate the protection
	 * domains for each device.
	 */
	if (amd_iommu_isolate)
		prealloc_protection_domains();

	iommu_detected = 1;
	force_iommu = 1;
	bad_dma_address = 0;
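	/* The AMD IOMMU handles DMA remapping, so disable the GART aperture code */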
#ifdef CONFIG_GART_IOMMU
	gart_iommu_aperture_disabled = 1;
	gart_iommu_aperture = 0;
#endif

	/* Make the driver finally visible to the drivers */
	dma_ops = &amd_iommu_dma_ops;

	return 0;

free_domains:

	list_for_each_entry(iommu, &amd_iommu_list, list) {
		if (iommu->default_dom)
			dma_ops_domain_free(iommu->default_dom);
	}

	return ret;
}