/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>

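/*
 * All IOMMU commands are 128 bits wide. CMD_SET_TYPE() below encodes
 * the command opcode into bits 28-31 of the second command dword, so
 * a command is typically built like this:
 *
 *      memset(&cmd, 0, sizeof(cmd));
 *      cmd.data[0] = devid;
 *      CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
 */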
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define EXIT_LOOP_COUNT 10000000

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
        u32 data[4];
};

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                             struct unity_map_entry *e);

/* returns !0 if the IOMMU is caching non-present entries in its TLB */
static int iommu_has_npcache(struct amd_iommu *iommu)
{
        return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
}

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

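/*
 * Decode and print one entry from the event log. An entry consists of
 * four 32-bit words: the event type, device id, domain id and flags
 * are packed bit fields, the faulting address occupies the upper two
 * words.
 */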
static void iommu_print_event(void *__evt)
{
        u32 *event = __evt;
        int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
        int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
        int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
        int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
        u64 address = (u64)(((u64)event[3]) << 32) | event[2];

        printk(KERN_ERR "AMD IOMMU: Event logged [");

        switch (type) {
        case EVENT_TYPE_ILL_DEV:
                printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
                       "address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address, flags);
                break;
        case EVENT_TYPE_IO_FAULT:
                printk("IO_PAGE_FAULT device=%02x:%02x.%x "
                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       domid, address, flags);
                break;
        case EVENT_TYPE_DEV_TAB_ERR:
                printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
                       "address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address, flags);
                break;
        case EVENT_TYPE_PAGE_TAB_ERR:
                printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       domid, address, flags);
                break;
        case EVENT_TYPE_ILL_CMD:
                printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
                break;
        case EVENT_TYPE_CMD_HARD_ERR:
                printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
                       "flags=0x%04x]\n", address, flags);
                break;
        case EVENT_TYPE_IOTLB_INV_TO:
                printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
                       "address=0x%016llx]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address);
                break;
        case EVENT_TYPE_INV_DEV_REQ:
                printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
                       "address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address, flags);
                break;
        default:
                printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
        }
}

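/*
 * Drain the event log of one IOMMU: print every entry between the
 * current head and tail pointers and write the new head back to the
 * hardware.
 */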
static void iommu_poll_events(struct amd_iommu *iommu)
{
        u32 head, tail;
        unsigned long flags;

        spin_lock_irqsave(&iommu->lock, flags);

        head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        while (head != tail) {
                iommu_print_event(iommu->evt_buf + head);
                head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
        }

        writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

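/*
 * Interrupt handler for the IOMMU event log interrupt. No per-IOMMU
 * context is passed in, so the event logs of all IOMMUs in the system
 * are polled.
 */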
irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
        struct amd_iommu *iommu;

        list_for_each_entry(iommu, &amd_iommu_list, list)
                iommu_poll_events(iommu);

        return IRQ_HANDLED;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command. Must be called with iommu->lock held.
 */
static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
        u32 tail, head;
        u8 *target;

        tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        target = iommu->cmd_buf + tail;
        memcpy_toio(target, cmd, sizeof(*cmd));
        tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
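        /*
         * The command buffer is full when the incremented tail pointer
         * would collide with the head pointer. The new tail is written
         * to the hardware only after this check, so a rejected command
         * is never executed by the IOMMU.
         */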
        head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        if (tail == head)
                return -ENOMEM;
        writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

        return 0;
}

/*
 * General queuing function for commands. Takes iommu->lock and calls
 * __iommu_queue_command().
 */
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&iommu->lock, flags);
        ret = __iommu_queue_command(iommu, cmd);
        if (!ret)
                iommu->need_sync = 1;
        spin_unlock_irqrestore(&iommu->lock, flags);

        return ret;
}

/*
 * This function is called whenever we need to ensure that the IOMMU has
 * completed execution of all commands we sent. It sends a
 * COMPLETION_WAIT command and waits for it to finish. The IOMMU signals
 * completion by setting a bit in its status register, which is polled
 * here until it becomes one.
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
        int ret = 0, ready = 0;
        unsigned status = 0;
        struct iommu_cmd cmd;
        unsigned long flags, i = 0;

        memset(&cmd, 0, sizeof(cmd));
        cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
        CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

        spin_lock_irqsave(&iommu->lock, flags);

        if (!iommu->need_sync)
                goto out;

        iommu->need_sync = 0;

        ret = __iommu_queue_command(iommu, &cmd);

        if (ret)
                goto out;

        while (!ready && (i < EXIT_LOOP_COUNT)) {
                ++i;
                /* wait for the bit to become one */
                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
                ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
        }

        /* set bit back to zero */
        status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
        writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);

        if (unlikely(i == EXIT_LOOP_COUNT))
                panic("AMD IOMMU: Completion wait loop failed\n");

out:
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

/*
 * Command send function for invalidating a device table entry
 */
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
        struct iommu_cmd cmd;
        int ret;

        BUG_ON(iommu == NULL);

        memset(&cmd, 0, sizeof(cmd));
        CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
        cmd.data[0] = devid;

        ret = iommu_queue_command(iommu, &cmd);

        return ret;
}

/*
 * Generic command send function for invalidating TLB entries
 */
static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
                                       u64 address, u16 domid, int pde, int s)
{
        struct iommu_cmd cmd;
        int ret;

        memset(&cmd, 0, sizeof(cmd));
        address &= PAGE_MASK;
        CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
        cmd.data[1] |= domid;
        cmd.data[2] = lower_32_bits(address);
        cmd.data[3] = upper_32_bits(address);
        if (s) /* size bit - we flush more than one 4kb page */
                cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
        if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
                cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;

        ret = iommu_queue_command(iommu, &cmd);

        return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
                             u64 address, size_t size)
{
        int s = 0;
        unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);

        address &= PAGE_MASK;

        if (pages > 1) {
                /*
                 * If we have to flush more than one page, flush all
                 * TLB entries for this domain
                 */
                address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
                s = 1;
        }

        iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);

        return 0;
}

/* Flush the whole IO/TLB for a given protection domain */
static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
{
        u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;

        iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to support all features of AMD
 * IOMMU page tables like level skipping and full 64 bit address
 * spaces.
 */
static int iommu_map_page(struct protection_domain *dom,
                          unsigned long bus_addr,
                          unsigned long phys_addr,
                          int prot)
{
        u64 __pte, *pte, *page;

        bus_addr  = PAGE_ALIGN(bus_addr);
        phys_addr = PAGE_ALIGN(phys_addr);

        /* only support 512GB address spaces for now */
        if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
                return -EINVAL;

        pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];

        if (!IOMMU_PTE_PRESENT(*pte)) {
                page = (u64 *)get_zeroed_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                *pte = IOMMU_L2_PDE(virt_to_phys(page));
        }

        pte = IOMMU_PTE_PAGE(*pte);
        pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];

        if (!IOMMU_PTE_PRESENT(*pte)) {
                page = (u64 *)get_zeroed_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                *pte = IOMMU_L1_PDE(virt_to_phys(page));
        }

        pte = IOMMU_PTE_PAGE(*pte);
        pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];

        if (IOMMU_PTE_PRESENT(*pte))
                return -EBUSY;

        __pte = phys_addr | IOMMU_PTE_P;
        if (prot & IOMMU_PROT_IR)
                __pte |= IOMMU_PTE_IR;
        if (prot & IOMMU_PROT_IW)
                __pte |= IOMMU_PTE_IW;

        *pte = __pte;

        return 0;
}

/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
                               struct unity_map_entry *entry)
{
        u16 bdf, i;

        for (i = entry->devid_start; i <= entry->devid_end; ++i) {
                bdf = amd_iommu_alias_table[i];
                if (amd_iommu_rlookup_table[bdf] == iommu)
                        return 1;
        }

        return 0;
}

/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default DMA domain of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
        struct unity_map_entry *entry;
        int ret;

        list_for_each_entry(entry, &amd_iommu_unity_map, list) {
                if (!iommu_for_unity_map(iommu, entry))
                        continue;
                ret = dma_ops_unity_map(iommu->default_dom, entry);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                             struct unity_map_entry *e)
{
        u64 addr;
        int ret;

        for (addr = e->address_start; addr < e->address_end;
             addr += PAGE_SIZE) {
                ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot);
                if (ret)
                        return ret;
                /*
                 * if unity mapping is in aperture range mark the page
                 * as allocated in the aperture
                 */
                if (addr < dma_dom->aperture_size)
                        __set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap);
        }

        return 0;
}

/*
 * Inits the unity mappings required for a specific device
 */
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
                                          u16 devid)
{
        struct unity_map_entry *e;
        int ret;

        list_for_each_entry(e, &amd_iommu_unity_map, list) {
                if (!(devid >= e->devid_start && devid <= e->devid_end))
                        continue;
                ret = dma_ops_unity_map(dma_dom, e);
                if (ret)
                        return ret;
        }

        return 0;
}

/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/

/*
 * The address allocator core function.
 *
 * called with domain->lock held
 */
static unsigned long dma_ops_alloc_addresses(struct device *dev,
                                             struct dma_ops_domain *dom,
                                             unsigned int pages,
                                             unsigned long align_mask,
                                             u64 dma_mask)
{
        unsigned long limit;
        unsigned long address;
        unsigned long boundary_size;

        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;
        limit = iommu_device_max_index(dom->aperture_size >> PAGE_SHIFT, 0,
                                       dma_mask >> PAGE_SHIFT);

        if (dom->next_bit >= limit) {
                dom->next_bit = 0;
                dom->need_flush = true;
        }

        address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
                                   0, boundary_size, align_mask);
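        /*
         * If no suitable range was found above next_bit, wrap around
         * and retry from the start of the aperture. Addresses below
         * next_bit may have been freed recently and could still be
         * cached in the IO/TLB, which is why need_flush is set before
         * they are handed out again.
         */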
        if (address == -1) {
                address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
                                           0, boundary_size, align_mask);
                dom->need_flush = true;
        }

        if (likely(address != -1)) {
                dom->next_bit = address + pages;
                address <<= PAGE_SHIFT;
        } else
                address = bad_dma_address;

        WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

        return address;
}

/*
 * The address free function.
 *
 * called with domain->lock held
 */
static void dma_ops_free_addresses(struct dma_ops_domain *dom,
                                   unsigned long address,
                                   unsigned int pages)
{
        address >>= PAGE_SHIFT;
        iommu_area_free(dom->bitmap, address, pages);

        if (address >= dom->next_bit)
                dom->need_flush = true;
}

/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/

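/*
 * Allocate a free protection domain id from the global bitmap.
 * Domain id 0 is reserved, so a return value of 0 signals that the
 * allocation failed.
 */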
static u16 domain_id_alloc(void)
{
        unsigned long flags;
        int id;

        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
        id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
        BUG_ON(id == 0);
        if (id > 0 && id < MAX_DOMAIN_ID)
                __set_bit(id, amd_iommu_pd_alloc_bitmap);
        else
                id = 0;
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

        return id;
}

#ifdef CONFIG_IOMMU_API
static void domain_id_free(int id)
{
        unsigned long flags;

        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
        if (id > 0 && id < MAX_DOMAIN_ID)
                __clear_bit(id, amd_iommu_pd_alloc_bitmap);
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}
#endif

/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
                                      unsigned long start_page,
                                      unsigned int pages)
{
        unsigned int last_page = dom->aperture_size >> PAGE_SHIFT;

        if (start_page + pages > last_page)
                pages = last_page - start_page;

        iommu_area_reserve(dom->bitmap, start_page, pages);
}

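/*
 * Free all page table pages of a protection domain by walking the
 * three page table levels top-down and releasing every present page.
 */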
static void free_pagetable(struct protection_domain *domain)
{
        int i, j;
        u64 *p1, *p2, *p3;

        p1 = domain->pt_root;

        if (!p1)
                return;

        for (i = 0; i < 512; ++i) {
                if (!IOMMU_PTE_PRESENT(p1[i]))
                        continue;

                p2 = IOMMU_PTE_PAGE(p1[i]);
                for (j = 0; j < 512; ++j) {
                        if (!IOMMU_PTE_PRESENT(p2[j]))
                                continue;
                        p3 = IOMMU_PTE_PAGE(p2[j]);
                        free_page((unsigned long)p3);
                }

                free_page((unsigned long)p2);
        }

        free_page((unsigned long)p1);

        domain->pt_root = NULL;
}

/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
 */
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
        if (!dom)
                return;

        free_pagetable(&dom->domain);

        kfree(dom->pte_pages);

        kfree(dom->bitmap);

        kfree(dom);
}

/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
                                                   unsigned order)
{
        struct dma_ops_domain *dma_dom;
        unsigned i, num_pte_pages;
        u64 *l2_pde;
        u64 address;

        /*
         * Currently the DMA aperture must be between 32 MB and 1GB in size
         */
        if ((order < 25) || (order > 30))
                return NULL;

        dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
        if (!dma_dom)
                return NULL;

        spin_lock_init(&dma_dom->domain.lock);

        dma_dom->domain.id = domain_id_alloc();
        if (dma_dom->domain.id == 0)
                goto free_dma_dom;
        dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
        dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
        dma_dom->domain.priv = dma_dom;
        if (!dma_dom->domain.pt_root)
                goto free_dma_dom;
        dma_dom->aperture_size = (1ULL << order);
        dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8),
                                  GFP_KERNEL);
        if (!dma_dom->bitmap)
                goto free_dma_dom;
        /*
         * mark the first page as allocated so we never return 0 as
         * a valid dma-address. So we can use 0 as error value
         */
        dma_dom->bitmap[0] = 1;
        dma_dom->next_bit = 0;

        dma_dom->need_flush = false;
        dma_dom->target_dev = 0xffff;

        /* Initialize the exclusion range if necessary */
        if (iommu->exclusion_start &&
            iommu->exclusion_start < dma_dom->aperture_size) {
                unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
                int pages = iommu_num_pages(iommu->exclusion_start,
                                            iommu->exclusion_length,
                                            PAGE_SIZE);
                dma_ops_reserve_addresses(dma_dom, startpage, pages);
        }

        /*
         * At the last step, build the page tables so we don't need to
         * allocate page table pages in the dma_ops mapping/unmapping
         * path.
         */
        num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
        dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
                                     GFP_KERNEL);
        if (!dma_dom->pte_pages)
                goto free_dma_dom;

        l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
        if (l2_pde == NULL)
                goto free_dma_dom;

        dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));

        for (i = 0; i < num_pte_pages; ++i) {
                dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
                if (!dma_dom->pte_pages[i])
                        goto free_dma_dom;
                address = virt_to_phys(dma_dom->pte_pages[i]);
                l2_pde[i] = IOMMU_L1_PDE(address);
        }

        return dma_dom;

free_dma_dom:
        dma_ops_domain_free(dma_dom);

        return NULL;
}

/*
 * Find out the protection domain structure for a given PCI device. This
 * will give us the pointer to the page table root for example.
 */
static struct protection_domain *domain_for_device(u16 devid)
{
        struct protection_domain *dom;
        unsigned long flags;

        read_lock_irqsave(&amd_iommu_devtable_lock, flags);
        dom = amd_iommu_pd_table[devid];
        read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

        return dom;
}

/*
 * Binds a device to a protection domain and makes that binding
 * visible to the hardware by writing the device table entry and
 * invalidating the device's old entry in the IOMMU.
 */
static void set_device_domain(struct amd_iommu *iommu,
                              struct protection_domain *domain,
                              u16 devid)
{
        unsigned long flags;

        u64 pte_root = virt_to_phys(domain->pt_root);

        pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
                    << DEV_ENTRY_MODE_SHIFT;
        pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
        amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
        amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
        amd_iommu_dev_table[devid].data[2] = domain->id;

        amd_iommu_pd_table[devid] = domain;
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

        iommu_queue_inv_dev_entry(iommu, devid);
}

/*****************************************************************************
 *
 * The next functions belong to the dma_ops mapping/unmapping code.
 *
 *****************************************************************************/

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
        if (!dev || !dev->dma_mask)
                return false;

        return true;
}

/*
 * In this function the list of preallocated protection domains is traversed to
 * find the domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
        struct dma_ops_domain *entry, *ret = NULL;
        unsigned long flags;

        if (list_empty(&iommu_pd_list))
                return NULL;

        spin_lock_irqsave(&iommu_pd_list_lock, flags);

        list_for_each_entry(entry, &iommu_pd_list, list) {
                if (entry->target_dev == devid) {
                        ret = entry;
                        list_del(&ret->list);
                        break;
                }
        }

        spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

        return ret;
}

/*
 * In the dma_ops path we only have the struct device. This function
 * finds the corresponding IOMMU, the protection domain and the
 * requestor id for a given device.
 * If the device is not yet associated with a domain this is also done
 * in this function.
 */
static int get_device_resources(struct device *dev,
                                struct amd_iommu **iommu,
                                struct protection_domain **domain,
                                u16 *bdf)
{
        struct dma_ops_domain *dma_dom;
        struct pci_dev *pcidev;
        u16 _bdf;

        *iommu = NULL;
        *domain = NULL;
        *bdf = 0xffff;

        if (dev->bus != &pci_bus_type)
                return 0;

        pcidev = to_pci_dev(dev);
        _bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

        /* device not translated by any IOMMU in the system? */
        if (_bdf > amd_iommu_last_bdf)
                return 0;

        *bdf = amd_iommu_alias_table[_bdf];

        *iommu = amd_iommu_rlookup_table[*bdf];
        if (*iommu == NULL)
                return 0;
        *domain = domain_for_device(*bdf);
        if (*domain == NULL) {
                dma_dom = find_protection_domain(*bdf);
                if (!dma_dom)
                        dma_dom = (*iommu)->default_dom;
                *domain = &dma_dom->domain;
                set_device_domain(*iommu, *domain, *bdf);
                printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
                                 "device ", (*domain)->id);
                print_devid(_bdf, 1);
        }

        if (domain_for_device(_bdf) == NULL)
                set_device_domain(*iommu, *domain, _bdf);

        return 1;
}

/*
 * This is the generic map function. It maps one 4kb page at paddr to
 * the given address in the DMA address space for the domain.
 */
static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
                                     struct dma_ops_domain *dom,
                                     unsigned long address,
                                     phys_addr_t paddr,
                                     int direction)
{
        u64 *pte, __pte;

        WARN_ON(address > dom->aperture_size);

        paddr &= PAGE_MASK;

        pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
        pte += IOMMU_PTE_L0_INDEX(address);

        __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

        if (direction == DMA_TO_DEVICE)
                __pte |= IOMMU_PTE_IR;
        else if (direction == DMA_FROM_DEVICE)
                __pte |= IOMMU_PTE_IW;
        else if (direction == DMA_BIDIRECTIONAL)
                __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

        WARN_ON(*pte);

        *pte = __pte;

        return (dma_addr_t)address;
}

/*
 * The generic unmapping function for one page in the DMA address space.
 */
static void dma_ops_domain_unmap(struct amd_iommu *iommu,
                                 struct dma_ops_domain *dom,
                                 unsigned long address)
{
        u64 *pte;

        if (address >= dom->aperture_size)
                return;

        WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);

        pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
        pte += IOMMU_PTE_L0_INDEX(address);

        WARN_ON(!*pte);

        *pte = 0ULL;
}

/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided with this IOMMU driver.
 * Must be called with the domain lock held.
 */
static dma_addr_t __map_single(struct device *dev,
                               struct amd_iommu *iommu,
                               struct dma_ops_domain *dma_dom,
                               phys_addr_t paddr,
                               size_t size,
                               int dir,
                               bool align,
                               u64 dma_mask)
{
        dma_addr_t offset = paddr & ~PAGE_MASK;
        dma_addr_t address, start;
        unsigned int pages;
        unsigned long align_mask = 0;
        int i;

        pages = iommu_num_pages(paddr, size, PAGE_SIZE);
        paddr &= PAGE_MASK;

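        /*
         * Aligned requests get a DMA address aligned to the smallest
         * power-of-two number of pages that covers the request, e.g.
         * a 16kb mapping is returned at a 16kb-aligned address.
         */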
        if (align)
                align_mask = (1UL << get_order(size)) - 1;

        address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
                                          dma_mask);
        if (unlikely(address == bad_dma_address))
                goto out;

        start = address;
        for (i = 0; i < pages; ++i) {
                dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
                paddr += PAGE_SIZE;
                start += PAGE_SIZE;
        }
        address += offset;

        if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
                iommu_flush_tlb(iommu, dma_dom->domain.id);
                dma_dom->need_flush = false;
        } else if (unlikely(iommu_has_npcache(iommu)))
                iommu_flush_pages(iommu, dma_dom->domain.id, address, size);

out:
        return address;
}

/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too
 */
static void __unmap_single(struct amd_iommu *iommu,
                           struct dma_ops_domain *dma_dom,
                           dma_addr_t dma_addr,
                           size_t size,
                           int dir)
{
        dma_addr_t i, start;
        unsigned int pages;

        if ((dma_addr == bad_dma_address) ||
            (dma_addr + size > dma_dom->aperture_size))
                return;

        pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr &= PAGE_MASK;
        start = dma_addr;

        for (i = 0; i < pages; ++i) {
                dma_ops_domain_unmap(iommu, dma_dom, start);
                start += PAGE_SIZE;
        }

        dma_ops_free_addresses(dma_dom, dma_addr, pages);

        if (amd_iommu_unmap_flush || dma_dom->need_flush) {
                iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
                dma_dom->need_flush = false;
        }
}

/*
 * The exported map_single function for dma_ops.
 */
static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
                             size_t size, int dir)
{
        unsigned long flags;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        u16 devid;
        dma_addr_t addr;
        u64 dma_mask;

        if (!check_device(dev))
                return bad_dma_address;

        dma_mask = *dev->dma_mask;

        get_device_resources(dev, &iommu, &domain, &devid);

        if (iommu == NULL || domain == NULL)
                /* device not handled by any AMD IOMMU */
                return (dma_addr_t)paddr;

        spin_lock_irqsave(&domain->lock, flags);
        addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
                            dma_mask);
        if (addr == bad_dma_address)
                goto out;

        iommu_completion_wait(iommu);

out:
        spin_unlock_irqrestore(&domain->lock, flags);

        return addr;
}

/*
 * The exported unmap_single function for dma_ops.
 */
static void unmap_single(struct device *dev, dma_addr_t dma_addr,
                         size_t size, int dir)
{
        unsigned long flags;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        u16 devid;

        if (!check_device(dev) ||
            !get_device_resources(dev, &iommu, &domain, &devid))
                /* device not handled by any AMD IOMMU */
                return;

        spin_lock_irqsave(&domain->lock, flags);

        __unmap_single(iommu, domain->priv, dma_addr, size, dir);

        iommu_completion_wait(iommu);

        spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * This is a special map_sg function which is used if we should map a
 * device which is not handled by an AMD IOMMU in the system.
 */
static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
                           int nelems, int dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sglist, s, nelems, i) {
                s->dma_address = (dma_addr_t)sg_phys(s);
                s->dma_length  = s->length;
        }

        return nelems;
}

/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
                  int nelems, int dir)
{
        unsigned long flags;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        u16 devid;
        int i;
        struct scatterlist *s;
        phys_addr_t paddr;
        int mapped_elems = 0;
        u64 dma_mask;

        if (!check_device(dev))
                return 0;

        dma_mask = *dev->dma_mask;

        get_device_resources(dev, &iommu, &domain, &devid);

        if (!iommu || !domain)
                return map_sg_no_iommu(dev, sglist, nelems, dir);

        spin_lock_irqsave(&domain->lock, flags);

        for_each_sg(sglist, s, nelems, i) {
                paddr = sg_phys(s);

                s->dma_address = __map_single(dev, iommu, domain->priv,
                                              paddr, s->length, dir, false,
                                              dma_mask);

                if (s->dma_address) {
                        s->dma_length = s->length;
                        mapped_elems++;
                } else
                        goto unmap;
        }

        iommu_completion_wait(iommu);

out:
        spin_unlock_irqrestore(&domain->lock, flags);

        return mapped_elems;
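        /*
         * If mapping one of the sg entries failed, every entry mapped
         * so far is unmapped again and zero mapped elements are
         * returned to signal the error.
         */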
unmap:
        for_each_sg(sglist, s, mapped_elems, i) {
                if (s->dma_address)
                        __unmap_single(iommu, domain->priv, s->dma_address,
                                       s->dma_length, dir);
                s->dma_address = s->dma_length = 0;
        }

        mapped_elems = 0;

        goto out;
}

/*
 * The exported unmap_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
                     int nelems, int dir)
{
        unsigned long flags;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        struct scatterlist *s;
        u16 devid;
        int i;

        if (!check_device(dev) ||
            !get_device_resources(dev, &iommu, &domain, &devid))
                return;

        spin_lock_irqsave(&domain->lock, flags);

        for_each_sg(sglist, s, nelems, i) {
                __unmap_single(iommu, domain->priv, s->dma_address,
                               s->dma_length, dir);
                s->dma_address = s->dma_length = 0;
        }

        iommu_completion_wait(iommu);

        spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
                            dma_addr_t *dma_addr, gfp_t flag)
{
        unsigned long flags;
        void *virt_addr;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        u16 devid;
        phys_addr_t paddr;
        u64 dma_mask = dev->coherent_dma_mask;

        if (!check_device(dev))
                return NULL;

        if (!get_device_resources(dev, &iommu, &domain, &devid))
                flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        flag |= __GFP_ZERO;
        virt_addr = (void *)__get_free_pages(flag, get_order(size));
        if (!virt_addr)
                return NULL;

        paddr = virt_to_phys(virt_addr);

        if (!iommu || !domain) {
                *dma_addr = (dma_addr_t)paddr;
                return virt_addr;
        }

        if (!dma_mask)
                dma_mask = *dev->dma_mask;

        spin_lock_irqsave(&domain->lock, flags);

        *dma_addr = __map_single(dev, iommu, domain->priv, paddr,
                                 size, DMA_BIDIRECTIONAL, true, dma_mask);

        if (*dma_addr == bad_dma_address) {
                free_pages((unsigned long)virt_addr, get_order(size));
                virt_addr = NULL;
                goto out;
        }

        iommu_completion_wait(iommu);

out:
        spin_unlock_irqrestore(&domain->lock, flags);

        return virt_addr;
}

/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
                          void *virt_addr, dma_addr_t dma_addr)
{
        unsigned long flags;
        struct amd_iommu *iommu;
        struct protection_domain *domain;
        u16 devid;

        if (!check_device(dev))
                return;

        get_device_resources(dev, &iommu, &domain, &devid);

        if (!iommu || !domain)
                goto free_mem;

        spin_lock_irqsave(&domain->lock, flags);

        __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

        iommu_completion_wait(iommu);

        spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
        free_pages((unsigned long)virt_addr, get_order(size));
}

/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
        u16 bdf;
        struct pci_dev *pcidev;

        /* No device or no PCI device */
        if (!dev || dev->bus != &pci_bus_type)
                return 0;

        pcidev = to_pci_dev(dev);

        bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

        /* Out of our scope? */
        if (bdf > amd_iommu_last_bdf)
                return 0;

        return 1;
}

/*
 * The function for pre-allocating protection domains.
 *
 * If the driver core informed the DMA layer whenever a driver grabs a
 * device, we would not need to preallocate the protection domains
 * anymore. For now we have to.
 */
void prealloc_protection_domains(void)
{
        struct pci_dev *dev = NULL;
        struct dma_ops_domain *dma_dom;
        struct amd_iommu *iommu;
        int order = amd_iommu_aperture_order;
        u16 devid;

        while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
                devid = (dev->bus->number << 8) | dev->devfn;
                if (devid > amd_iommu_last_bdf)
                        continue;
                devid = amd_iommu_alias_table[devid];
                if (domain_for_device(devid))
                        continue;
                iommu = amd_iommu_rlookup_table[devid];
                if (!iommu)
                        continue;
                dma_dom = dma_ops_domain_alloc(iommu, order);
                if (!dma_dom)
                        continue;
                init_unity_mappings_for_device(dma_dom, devid);
                dma_dom->target_dev = devid;

                list_add_tail(&dma_dom->list, &iommu_pd_list);
        }
}

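/*
 * The dma_ops entry points exported by this driver. All DMA for
 * devices behind an AMD IOMMU is routed through these functions.
 */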
static struct dma_mapping_ops amd_iommu_dma_ops = {
        .alloc_coherent = alloc_coherent,
        .free_coherent = free_coherent,
        .map_single = map_single,
        .unmap_single = unmap_single,
        .map_sg = map_sg,
        .unmap_sg = unmap_sg,
        .dma_supported = amd_iommu_dma_supported,
};

/*
 * The function which hooks the AMD IOMMU driver into dma_ops.
 */
int __init amd_iommu_init_dma_ops(void)
{
        struct amd_iommu *iommu;
        int order = amd_iommu_aperture_order;
        int ret;

        /*
         * First allocate a default protection domain for every IOMMU we
         * found in the system. Devices not assigned to any other
         * protection domain will be assigned to the default one.
         */
        list_for_each_entry(iommu, &amd_iommu_list, list) {
                iommu->default_dom = dma_ops_domain_alloc(iommu, order);
                if (iommu->default_dom == NULL)
                        return -ENOMEM;
                ret = iommu_init_unity_mappings(iommu);
                if (ret)
                        goto free_domains;
        }

        /*
         * If device isolation is enabled, pre-allocate the protection
         * domains for each device.
         */
        if (amd_iommu_isolate)
                prealloc_protection_domains();

        iommu_detected = 1;
        force_iommu = 1;
        bad_dma_address = 0;
#ifdef CONFIG_GART_IOMMU
        gart_iommu_aperture_disabled = 1;
        gart_iommu_aperture = 0;
#endif

        /* Finally make the driver visible to the DMA layer */
        dma_ops = &amd_iommu_dma_ops;

        return 0;

free_domains:

        list_for_each_entry(iommu, &amd_iommu_list, list) {
                if (iommu->default_dom)
                        dma_ops_domain_free(iommu->default_dom);
        }

        return ret;
}