/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/amd_iommu_types.h>
#include <asm/amd_iommu.h>

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define EXIT_LOOP_COUNT 10000000

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* A list of preallocated protection domains */
static LIST_HEAD(iommu_pd_list);
static DEFINE_SPINLOCK(iommu_pd_list_lock);

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e);

/* returns !0 if the IOMMU is caching non-present entries in its TLB */
static int iommu_has_npcache(struct amd_iommu *iommu)
{
	return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
}

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

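/*
 * Decode a single entry from the IOMMU event log and print it to the
 * kernel log, including the device, domain, address and flags fields
 * where the event type carries them.
 */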
static void iommu_print_event(void *__evt)
{
	u32 *event = __evt;
	int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	u64 address = (u64)(((u64)event[3]) << 32) | event[2];

	printk(KERN_ERR "AMD IOMMU: Event logged [");

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_IO_FAULT:
		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
	}
}

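/*
 * Walk the event log ring buffer of one IOMMU, print every pending
 * entry and write the new head pointer back to the hardware.
 */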
static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;
	unsigned long flags;

	spin_lock_irqsave(&iommu->lock, flags);

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

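/* Interrupt handler - polls the event logs of all IOMMUs in the system */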
irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	struct amd_iommu *iommu;

	list_for_each_entry(iommu, &amd_iommu_list, list)
		iommu_poll_events(iommu);

	return IRQ_HANDLED;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command. Must be called with iommu->lock held.
 */
static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	u32 tail, head;
	u8 *target;

	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	target = iommu->cmd_buf + tail;
	memcpy_toio(target, cmd, sizeof(*cmd));
	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	if (tail == head)
		return -ENOMEM;
	writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);

	return 0;
}

/*
 * General queuing function for commands. Takes iommu->lock and calls
 * __iommu_queue_command().
 */
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command(iommu, cmd);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

/*
 * This function is called whenever we need to ensure that the IOMMU has
 * completed execution of all commands we sent. It sends a
 * COMPLETION_WAIT command and waits for it to finish. The IOMMU informs
 * us about that by writing a value to a physical address we pass with
 * the command.
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	int ret = 0, ready = 0;
	unsigned status = 0;
	struct iommu_cmd cmd;
	unsigned long flags, i = 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);

	iommu->need_sync = 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ret = __iommu_queue_command(iommu, &cmd);

	if (ret)
		goto out;

	while (!ready && (i < EXIT_LOOP_COUNT)) {
		++i;
		/* wait for the bit to become one */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
		ready = status & MMIO_STATUS_COM_WAIT_INT_MASK;
	}

	/* set bit back to zero */
	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);

	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
out:
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/*
 * Command send function for invalidating a device table entry
 */
static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;
	int ret;

	BUG_ON(iommu == NULL);

	memset(&cmd, 0, sizeof(cmd));
	CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
	cmd.data[0] = devid;

	ret = iommu_queue_command(iommu, &cmd);

	iommu->need_sync = 1;

	return ret;
}

/*
 * Generic command send function for invalidating TLB entries
 */
static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
		u64 address, u16 domid, int pde, int s)
{
	struct iommu_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	address &= PAGE_MASK;
	CMD_SET_TYPE(&cmd, CMD_INV_IOMMU_PAGES);
	cmd.data[1] |= domid;
	cmd.data[2] = lower_32_bits(address);
	cmd.data[3] = upper_32_bits(address);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;

	ret = iommu_queue_command(iommu, &cmd);

	iommu->need_sync = 1;

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
		u64 address, size_t size)
{
	int s = 0;
	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);

	address &= PAGE_MASK;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = 1;
	}

	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);

	return 0;
}

/* Flush the whole IO/TLB for a given protection domain */
static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
{
	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;

	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map(struct protection_domain *dom,
		     unsigned long bus_addr,
		     unsigned long phys_addr,
		     int prot)
{
	u64 __pte, *pte, *page;

	bus_addr  = PAGE_ALIGN(bus_addr);
	phys_addr = PAGE_ALIGN(phys_addr);

	/* only support 512GB address spaces for now */
	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(bus_addr)];

	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		*pte = IOMMU_L2_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L1_INDEX(bus_addr)];

	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		*pte = IOMMU_L1_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L0_INDEX(bus_addr)];

	if (IOMMU_PTE_PRESENT(*pte))
		return -EBUSY;

	__pte = phys_addr | IOMMU_PTE_P;
	if (prot & IOMMU_PROT_IR)
		__pte |= IOMMU_PTE_IR;
	if (prot & IOMMU_PROT_IW)
		__pte |= IOMMU_PTE_IW;

	*pte = __pte;

	return 0;
}

/*
 * This function checks if a specific unity mapping entry is needed for
 * this specific IOMMU.
 */
static int iommu_for_unity_map(struct amd_iommu *iommu,
			       struct unity_map_entry *entry)
{
	u16 bdf, i;

	for (i = entry->devid_start; i <= entry->devid_end; ++i) {
		bdf = amd_iommu_alias_table[i];
		if (amd_iommu_rlookup_table[bdf] == iommu)
			return 1;
	}

	return 0;
}

/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default DMA domain of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
			     struct unity_map_entry *e)
{
	u64 addr;
	int ret;

	for (addr = e->address_start; addr < e->address_end;
	     addr += PAGE_SIZE) {
		ret = iommu_map(&dma_dom->domain, addr, addr, e->prot);
		if (ret)
			return ret;
		/*
		 * if unity mapping is in aperture range mark the page
		 * as allocated in the aperture
		 */
		if (addr < dma_dom->aperture_size)
			__set_bit(addr >> PAGE_SHIFT, dma_dom->bitmap);
	}

	return 0;
}

/*
 * Inits the unity mappings required for a specific device
 */
static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
					  u16 devid)
{
	struct unity_map_entry *e;
	int ret;

	list_for_each_entry(e, &amd_iommu_unity_map, list) {
		if (!(devid >= e->devid_start && devid <= e->devid_end))
			continue;
		ret = dma_ops_unity_map(dma_dom, e);
		if (ret)
			return ret;
	}

	return 0;
}

/****************************************************************************
 *
 * The next functions belong to the address allocator for the dma_ops
 * interface functions. They work like the allocators in the other IOMMU
 * drivers. It's basically a bitmap which marks the allocated pages in
 * the aperture. Maybe it could be enhanced in the future to a more
 * efficient allocator.
 *
 ****************************************************************************/

/*
 * The address allocator core function.
 *
 * called with domain->lock held
 */
static unsigned long dma_ops_alloc_addresses(struct device *dev,
					     struct dma_ops_domain *dom,
					     unsigned int pages,
					     unsigned long align_mask,
					     u64 dma_mask)
{
	unsigned long limit;
	unsigned long address;
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	limit = iommu_device_max_index(dom->aperture_size >> PAGE_SHIFT, 0,
				       dma_mask >> PAGE_SHIFT);

	if (dom->next_bit >= limit) {
		dom->next_bit = 0;
		dom->need_flush = true;
	}

	address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
				   0, boundary_size, align_mask);
	if (address == -1) {
		address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
					   0, boundary_size, align_mask);
		dom->need_flush = true;
	}

	if (likely(address != -1)) {
		dom->next_bit = address + pages;
		address <<= PAGE_SHIFT;
	} else
		address = bad_dma_address;

	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);

	return address;
}

/*
 * The address free function.
 *
 * called with domain->lock held
 */
static void dma_ops_free_addresses(struct dma_ops_domain *dom,
				   unsigned long address,
				   unsigned int pages)
{
	address >>= PAGE_SHIFT;
	iommu_area_free(dom->bitmap, address, pages);

	if (address + pages >= dom->next_bit)
		dom->need_flush = true;
}

/****************************************************************************
 *
 * The next functions belong to the domain allocation. A domain is
 * allocated for every IOMMU as the default domain. If device isolation
 * is enabled, every device gets its own domain. The most important thing
 * about domains is the page table mapping the DMA address space they
 * contain.
 *
 ****************************************************************************/

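/*
 * Allocate a new protection domain id from the global allocation bitmap.
 * Returns 0 if no free id is left.
 */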
static u16 domain_id_alloc(void)
{
	unsigned long flags;
	int id;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
	BUG_ON(id == 0);
	if (id > 0 && id < MAX_DOMAIN_ID)
		__set_bit(id, amd_iommu_pd_alloc_bitmap);
	else
		id = 0;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return id;
}

/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	unsigned int last_page = dom->aperture_size >> PAGE_SHIFT;

	if (start_page + pages > last_page)
		pages = last_page - start_page;

	iommu_area_reserve(dom->bitmap, start_page, pages);
}

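/*
 * Free the three-level page table of a dma_ops domain, including all
 * page table pages referenced by present directory entries.
 */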
static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
{
	int i, j;
	u64 *p1, *p2, *p3;

	p1 = dma_dom->domain.pt_root;

	if (!p1)
		return;

	for (i = 0; i < 512; ++i) {
		if (!IOMMU_PTE_PRESENT(p1[i]))
			continue;

		p2 = IOMMU_PTE_PAGE(p1[i]);
		for (j = 0; j < 512; ++j) {
			if (!IOMMU_PTE_PRESENT(p2[j]))
				continue;
			p3 = IOMMU_PTE_PAGE(p2[j]);
			free_page((unsigned long)p3);
		}

		free_page((unsigned long)p2);
	}

	free_page((unsigned long)p1);
}

/*
 * Free a domain, only used if something went wrong in the
 * allocation path and we need to free an already allocated page table
 */
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
	if (!dom)
		return;

	dma_ops_free_pagetable(dom);

	kfree(dom->pte_pages);

	kfree(dom->bitmap);

	kfree(dom);
}

/*
 * Allocates a new protection domain usable for the dma_ops functions.
 * It also initializes the page table and the address allocator data
 * structures required for the dma_ops interface
 */
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
						   unsigned order)
{
	struct dma_ops_domain *dma_dom;
	unsigned i, num_pte_pages;
	u64 *l2_pde;
	u64 address;

	/*
	 * Currently the DMA aperture must be between 32 MB and 1GB in size
	 */
	if ((order < 25) || (order > 30))
		return NULL;

	dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
	if (!dma_dom)
		return NULL;

	spin_lock_init(&dma_dom->domain.lock);

	dma_dom->domain.id = domain_id_alloc();
	if (dma_dom->domain.id == 0)
		goto free_dma_dom;
	dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
	dma_dom->domain.priv = dma_dom;
	if (!dma_dom->domain.pt_root)
		goto free_dma_dom;
	dma_dom->aperture_size = (1ULL << order);
	dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8),
				  GFP_KERNEL);
	if (!dma_dom->bitmap)
		goto free_dma_dom;
	/*
	 * mark the first page as allocated so we never return 0 as
	 * a valid dma-address. So we can use 0 as error value
	 */
	dma_dom->bitmap[0] = 1;
	dma_dom->next_bit = 0;

	dma_dom->need_flush = false;
	dma_dom->target_dev = 0xffff;

	/* Initialize the exclusion range if necessary */
	if (iommu->exclusion_start &&
	    iommu->exclusion_start < dma_dom->aperture_size) {
		unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
		int pages = iommu_num_pages(iommu->exclusion_start,
					    iommu->exclusion_length,
					    PAGE_SIZE);
		dma_ops_reserve_addresses(dma_dom, startpage, pages);
	}

	/*
	 * At the last step, build the page tables so we don't need to
	 * allocate page table pages in the dma_ops mapping/unmapping
	 * path.
	 */
	num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
	dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
				     GFP_KERNEL);
	if (!dma_dom->pte_pages)
		goto free_dma_dom;

	l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (l2_pde == NULL)
		goto free_dma_dom;

	dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));

	for (i = 0; i < num_pte_pages; ++i) {
		dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
		if (!dma_dom->pte_pages[i])
			goto free_dma_dom;
		address = virt_to_phys(dma_dom->pte_pages[i]);
		l2_pde[i] = IOMMU_L1_PDE(address);
	}

	return dma_dom;

free_dma_dom:
	dma_ops_domain_free(dma_dom);

	return NULL;
}

/*
 * Find out the protection domain structure for a given PCI device. This
 * will give us the pointer to the page table root for example.
 */
static struct protection_domain *domain_for_device(u16 devid)
{
	struct protection_domain *dom;
	unsigned long flags;

	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
	dom = amd_iommu_pd_table[devid];
	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	return dom;
}

/*
 * If a device is not yet associated with a domain, this function
 * assigns it to a domain and makes the assignment visible to the hardware
 */
static void set_device_domain(struct amd_iommu *iommu,
			      struct protection_domain *domain,
			      u16 devid)
{
	unsigned long flags;

	u64 pte_root = virt_to_phys(domain->pt_root);

	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[2] = domain->id;

	amd_iommu_pd_table[devid] = domain;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

	iommu_queue_inv_dev_entry(iommu, devid);

	iommu->need_sync = 1;
}

/*****************************************************************************
 *
 * The next functions belong to the dma_ops mapping/unmapping code.
 *
 *****************************************************************************/

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	if (!dev || !dev->dma_mask)
		return false;

	return true;
}

/*
 * In this function the list of preallocated protection domains is traversed to
 * find the domain for a specific device
 */
static struct dma_ops_domain *find_protection_domain(u16 devid)
{
	struct dma_ops_domain *entry, *ret = NULL;
	unsigned long flags;

	if (list_empty(&iommu_pd_list))
		return NULL;

	spin_lock_irqsave(&iommu_pd_list_lock, flags);

	list_for_each_entry(entry, &iommu_pd_list, list) {
		if (entry->target_dev == devid) {
			ret = entry;
			list_del(&ret->list);
			break;
		}
	}

	spin_unlock_irqrestore(&iommu_pd_list_lock, flags);

	return ret;
}

/*
 * In the dma_ops path we only have the struct device. This function
 * finds the corresponding IOMMU, the protection domain and the
 * requestor id for a given device.
 * If the device is not yet associated with a domain this is also done
 * in this function.
 */
static int get_device_resources(struct device *dev,
				struct amd_iommu **iommu,
				struct protection_domain **domain,
				u16 *bdf)
{
	struct dma_ops_domain *dma_dom;
	struct pci_dev *pcidev;
	u16 _bdf;

	*iommu = NULL;
	*domain = NULL;
	*bdf = 0xffff;

	if (dev->bus != &pci_bus_type)
		return 0;

	pcidev = to_pci_dev(dev);
	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

	/* device not translated by any IOMMU in the system? */
	if (_bdf > amd_iommu_last_bdf)
		return 0;

	*bdf = amd_iommu_alias_table[_bdf];

	*iommu = amd_iommu_rlookup_table[*bdf];
	if (*iommu == NULL)
		return 0;
	*domain = domain_for_device(*bdf);
	if (*domain == NULL) {
		dma_dom = find_protection_domain(*bdf);
		if (!dma_dom)
			dma_dom = (*iommu)->default_dom;
		*domain = &dma_dom->domain;
		set_device_domain(*iommu, *domain, *bdf);
		printk(KERN_INFO "AMD IOMMU: Using protection domain %d for "
		       "device ", (*domain)->id);
		print_devid(_bdf, 1);
	}

	return 1;
}

/*
 * This is the generic map function. It maps one 4kb page at paddr to
 * the given address in the DMA address space for the domain.
 */
static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
				     struct dma_ops_domain *dom,
				     unsigned long address,
				     phys_addr_t paddr,
				     int direction)
{
	u64 *pte, __pte;

	WARN_ON(address > dom->aperture_size);

	paddr &= PAGE_MASK;

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);

	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;

	if (direction == DMA_TO_DEVICE)
		__pte |= IOMMU_PTE_IR;
	else if (direction == DMA_FROM_DEVICE)
		__pte |= IOMMU_PTE_IW;
	else if (direction == DMA_BIDIRECTIONAL)
		__pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;

	WARN_ON(*pte);

	*pte = __pte;

	return (dma_addr_t)address;
}

/*
 * The generic unmapping function for one page in the DMA address space.
 */
static void dma_ops_domain_unmap(struct amd_iommu *iommu,
				 struct dma_ops_domain *dom,
				 unsigned long address)
{
	u64 *pte;

	if (address >= dom->aperture_size)
		return;

	WARN_ON(address & 0xfffULL || address > dom->aperture_size);

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);

	WARN_ON(!*pte);

	*pte = 0ULL;
}

/*
 * This function contains common code for mapping of a physically
 * contiguous memory region into DMA address space. It is used by all
 * mapping functions provided by this IOMMU driver.
 * Must be called with the domain lock held.
 */
static dma_addr_t __map_single(struct device *dev,
			       struct amd_iommu *iommu,
			       struct dma_ops_domain *dma_dom,
			       phys_addr_t paddr,
			       size_t size,
			       int dir,
			       bool align,
			       u64 dma_mask)
{
	dma_addr_t offset = paddr & ~PAGE_MASK;
	dma_addr_t address, start;
	unsigned int pages;
	unsigned long align_mask = 0;
	int i;

	pages = iommu_num_pages(paddr, size, PAGE_SIZE);
	paddr &= PAGE_MASK;

	if (align)
		align_mask = (1UL << get_order(size)) - 1;

	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
					  dma_mask);
	if (unlikely(address == bad_dma_address))
		goto out;

	start = address;
	for (i = 0; i < pages; ++i) {
		dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
		paddr += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	address += offset;

	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
		iommu_flush_tlb(iommu, dma_dom->domain.id);
		dma_dom->need_flush = false;
	} else if (unlikely(iommu_has_npcache(iommu)))
		iommu_flush_pages(iommu, dma_dom->domain.id, address, size);

out:
	return address;
}

/*
 * Does the reverse of the __map_single function. Must be called with
 * the domain lock held too
 */
static void __unmap_single(struct amd_iommu *iommu,
			   struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir)
{
	dma_addr_t i, start;
	unsigned int pages;

	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
		return;

	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr &= PAGE_MASK;
	start = dma_addr;

	for (i = 0; i < pages; ++i) {
		dma_ops_domain_unmap(iommu, dma_dom, start);
		start += PAGE_SIZE;
	}

	dma_ops_free_addresses(dma_dom, dma_addr, pages);

	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
		dma_dom->need_flush = false;
	}
}

/*
 * The exported map_single function for dma_ops.
 */
static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
			     size_t size, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	dma_addr_t addr;
	u64 dma_mask;

	if (!check_device(dev))
		return bad_dma_address;

	dma_mask = *dev->dma_mask;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (iommu == NULL || domain == NULL)
		/* device not handled by any AMD IOMMU */
		return (dma_addr_t)paddr;

	spin_lock_irqsave(&domain->lock, flags);
	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
			    dma_mask);
	if (addr == bad_dma_address)
		goto out;

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return addr;
}

/*
 * The exported unmap_single function for dma_ops.
 */
static void unmap_single(struct device *dev, dma_addr_t dma_addr,
			 size_t size, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	if (!check_device(dev) ||
	    !get_device_resources(dev, &iommu, &domain, &devid))
		/* device not handled by any AMD IOMMU */
		return;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, dir);

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * This is a special map_sg function which is used if we should map a
 * device which is not handled by an AMD IOMMU in the system.
 */
static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
			   int nelems, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		s->dma_address = (dma_addr_t)sg_phys(s);
		s->dma_length  = s->length;
	}

	return nelems;
}

/*
 * The exported map_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static int map_sg(struct device *dev, struct scatterlist *sglist,
		  int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	int i;
	struct scatterlist *s;
	phys_addr_t paddr;
	int mapped_elems = 0;
	u64 dma_mask;

	if (!check_device(dev))
		return 0;

	dma_mask = *dev->dma_mask;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		return map_sg_no_iommu(dev, sglist, nelems, dir);

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		paddr = sg_phys(s);

		s->dma_address = __map_single(dev, iommu, domain->priv,
					      paddr, s->length, dir, false,
					      dma_mask);

		if (s->dma_address) {
			s->dma_length = s->length;
			mapped_elems++;
		} else
			goto unmap;
	}

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return mapped_elems;
unmap:
	for_each_sg(sglist, s, mapped_elems, i) {
		if (s->dma_address)
			__unmap_single(iommu, domain->priv, s->dma_address,
				       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	mapped_elems = 0;

	goto out;
}

/*
 * The exported unmap_sg function for dma_ops (handles scatter-gather
 * lists).
 */
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
		     int nelems, int dir)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	struct scatterlist *s;
	u16 devid;
	int i;

	if (!check_device(dev) ||
	    !get_device_resources(dev, &iommu, &domain, &devid))
		return;

	spin_lock_irqsave(&domain->lock, flags);

	for_each_sg(sglist, s, nelems, i) {
		__unmap_single(iommu, domain->priv, s->dma_address,
			       s->dma_length, dir);
		s->dma_address = s->dma_length = 0;
	}

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);
}

/*
 * The exported alloc_coherent function for dma_ops.
 */
static void *alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long flags;
	void *virt_addr;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;
	phys_addr_t paddr;
	u64 dma_mask = dev->coherent_dma_mask;

	if (!check_device(dev))
		return NULL;

	if (!get_device_resources(dev, &iommu, &domain, &devid))
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	flag |= __GFP_ZERO;
	virt_addr = (void *)__get_free_pages(flag, get_order(size));
	if (!virt_addr)
		return NULL;

	paddr = virt_to_phys(virt_addr);

	if (!iommu || !domain) {
		*dma_addr = (dma_addr_t)paddr;
		return virt_addr;
	}

	if (!dma_mask)
		dma_mask = *dev->dma_mask;

	spin_lock_irqsave(&domain->lock, flags);

	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
				 size, DMA_BIDIRECTIONAL, true, dma_mask);

	if (*dma_addr == bad_dma_address) {
		free_pages((unsigned long)virt_addr, get_order(size));
		virt_addr = NULL;
		goto out;
	}

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

out:
	spin_unlock_irqrestore(&domain->lock, flags);

	return virt_addr;
}

/*
 * The exported free_coherent function for dma_ops.
 */
static void free_coherent(struct device *dev, size_t size,
			  void *virt_addr, dma_addr_t dma_addr)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct protection_domain *domain;
	u16 devid;

	if (!check_device(dev))
		return;

	get_device_resources(dev, &iommu, &domain, &devid);

	if (!iommu || !domain)
		goto free_mem;

	spin_lock_irqsave(&domain->lock, flags);

	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

	if (unlikely(iommu->need_sync))
		iommu_completion_wait(iommu);

	spin_unlock_irqrestore(&domain->lock, flags);

free_mem:
	free_pages((unsigned long)virt_addr, get_order(size));
}

/*
 * This function is called by the DMA layer to find out if we can handle a
 * particular device. It is part of the dma_ops.
 */
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
	u16 bdf;
	struct pci_dev *pcidev;

	/* No device or no PCI device */
	if (!dev || dev->bus != &pci_bus_type)
		return 0;

	pcidev = to_pci_dev(dev);

	bdf = calc_devid(pcidev->bus->number, pcidev->devfn);

	/* Out of our scope? */
	if (bdf > amd_iommu_last_bdf)
		return 0;

	return 1;
}

/*
 * The function for pre-allocating protection domains.
 *
 * Once the driver core informs the DMA layer when a driver grabs a
 * device, we won't need to preallocate the protection domains anymore.
 * For now we have to.
 */
void prealloc_protection_domains(void)
{
	struct pci_dev *dev = NULL;
	struct dma_ops_domain *dma_dom;
	struct amd_iommu *iommu;
	int order = amd_iommu_aperture_order;
	u16 devid;

	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
		devid = (dev->bus->number << 8) | dev->devfn;
		if (devid > amd_iommu_last_bdf)
			continue;
		devid = amd_iommu_alias_table[devid];
		if (domain_for_device(devid))
			continue;
		iommu = amd_iommu_rlookup_table[devid];
		if (!iommu)
			continue;
		dma_dom = dma_ops_domain_alloc(iommu, order);
		if (!dma_dom)
			continue;
		init_unity_mappings_for_device(dma_dom, devid);
		dma_dom->target_dev = devid;

		list_add_tail(&dma_dom->list, &iommu_pd_list);
	}
}

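/* The dma_ops entry points this driver exports to the DMA layer */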
static struct dma_mapping_ops amd_iommu_dma_ops = {
	.alloc_coherent = alloc_coherent,
	.free_coherent = free_coherent,
	.map_single = map_single,
	.unmap_single = unmap_single,
	.map_sg = map_sg,
	.unmap_sg = unmap_sg,
	.dma_supported = amd_iommu_dma_supported,
};

/*
 * The function which clues the AMD IOMMU driver into dma_ops.
 */
int __init amd_iommu_init_dma_ops(void)
{
	struct amd_iommu *iommu;
	int order = amd_iommu_aperture_order;
	int ret;

	/*
	 * first allocate a default protection domain for every IOMMU we
	 * found in the system. Devices not assigned to any other
	 * protection domain will be assigned to the default one.
	 */
	list_for_each_entry(iommu, &amd_iommu_list, list) {
		iommu->default_dom = dma_ops_domain_alloc(iommu, order);
		if (iommu->default_dom == NULL)
			return -ENOMEM;
		ret = iommu_init_unity_mappings(iommu);
		if (ret)
			goto free_domains;
	}

	/*
	 * If device isolation is enabled, pre-allocate the protection
	 * domains for each device.
	 */
	if (amd_iommu_isolate)
		prealloc_protection_domains();

	iommu_detected = 1;
	force_iommu = 1;
	bad_dma_address = 0;
#ifdef CONFIG_GART_IOMMU
	gart_iommu_aperture_disabled = 1;
	gart_iommu_aperture = 0;
#endif

	/* Make the driver finally visible to the drivers */
	dma_ops = &amd_iommu_dma_ops;

	return 0;

free_domains:

	list_for_each_entry(iommu, &amd_iommu_list, list) {
		if (iommu->default_dom)
			dma_ops_domain_free(iommu->default_dom);
	}

	return ret;
}