/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen run on a non-contiguous memory architecture.
 *
 * When PCI pass-through is used, an IOMMU is needed for translating
 * bus (DMA) addresses to virtual addresses and vice versa, and for
 * providing contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. Linux
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFNs) to machine frame numbers (MFNs)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

u64 start_dma_addr;

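/*
 * Address translation helpers: convert between the guest's pseudo-physical
 * addresses and the machine (bus) addresses a device actually sees, using
 * the Xen PFN<->MFN lookup described above.
 */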
static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	return phys_to_machine(XPADDR(paddr)).maddr;
}

static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	return machine_to_phys(XMADDR(baddr)).paddr;
}

static dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

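/*
 * Check whether the machine frames backing a buffer that starts at @pfn and
 * spans @offset + @length bytes are contiguous, i.e. each successive PFN
 * maps to the next MFN.
 */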
static int check_pages_physically_contiguous(unsigned long pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_mfn;
	int i;
	int nr_pages;

	next_mfn = pfn_to_mfn(pfn);
	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_mfn(++pfn) != ++next_mfn)
			return 0;
	}
	return 1;
}

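/*
 * Return 1 if the physical range [p, p + size) crosses a page boundary
 * without being backed by machine-contiguous frames, in which case a device
 * cannot DMA to it directly and a bounce buffer is needed.
 */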
static int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long pfn = PFN_DOWN(p);
	unsigned int offset = p & ~PAGE_MASK;

	if (offset + size <= PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(pfn, offset, size))
		return 0;
	return 1;
}

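/*
 * Return 1 if the bus address falls inside this domain's SWIOTLB bounce
 * buffer, i.e. it was handed out by swiotlb_tbl_map_single() below.
 */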
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long mfn = PFN_DOWN(dma_addr);
	unsigned long pfn = mfn_to_local_pfn(mfn);
	phys_addr_t paddr;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check addresses within our domain.
	 */
	if (pfn_valid(pfn)) {
		paddr = PFN_PHYS(pfn);
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;

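/*
 * Swap the pseudo-physical pages backing the buffer for machine memory that
 * is contiguous and addressable within dma_bits, working in
 * IO_TLB_SEGSIZE-slab chunks and widening the address restriction up to
 * max_dma_bits if the hypervisor cannot satisfy the narrower request.
 */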
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				(unsigned long)buf + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
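
/*
 * Pick the number of IO TLB slabs: use the caller-supplied value, or default
 * to 64MB worth of slabs rounded up to IO_TLB_SEGSIZE, and return the
 * resulting buffer size in bytes.
 */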
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}
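
/*
 * Set up the Xen software IO TLB: allocate the buffer (from bootmem when
 * called early, from the page allocator otherwise), exchange it for
 * machine-contiguous memory below 4GB via xen_swiotlb_fixup(), and hand it
 * to the core swiotlb code.  On failure the size is halved (down to a 2MB
 * minimum) and the whole sequence is retried up to three times.
 */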
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
	/*
	 * Get IO TLB memory from any location.
	 */
	if (early)
		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB "
				"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
		       (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
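
/*
 * Allocate a coherent DMA buffer.  The pages come from the normal page
 * allocator; if the resulting machine address range does not fit the
 * device's coherent DMA mask or is not machine-contiguous, it is exchanged
 * with the hypervisor for memory that is.
 */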
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   struct dma_attrs *attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	unsigned long vstart;
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's ideas of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
		return ret;

	vstart = __get_free_pages(flags, order);
	ret = (void *)vstart;

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	phys = virt_to_phys(ret);
	dev_addr = xen_phys_to_bus(phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(vstart, order,
						 fls64(dma_mask)) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		*dma_handle = virt_to_machine(ret).maddr;
	}
	memset(ret, 0, size);
	return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

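/*
 * Free a buffer obtained from xen_swiotlb_alloc_coherent(), undoing the
 * contiguous-region exchange first when one was made at allocation time.
 */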
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, struct dma_attrs *attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (dma_release_from_coherent(hwdev, order, vaddr))
		return;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	phys = virt_to_phys(vaddr);

	if ((dev_addr + size - 1 > dma_mask) ||
	    range_straddles_page_boundary(phys, size))
		xen_destroy_contiguous_region((unsigned long)vaddr, order);

	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * bus (DMA) address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	if (!map)
		return DMA_ERROR_CODE;

	dev_addr = xen_virt_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		dev_addr = 0;
	}
	return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
					target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			void *map = swiotlb_tbl_map_single(hwdev,
							   start_dma_addr,
							   sg_phys(sg),
							   sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sgl[0].dma_length = 0;
				return DMA_ERROR_CODE;
			}
			sg->dma_address = xen_virt_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

int
xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		   enum dma_data_direction dir)
{
	return xen_swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir)
{
	return xen_swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg->dma_length, dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

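/*
 * A zero bus address marks a failed mapping: xen_swiotlb_map_page() clears
 * the returned address when the bounce buffer is not reachable by the device.
 */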
int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);