/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
};

static bool dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;

	if (flags & DMA_MEMORY_MAP)
		mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	else
		mem_base = ioremap(phys_addr, size);
	if (!mem_base)
		goto out;

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return true;

out:
	kfree(dma_mem);
	if (mem_base) {
		if (flags & DMA_MEMORY_MAP)
			memunmap(mem_base);
		else
			iounmap(mem_base);
	}
	return false;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	if (mem->flags & DMA_MEMORY_MAP)
		memunmap(mem->virt_base);
	else
		iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;

	if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
				      &mem))
		return 0;

	if (dma_assign_coherent_memory(dev, mem) == 0)
		return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;

	dma_release_coherent_memory(mem);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
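
/*
 * Example (a hedged sketch, not from any real driver; the bus address and
 * size below are invented for illustration): a device with a dedicated
 * on-chip memory window can hand that window to the DMA API at probe time
 * and release it again on remove:
 *
 *	if (dma_declare_coherent_memory(&pdev->dev, 0x90000000, 0x90000000,
 *					SZ_1M,
 *					DMA_MEMORY_MAP |
 *					DMA_MEMORY_EXCLUSIVE) !=
 *	    DMA_MEMORY_MAP)
 *		return -ENOMEM;
 *	...
 *	dma_release_declared_memory(&pdev->dev);
 *
 * Afterwards dma_alloc_coherent(&pdev->dev, ...) is satisfied from this
 * pool, and only from this pool, because DMA_MEMORY_EXCLUSIVE forbids the
 * fallback to generic memory.
 */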

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
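
/*
 * Example (hypothetical; the device address is made up): if the first page
 * of a declared region is owned by firmware, a driver can pull it out of
 * the allocator before handing out any buffers:
 *
 *	void *virt = dma_mark_declared_memory_occupied(dev, 0x90000000,
 *						       PAGE_SIZE);
 *	if (IS_ERR(virt))
 *		return PTR_ERR(virt);
 *
 * Later dma_alloc_coherent() calls will then never return that page.
 */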

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	unsigned long flags;
	int pageno;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*ret = NULL;
	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
	memset(*ret, 0, size);
	spin_unlock_irqrestore(&mem->spinlock, flags);

	return 1;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	/*
	 * In the case where the allocation cannot be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
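
/*
 * Example caller (a sketch of the per-arch pattern only; arch_dma_alloc()
 * and arch_alloc_from_generic_memory() are stand-in names, not a real
 * architecture's hooks): the arch allocator offers the request to the
 * per-device pool first and touches the page allocator only when this
 * function says so:
 *
 *	void *arch_dma_alloc(struct device *dev, size_t size,
 *			     dma_addr_t *dma_handle, gfp_t gfp)
 *	{
 *		void *ret;
 *
 *		if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
 *			return ret;	// pool hit, or NULL if exclusive+full
 *
 *		return arch_alloc_from_generic_memory(dev, size,
 *						      dma_handle, gfp);
 *	}
 */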

/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller
 * should proceed with releasing memory from generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
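
/*
 * Example caller (again a sketch with invented names, mirroring the
 * allocation path above): the arch free routine gives the per-device pool
 * the first chance to take the buffer back:
 *
 *	void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 *			   dma_addr_t dma_handle)
 *	{
 *		if (dma_release_from_coherent(dev, get_order(size), vaddr))
 *			return;
 *
 *		arch_free_to_generic_memory(dev, size, vaddr, dma_handle);
 *	}
 */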

/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_coherent
 * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		int count = size >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
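
/*
 * Example caller (a sketch; arch_dma_mmap() and the generic fallback are
 * invented names): an arch dma_mmap implementation lets the per-device
 * pool try first and only falls back when this function returns 0:
 *
 *	int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
 *	{
 *		int ret;
 *
 *		if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 *			return ret;
 *
 *		return arch_mmap_from_generic_memory(dev, vma, cpu_addr,
 *						     dma_addr, size);
 *	}
 */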

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;

	if (!mem &&
	    !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
				      DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
				      &mem)) {
		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			&rmem->base, (unsigned long)rmem->size / SZ_1M);
		return -ENODEV;
	}
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
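
/*
 * Example device tree usage (addresses, sizes and node names are made up
 * for illustration; see the reserved-memory binding documentation for the
 * authoritative format). A "shared-dma-pool" region without "reusable"
 * (and with "no-map" on ARM, as required above) becomes the coherent pool
 * of every device that references it through memory-region:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma_pool@78000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x78000000 0x800000>;
 *			no-map;
 *		};
 *	};
 *
 *	video@10000000 {
 *		...
 *		memory-region = <&dma_pool>;
 *	};
 */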
#endif