/* pci-dma-nommu.c: Dynamic DMA mapping support for the FRV
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Woodhouse (dwmw2@infradead.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <asm/io.h>

#if 1
#define DMA_SRAM_START	dma_coherent_mem_start
#define DMA_SRAM_END	dma_coherent_mem_end
#else /* use video RAM on Matrox */
#define DMA_SRAM_START	0xe8900000
#define DMA_SRAM_END	0xe8a00000
#endif

/* One record per live allocation, kept on dma_alloc_list sorted by offset */
struct dma_alloc_record {
	struct list_head	list;
	unsigned long		ofs;
	unsigned long		len;
};

static DEFINE_SPINLOCK(dma_alloc_lock);
static LIST_HEAD(dma_alloc_list);

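/*
 * Allocate a coherent DMA buffer: first-fit search for a hole between
 * the records on dma_alloc_list, which is kept sorted by offset within
 * the reserved [DMA_SRAM_START, DMA_SRAM_END) window.
 */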
static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_alloc_record *new;
	struct list_head *this = &dma_alloc_list;
	unsigned long flags;
	unsigned long start = DMA_SRAM_START;
	unsigned long end;

	if (!DMA_SRAM_START) {
		printk(KERN_ERR "%s called without any DMA area reserved!\n",
		       __func__);
		return NULL;
	}

	new = kmalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		return NULL;

	/* Round up to a reasonable alignment */
	new->len = (size + 31) & ~31;

	spin_lock_irqsave(&dma_alloc_lock, flags);

	/* Walk the sorted list looking for the first hole that fits.  As
	 * long as DMA_SRAM_START is 32-byte aligned, every record offset
	 * stays 32-byte aligned too, so a hole big enough for @size is
	 * also big enough for the rounded-up length.
	 */
	list_for_each(this, &dma_alloc_list) {
		struct dma_alloc_record *this_r = list_entry(this, struct dma_alloc_record, list);
		end = this_r->ofs;

		if (end - start >= size)
			goto gotone;

		start = this_r->ofs + this_r->len;
	}
	/* Reached end of list. */
	end = DMA_SRAM_END;
	this = &dma_alloc_list;

	if (end - start >= size) {
	gotone:
		new->ofs = start;
		list_add_tail(&new->list, this);
		spin_unlock_irqrestore(&dma_alloc_lock, flags);

		*dma_handle = start;
		return (void *)start;
	}

	kfree(new);
	spin_unlock_irqrestore(&dma_alloc_lock, flags);
	return NULL;
}

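/*
 * Free a coherent buffer: look the bus address up on the allocation
 * list and unlink its record.  Freeing an address that was never
 * allocated is a fatal error.
 */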
static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	struct dma_alloc_record *rec;
	unsigned long flags;

	spin_lock_irqsave(&dma_alloc_lock, flags);

	list_for_each_entry(rec, &dma_alloc_list, list) {
		if (rec->ofs == dma_handle) {
			list_del(&rec->list);
			kfree(rec);
			spin_unlock_irqrestore(&dma_alloc_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&dma_alloc_lock, flags);
	BUG();
}

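/*
 * Map a scatterlist for streaming DMA.  There is no IOMMU, so the
 * addresses pass through unchanged; all that is needed is to write
 * back and invalidate the CPU cache over each segment.
 */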
static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sglist, sg, nents, i) {
		frv_cache_wback_inv(sg_dma_address(sg),
				    sg_dma_address(sg) + sg_dma_len(sg));
	}

	return nents;
}

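/*
 * Map a single page for streaming DMA: flush the page out of the data
 * cache and hand back its physical address, which doubles as the bus
 * address on this IOMMU-less platform.
 */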
static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	BUG_ON(direction == DMA_NONE);
	flush_dcache_page(page);
	return (dma_addr_t) page_to_phys(page) + offset;
}

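/*
 * Make prior CPU stores visible to the device: the cache was already
 * dealt with at map time, so draining the write buffers is enough.
 */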
static void frv_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	flush_write_buffers();
}

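/* Scatterlist variant: likewise just drain the write buffers. */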
static void frv_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	flush_write_buffers();
}

static int frv_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA can provide.
	 */
	if (mask < 0x00ffffff)
		return 0;
	return 1;
}

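/*
 * The DMA operations for this platform, dispatched through by the
 * generic DMA API.  Unmap and *_for_cpu sync hooks are not provided,
 * presumably because they would have nothing to do here: the cache is
 * flushed when buffers are mapped, not when they are torn down.
 */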
struct dma_map_ops frv_dma_ops = {
	.alloc			= frv_dma_alloc,
	.free			= frv_dma_free,
	.map_page		= frv_dma_map_page,
	.map_sg			= frv_dma_map_sg,
	.sync_single_for_device	= frv_dma_sync_single_for_device,
	.sync_sg_for_device	= frv_dma_sync_sg_for_device,
	.dma_supported		= frv_dma_supported,
};
EXPORT_SYMBOL(frv_dma_ops);
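
/*
 * Usage sketch (hypothetical driver code, not part of this file): with
 * frv_dma_ops installed as the device's dma_map_ops, a driver reaches
 * frv_dma_alloc()/frv_dma_free() through the generic API:
 *
 *	dma_addr_t bus;
 *	void *buf = dma_alloc_coherent(&pdev->dev, SZ_1K, &bus, GFP_ATOMIC);
 *
 *	if (buf) {
 *		... program the device with `bus' ...
 *		dma_free_coherent(&pdev->dev, SZ_1K, buf, bus);
 *	}
 */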