/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page(), page_to_pfn(),
 * virt_to_page() and page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
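/*
 * As an illustration (a sketch of the generic macros; see
 * include/asm-generic/memory_model.h for CONFIG_SPARSEMEM_VMEMMAP),
 * once the map is populated both conversions reduce to pointer
 * arithmetic against the vmemmap base:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */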
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid(size, align, goal,
					   BOOTMEM_ALLOC_ACCESSIBLE, node);
}

static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, otherwise fall back to bootmem. */
	if (slab_is_available()) {
		struct page *page;

		if (node_state(node, N_HIGH_MEMORY))
			page = alloc_pages_node(
				node, GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		else
			page = alloc_pages(
				GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT,
				get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
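
/*
 * Note: alloc_pages(..., get_order(size)) rounds each request up to a
 * power-of-two number of pages, so callers are expected to pass
 * naturally sized blocks (PAGE_SIZE here, or PMD_SIZE on architectures
 * that back the map with huge pages).
 */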

/*
 * During the early stage all requests must be of the same size, so that
 * the pre-allocated buffer below can simply be carved up sequentially.
 */
static void * __meminit alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* take the allocation from the buffer */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}
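
/*
 * Example of the early-boot fast path (a sketch, assuming 4K pages):
 * once sparse_mem_maps_populate_node() has reserved a large bootmem
 * buffer, consecutive calls
 *
 *	p0 = alloc_block_buf(PAGE_SIZE, node);	// returns vmemmap_buf
 *	p1 = alloc_block_buf(PAGE_SIZE, node);	// returns vmemmap_buf + 4K
 *
 * simply bump the cursor; a request that would cross vmemmap_buf_end
 * falls back to vmemmap_alloc_block().
 */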
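/*
 * The helpers below allocate from a struct vmem_altmap (see
 * include/linux/memremap.h), which carves the backing pages out of the
 * device memory range itself. Roughly, in pfn order:
 *
 *	[base_pfn, base_pfn + reserve)		kept back for driver use
 *	[base_pfn + reserve, ... + free)	pool for vmemmap storage
 *
 * alloc and align count the pages already handed out of that pool.
 */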
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

/**
 * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
 * @altmap: reserved page pool for the allocation
 * @nr_pfns: size (in pages) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
	unsigned long pfn = vmem_altmap_next_pfn(altmap);
	unsigned long nr_align;

	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;

	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return ULONG_MAX;
	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	return pfn + nr_align;
}
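
/*
 * Worked example (hypothetical numbers): for nr_pfns = 0x200,
 * find_first_bit() yields 9, so the request is aligned to 512 pfns.
 * If the next free pfn is 0x12345, then
 *
 *	nr_align = ALIGN(0x12345, 0x200) - 0x12345 = 0xbb
 *
 * pad pfns are recorded in altmap->align and the allocation itself
 * starts at pfn 0x12400.
 */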

static void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns;
	void *ptr;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be a multiple of PAGE_SIZE (%lu)\n",
				__func__, size);
		return NULL;
	}

	nr_pfns = size >> PAGE_SHIFT;
	pfn = vmem_altmap_alloc(altmap, nr_pfns);
	if (pfn < ULONG_MAX)
		ptr = __va(__pfn_to_phys(pfn));
	else
		ptr = NULL;
	pr_debug("%s: pfn: %#lx alloc: %lu align: %lu nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);

	return ptr;
}

/*
 * During the early stage all requests must be of the same size (see
 * alloc_block_buf() above).
 */
void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
		struct vmem_altmap *altmap)
{
	if (altmap)
		return altmap_alloc_block_buf(size, altmap);
	return alloc_block_buf(size, node);
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

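/*
 * Populate the kernel page tables for [start, end) one base page at a
 * time, instantiating any missing intermediate levels with the helpers
 * above.
 */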
int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
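
/*
 * An architecture with no special vmemmap requirements can implement
 * vmemmap_populate() as a direct call to the above (a sketch; arm64,
 * for one, does essentially this when it is not using section maps):
 *
 *	int __meminit vmemmap_populate(unsigned long start,
 *				       unsigned long end, int node)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */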

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}

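/*
 * Sizing example (a sketch, assuming common x86_64 parameters): with
 * 128M sections, PAGES_PER_SECTION = 32768 and sizeof(struct page) = 64,
 * each section needs 2M of memory map, i.e. exactly one PMD-sized
 * block, which is why the buffer below is reserved in PMD_SIZE units.
 */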
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
			 PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* need to free the leftover part of the buffer */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}