#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd) || !walk->vma) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                /*
                 * This implies that each ->pmd_entry() handler needs to
                 * know about pmd_trans_huge() pmds (a sketch of such a
                 * handler follows this function).
                 */
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to.
                 */
                if (!walk->pte_entry)
                        continue;

                split_huge_pmd(walk->vma, pmd, addr);
                if (pmd_trans_unstable(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}

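/*
 * Illustrative sketch only, not part of this file: a minimal ->pmd_entry()
 * handler that honours the rule noted in walk_pmd_range() by checking for
 * transparent huge pmds itself. "demo_pmd_entry" is a hypothetical name.
 *
 *      static int demo_pmd_entry(pmd_t *pmd, unsigned long addr,
 *                                unsigned long end, struct mm_walk *walk)
 *      {
 *              if (pmd_trans_huge(*pmd)) {
 *                      // Handle the huge pmd as a whole here, or simply
 *                      // return 0 and let walk_pmd_range() split it when
 *                      // a ->pte_entry() callback is also registered.
 *                      return 0;
 *              }
 *              // A normal pmd: walk_pmd_range() descends to the ptes
 *              // if ->pte_entry is set.
 *              return 0;
 *      }
 */
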
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        pgd = pgd_offset(walk->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pud_range(pgd, addr, next, walk);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
        return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        pte_t *pte;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask);
                if (pte && walk->hugetlb_entry)
                        err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
                if (err)
                        break;
        } while (addr = next, addr != end);

        return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. Negative values mean an
 * error, in which case we abort the current walk. (A sketch of such a
 * callback follows this function.)
 */
static int walk_page_test(unsigned long start, unsigned long end,
                          struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;

        if (walk->test_walk)
                return walk->test_walk(start, end, walk);

        /*
         * A VM_PFNMAP vma has no valid struct pages behind its range,
         * so we don't walk over it as we do for normal vmas. However,
         * some callers are interested in handling holes and don't want
         * to simply ignore any address range. Such users define a
         * ->pte_hole() callback, so delegate VM_PFNMAP vmas to it.
         */
        if (vma->vm_flags & VM_PFNMAP) {
                int err = 1;
                if (walk->pte_hole)
                        err = walk->pte_hole(start, end, walk);
                return err ? err : 1;
        }
        return 0;
}

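/*
 * Illustrative sketch only, not part of this file: a ->test_walk()
 * callback following the convention documented above walk_page_test(),
 * skipping mlocked vmas instead of aborting the walk. "demo_test_walk"
 * is a hypothetical name.
 *
 *      static int demo_test_walk(unsigned long start, unsigned long end,
 *                                struct mm_walk *walk)
 *      {
 *              if (walk->vma->vm_flags & VM_LOCKED)
 *                      return 1;       // skip this vma, keep walking
 *              return 0;               // walk this vma normally
 *      }
 */
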
static int __walk_page_range(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        int err = 0;
        struct vm_area_struct *vma = walk->vma;

        if (vma && is_vm_hugetlb_page(vma)) {
                if (walk->hugetlb_entry)
                        err = walk_hugetlb_range(start, end, walk);
        } else
                err = walk_pgd_range(start, end, walk);

        return err;
}

/**
 * walk_page_range - walk page table with caller-specific callbacks
 *
 * Recursively walk the page table tree of the process represented by @walk->mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as follows:
 *  - 0  : succeeded to handle the current entry, and if you haven't reached
 *         the end address yet, continue to walk.
 *  - >0 : succeeded to handle the current entry, and return to the caller
 *         with a caller-specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with an error code.
 *
 * Before starting to walk the page tables, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
 * purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for access from the callbacks. If you want to pass some
 * caller-specific data to the callbacks, @walk->private should be helpful.
 * (A usage sketch follows this function.)
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these functions traverse the vma list
 *   and/or access vma fields.
 */
int walk_page_range(unsigned long start, unsigned long end,
                    struct mm_walk *walk)
{
        int err = 0;
        unsigned long next;
        struct vm_area_struct *vma;

        if (start >= end)
                return -EINVAL;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

        vma = find_vma(walk->mm, start);
        do {
                if (!vma) { /* after the last vma */
                        walk->vma = NULL;
                        next = end;
                } else if (start < vma->vm_start) { /* outside vma */
                        walk->vma = NULL;
                        next = min(end, vma->vm_start);
                } else { /* inside vma */
                        walk->vma = vma;
                        next = min(end, vma->vm_end);
                        vma = vma->vm_next;

                        err = walk_page_test(start, next, walk);
                        if (err > 0) {
                                /*
                                 * positive return values are purely for
                                 * controlling the pagewalk, so should never
                                 * be passed to the callers.
                                 */
                                err = 0;
                                continue;
                        }
                        if (err < 0)
                                break;
                }
                if (walk->vma || walk->pte_hole)
                        err = __walk_page_range(start, next, walk);
                if (err)
                        break;
        } while (start = next, start < end);
        return err;
}

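/*
 * Illustrative usage sketch only, not part of this file: count the ptes
 * that are present in a range, passing the counter via @walk->private.
 * "count_ptes" and "count_pte_entry" are hypothetical names; as noted
 * above, the caller must hold mmap_sem.
 *
 *      static int count_pte_entry(pte_t *pte, unsigned long addr,
 *                                 unsigned long end, struct mm_walk *walk)
 *      {
 *              unsigned long *count = walk->private;
 *
 *              if (pte_present(*pte))
 *                      (*count)++;
 *              return 0;
 *      }
 *
 *      static unsigned long count_ptes(struct mm_struct *mm,
 *                                      unsigned long start, unsigned long end)
 *      {
 *              unsigned long count = 0;
 *              struct mm_walk walk = {
 *                      .pte_entry = count_pte_entry,
 *                      .mm = mm,
 *                      .private = &count,
 *              };
 *
 *              down_read(&mm->mmap_sem);
 *              walk_page_range(start, end, &walk);
 *              up_read(&mm->mmap_sem);
 *              return count;
 *      }
 */
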
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
        int err;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
        VM_BUG_ON(!vma);
        walk->vma = vma;
        err = walk_page_test(vma->vm_start, vma->vm_end, walk);
        if (err > 0)
                return 0;
        if (err < 0)
                return err;
        return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}