/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include "internal.h"


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

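/*
 * Illustrative sketch (hypothetical, not part of this file): a filesystem
 * with private per-page state hooks into do_invalidatepage() via its
 * address_space_operations.  myfs_trim_private() is a made-up helper:
 *
 *	static void myfs_invalidatepage(struct page *page, unsigned long offset)
 *	{
 *		myfs_trim_private(page, offset);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.invalidatepage	= myfs_invalidatepage,
 *	};
 *
 * Block-backed filesystems that only use buffer_heads can leave the hook
 * NULL and fall back to block_invalidatepage(), per the #ifdef above.
 */
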
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself; it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

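/*
 * Illustrative call (a sketch; truncate_complete_page() below does exactly
 * this): cancel a whole page's worth of dirty state and credit the task's
 * cancelled-write accounting back:
 *
 *	cancel_dirty_page(page, PAGE_CACHE_SIZE);
 *
 * Pass 0 as @account_size to clear the dirty state without touching the
 * task I/O accounting.
 */
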
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

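/*
 * Illustrative usage (a sketch; the locking is the caller's job, as both
 * truncation passes below demonstrate):
 *
 *	lock_page(page);
 *	wait_on_page_writeback(page);
 *	truncate_inode_page(mapping, page);
 *	unlock_page(page);
 */
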
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

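/*
 * Illustrative caller (a sketch): since the return value is 0 or 1,
 * callers can accumulate it to count dropped pages, exactly as
 * invalidate_mapping_pages() does below:
 *
 *	if (trylock_page(page)) {
 *		nr_dropped += invalidate_inode_page(page);
 *		unlock_page(page);
 *	}
 */
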
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

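/*
 * Illustrative call (a sketch, assuming 4KB pages): drop the pagecache
 * backing bytes 4096..16383 of a file.  Note that @lend must point at the
 * last byte of a page - the BUG_ON above enforces this:
 *
 *	truncate_inode_pages_range(inode->i_mapping, 4096, 16383);
 */
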
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

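/*
 * Typical call (a sketch, not from this file): empty an inode's entire
 * pagecache, e.g. when the inode is being torn down; per the comment
 * above, the caller holds inode->i_mutex where that matters:
 *
 *	truncate_inode_pages(&inode->i_data, 0);
 */
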
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			ret += invalidate_inode_page(page);

			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

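/*
 * Illustrative call (a sketch): best-effort eviction of every clean,
 * unmapped page of an inode; dirty, locked and mapped pages are silently
 * skipped:
 *
 *	unsigned long nr = invalidate_mapping_pages(mapping, 0, -1);
 *
 * The return value is the number of pages actually released.
 */
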
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

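/*
 * Sketch of a ->launder_page implementation (hypothetical filesystem;
 * myfs_write_page_sync() is a made-up helper): write the locked, dirty
 * page back synchronously so the invalidation below may then drop it:
 *
 *	static int myfs_launder_page(struct page *page)
 *	{
 *		return myfs_write_page_sync(page);
 *	}
 *
 * NFS is the classic in-tree user of this hook.
 */
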
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page.
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
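
/*
 * Illustrative call site (a sketch, not from this file): direct-I/O style
 * code typically invalidates the pagecache after bypassing it, so that
 * later buffered reads go back to disk:
 *
 *	int err = invalidate_inode_pages2(mapping);
 *
 * A negative err means some page could not be invalidated (e.g. it was
 * redirtied behind our back) and the caller must cope.
 */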