/*
 * mm/fadvise.c
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 11Jan2003	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/fadvise.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/swap.h>

#include <asm/unistd.h>

/*
 * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
 * deactivate the pages and clear PG_Referenced.
 */
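/*
 * This is the full 64-bit form of the syscall; the fadvise64() wrapper at
 * the bottom of this file simply forwards to it, and userspace reaches it
 * through posix_fadvise().
 */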
SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
{
	struct fd f = fdget(fd);
	struct inode *inode;
	struct address_space *mapping;
	struct backing_dev_info *bdi;
	loff_t endbyte;			/* inclusive */
	pgoff_t start_index;
	pgoff_t end_index;
	unsigned long nrpages;
	int ret = 0;

	if (!f.file)
		return -EBADF;

	inode = file_inode(f.file);
	if (S_ISFIFO(inode->i_mode)) {
		ret = -ESPIPE;
		goto out;
	}

	mapping = f.file->f_mapping;
	if (!mapping || len < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (IS_DAX(inode)) {
		switch (advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
		case POSIX_FADV_WILLNEED:
		case POSIX_FADV_NOREUSE:
		case POSIX_FADV_DONTNEED:
			/* no bad return value, but ignore advice */
			break;
		default:
			ret = -EINVAL;
		}
		goto out;
	}

	/* Careful about overflows. Len == 0 means "as much as possible" */
	endbyte = offset + len;
	if (!len || endbyte < len)
		endbyte = -1;
	else
		endbyte--;		/* inclusive */
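	/*
	 * e.g. offset = 4096, len = 0 leaves endbyte == -1 ("to the end of
	 * the file"), and an offset + len that wraps around is caught by the
	 * endbyte < len test and treated the same way.
	 */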

	bdi = inode_to_bdi(mapping->host);

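	/*
	 * The first three advice values only tune this file's readahead
	 * state: NORMAL restores the backing device's default readahead
	 * window, RANDOM sets FMODE_RANDOM (which suppresses heuristic
	 * readahead), and SEQUENTIAL doubles the window.
	 */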
	switch (advice) {
	case POSIX_FADV_NORMAL:
		f.file->f_ra.ra_pages = bdi->ra_pages;
		spin_lock(&f.file->f_lock);
		f.file->f_mode &= ~FMODE_RANDOM;
		spin_unlock(&f.file->f_lock);
		break;
	case POSIX_FADV_RANDOM:
		spin_lock(&f.file->f_lock);
		f.file->f_mode |= FMODE_RANDOM;
		spin_unlock(&f.file->f_lock);
		break;
	case POSIX_FADV_SEQUENTIAL:
		f.file->f_ra.ra_pages = bdi->ra_pages * 2;
		spin_lock(&f.file->f_lock);
		f.file->f_mode &= ~FMODE_RANDOM;
		spin_unlock(&f.file->f_lock);
		break;
	case POSIX_FADV_WILLNEED:
		/* First and last PARTIAL page! */
		start_index = offset >> PAGE_SHIFT;
		end_index = endbyte >> PAGE_SHIFT;

		/* Careful about overflow on the "+1" */
		nrpages = end_index - start_index + 1;
		if (!nrpages)
			nrpages = ~0UL;

		/*
		 * Ignore the return value because fadvise() shall return
		 * success even if the filesystem can't retrieve a hint.
		 */
		force_page_cache_readahead(mapping, f.file, start_index,
					   nrpages);
		break;
	case POSIX_FADV_NOREUSE:
		break;
	case POSIX_FADV_DONTNEED:
		if (!inode_write_congested(mapping->host))
			__filemap_fdatawrite_range(mapping, offset, endbyte,
						   WB_SYNC_NONE);

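		/*
		 * The writeback above is started with WB_SYNC_NONE and is not
		 * waited for; it gives dirty pages a chance to become clean
		 * so that invalidate_mapping_pages() below (which skips dirty,
		 * locked or mapped pages) can actually drop them.
		 */
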
		/*
		 * First and last FULL page! Partial pages are deliberately
		 * preserved on the expectation that it is better to preserve
		 * needed memory than to discard unneeded memory.
		 */
		start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
		end_index = (endbyte >> PAGE_SHIFT);
		if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
			/* First page is tricky as 0 - 1 = -1, but pgoff_t
			 * is unsigned, so the end_index >= start_index
			 * check below would be true and we'll discard the
			 * whole file cache, which is not what was asked for.
			 */
			if (end_index == 0)
				break;

			end_index--;
		}
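		/*
		 * Illustration (assuming 4 KiB pages): offset = 1000 and
		 * len = 10000 give endbyte = 10999, start_index = 1 and,
		 * after the adjustment above, end_index = 1, so only the
		 * fully covered page (bytes 4096..8191) is invalidated and
		 * the partial pages at either end are preserved.
		 */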

		if (end_index >= start_index) {
			unsigned long count = invalidate_mapping_pages(mapping,
						start_index, end_index);

			/*
			 * If fewer pages were invalidated than expected then
			 * it is possible that some of the pages were on
			 * a per-cpu pagevec for a remote CPU. Drain all
			 * pagevecs and try again.
			 */
			if (count < (end_index - start_index + 1)) {
				lru_add_drain_all();
				invalidate_mapping_pages(mapping, start_index,
						end_index);
			}
		}
		break;
	default:
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}

#ifdef __ARCH_WANT_SYS_FADVISE64

SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
{
	return sys_fadvise64_64(fd, offset, len, advice);
}

#endif
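
/*
 * Illustrative sketch (not part of the original file): from userspace the
 * usual entry point is posix_fadvise(), which the C library implements on
 * top of these syscalls.  For example, streaming through a hypothetical
 * file "datafile" and then dropping its cached pages:
 *
 *	int fd = open("datafile", O_RDONLY);
 *	posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL);
 *	... read the whole file ...
 *	posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
 */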