/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

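/*
 * Note: the helpers below translate the flags the VFS hands us at open time
 * into their SMB equivalents. cifs_convert_flags() maps the O_ACCMODE bits
 * onto NT generic access rights; for O_RDWR it deliberately requests
 * GENERIC_READ | GENERIC_WRITE rather than GENERIC_ALL, since asking for
 * more access than needed can fail a create with ACCESS_DENIED.
 */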
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

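/*
 * Open (or create) a file using the POSIX extensions to the CIFS protocol.
 * On success the caller receives the new netfid and oplock level; if
 * *pinode is NULL a fresh inode is built from the returned
 * FILE_UNIX_BASIC_INFO, otherwise the existing inode is refreshed from it.
 */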
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably. O_LARGEFILE is
 *	irrelevant because largefile support is always used by this
 *	client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *	O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock, buf,
			       cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

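/*
 * Return true if any open instance of this inode holds outstanding
 * mandatory byte-range locks. Used when a read oplock arrives to decide
 * whether caching must be disabled (see cifs_new_fileinfo() below).
 */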
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

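/*
 * Allocate and initialize the per-open cifsFileInfo, hang its brlock list
 * off the inode, resolve the effective oplock level (including any update
 * that arrived through a pending open), and link the new instance into the
 * tcon and inode open-file lists. The returned structure carries the
 * initial reference (count == 1).
 */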
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (oplock == server->vals->oplock_read &&
	    cifs_has_mand_locks(cinode)) {
		cFYI(1, "Reset oplock val from read to None due to mand locks");
		oplock = 0;
	}

	spin_lock(&cifs_file_list_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to push them */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_write(&cinode->lock_sem);
	return rc;
}

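/*
 * Reopen a file whose handle has been invalidated, typically after a
 * session reconnect. A POSIX reopen is attempted first (with O_CREAT,
 * O_EXCL and O_TRUNC masked off); otherwise we fall back to a regular
 * open, optionally flush dirty pages and refresh the inode, then rebind
 * the fid and reacquire any cached byte-range locks.
 */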
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * Fall through to retry the open the old way on errors;
		 * especially in the reconnect path it is important to
		 * retry hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * CIFSSMBOpen and then calling get_inode_info with the returned buf,
	 * since the file might have write-behind data that needs to be flushed
	 * and the server's version of the file size can be stale. If we knew
	 * for sure that the inode was not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to the server already and could
	 * deadlock if we tried to flush data; and since we do not know if we
	 * have data that would invalidate the current end of file on the
	 * server, we can not go to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

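/*
 * Byte-range lock bookkeeping: each cifsLockInfo describes a single lock
 * held or requested through one fid. Waiters blocked on a conflicting
 * lock are queued on its blist and sleep on block_q until the conflict
 * goes away (see cifs_del_lock_waiters() below).
 */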
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : CIFS_LOCK_OP - no op, CIFS_READ_OP - read, CIFS_WRITE_OP - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * send a request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * send a request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

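/*
 * Send all cached mandatory brlocks for this fid to the server. The ranges
 * are packed into a LOCKING_ANDX_RANGE array sized to the negotiated
 * buffer (max_buf) and flushed with one cifs_lockv() call per batch.
 * Exclusive and shared locks are pushed in separate passes because each
 * request carries a single lock type.
 */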
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

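/*
 * Send all cached POSIX locks for this file to the server. The inode's
 * lock list may only be walked under lock_flocks(), where sleeping
 * GFP_KERNEL allocations are not safe, so the locks are counted first,
 * the lock_to_push structures are allocated outside the lock, and the
 * list is filled in on a second walk.
 */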
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001086static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001087cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001088{
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001089 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1090 struct file_lock *flock, **before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001091 unsigned int count = 0, i = 0;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001092 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001093 struct list_head locks_to_send, *el;
1094 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001095 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001096
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001097 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001098
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001099 lock_flocks();
1100 cifs_for_each_lock(cfile->dentry->d_inode, before) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001101 if ((*before)->fl_flags & FL_POSIX)
1102 count++;
1103 }
1104 unlock_flocks();
1105
1106 INIT_LIST_HEAD(&locks_to_send);
1107
1108 /*
Pavel Shilovskyce858522012-03-17 09:46:55 +03001109 * Allocating count locks is enough because no FL_POSIX locks can be
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001110 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001111 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001112 */
1113 for (; i < count; i++) {
1114 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1115 if (!lck) {
1116 rc = -ENOMEM;
1117 goto err_out;
1118 }
1119 list_add_tail(&lck->llist, &locks_to_send);
1120 }
1121
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001122 el = locks_to_send.next;
1123 lock_flocks();
1124 cifs_for_each_lock(cfile->dentry->d_inode, before) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001125 flock = *before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001126 if ((flock->fl_flags & FL_POSIX) == 0)
1127 continue;
Pavel Shilovskyce858522012-03-17 09:46:55 +03001128 if (el == &locks_to_send) {
1129 /*
1130 * The list ended. We don't have enough allocated
1131 * structures - something is really wrong.
1132 */
1133 cERROR(1, "Can't push all brlocks!");
1134 break;
1135 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001136 length = 1 + flock->fl_end - flock->fl_start;
1137 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1138 type = CIFS_RDLCK;
1139 else
1140 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001141 lck = list_entry(el, struct lock_to_push, llist);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001142 lck->pid = flock->fl_pid;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001143 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001144 lck->length = length;
1145 lck->type = type;
1146 lck->offset = flock->fl_start;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001147 el = el->next;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001148 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001149 unlock_flocks();
1150
1151 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001152 int stored_rc;
1153
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001154 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001155 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001156 lck->type, 0);
1157 if (stored_rc)
1158 rc = stored_rc;
1159 list_del(&lck->llist);
1160 kfree(lck);
1161 }
1162
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001163out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001164 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001165 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001166err_out:
1167 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1168 list_del(&lck->llist);
1169 kfree(lck);
1170 }
1171 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001172}
1173
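/*
 * Push all cached byte-range locks to the server, taking the POSIX or
 * mandatory path based on the server capabilities and mount options,
 * and clear can_cache_brlcks under lock_sem when done.
 */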
1174static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001175cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001176{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001177 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001178 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001179 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001180 int rc = 0;
1181
1182 /* we are going to update can_cache_brlcks here - need write access */
1183 down_write(&cinode->lock_sem);
1184 if (!cinode->can_cache_brlcks) {
1185 up_write(&cinode->lock_sem);
1186 return rc;
1187 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001188
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001189 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001190 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1191 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001192 rc = cifs_push_posix_locks(cfile);
1193 else
1194 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001195
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001196 cinode->can_cache_brlcks = false;
1197 up_write(&cinode->lock_sem);
1198 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001199}
1200
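/*
 * Translate a VFS file_lock into the server's lock type and the
 * lock/unlock/wait flags used by the CIFS locking code.
 */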
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001201static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001202cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001203 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001205 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001206 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001207 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001208 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001209 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001210 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001211 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001213 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001214 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001215 "not implemented yet");
1216 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001217 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001218 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001219 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1220 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001221 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001223 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001224 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001225 cFYI(1, "F_WRLCK ");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001226 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001227 *lock = 1;
1228 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001229 cFYI(1, "F_UNLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001230 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001231 *unlock = 1;
1232 /* Check if unlock includes more than one lock range */
1233 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001234 cFYI(1, "F_RDLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001235 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001236 *lock = 1;
1237 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001238 cFYI(1, "F_EXLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001239 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001240 *lock = 1;
1241 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001242 cFYI(1, "F_SHLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001243 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001244 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001246 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001247}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248
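/*
 * Handle F_GETLK: use a POSIX lock test when available; otherwise probe
 * for conflicts by trying to set (and then undo) a mandatory lock over
 * the requested range.
 */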
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001249static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001250cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001251 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001252{
1253 int rc = 0;
1254 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001255 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1256 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001257 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001258 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001260 if (posix_lck) {
1261 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001262
1263 rc = cifs_posix_lock_test(file, flock);
1264 if (!rc)
1265 return rc;
1266
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001267 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001268 posix_lock_type = CIFS_RDLCK;
1269 else
1270 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001271 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001272 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001273 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274 return rc;
1275 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001276
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001277 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001278 if (!rc)
1279 return rc;
1280
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001281 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001282 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1283 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001284 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001285 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1286 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001287 flock->fl_type = F_UNLCK;
1288 if (rc != 0)
1289 cERROR(1, "Error %d unlocking previously locked "
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001290 "range during test of lock", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001291 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001292 }
1293
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001294 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001295 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001296 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001297 }
1298
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001299 type &= ~server->vals->exclusive_lock_type;
1300
1301 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1302 type | server->vals->shared_lock_type,
1303 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001304 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001305 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1306 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001307 flock->fl_type = F_RDLCK;
1308 if (rc != 0)
1309 cERROR(1, "Error %d unlocking previously locked "
1310 "range during test of lock", rc);
1311 } else
1312 flock->fl_type = F_WRLCK;
1313
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001314 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001315}
1316
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001317void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001318cifs_move_llist(struct list_head *source, struct list_head *dest)
1319{
1320 struct list_head *li, *tmp;
1321 list_for_each_safe(li, tmp, source)
1322 list_move(li, dest);
1323}
1324
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001325void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001326cifs_free_llist(struct list_head *llist)
1327{
1328 struct cifsLockInfo *li, *tmp;
1329 list_for_each_entry_safe(li, tmp, llist, llist) {
1330 cifs_del_lock_waiters(li);
1331 list_del(&li->llist);
1332 kfree(li);
1333 }
1334}
1335
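/*
 * Unlock a byte range: walk the file's cached lock list, batch matching
 * ranges (up to max_num per LOCKING_ANDX request) and send the unlocks,
 * restoring any locks whose request fails on the server.
 */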
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001336int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001337cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1338 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001339{
1340 int rc = 0, stored_rc;
1341 int types[] = {LOCKING_ANDX_LARGE_FILES,
1342 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1343 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001344 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001345 LOCKING_ANDX_RANGE *buf, *cur;
1346 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1347 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1348 struct cifsLockInfo *li, *tmp;
1349 __u64 length = 1 + flock->fl_end - flock->fl_start;
1350 struct list_head tmp_llist;
1351
1352 INIT_LIST_HEAD(&tmp_llist);
1353
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001354 /*
1355 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1356 * and check it for zero before using.
1357 */
1358 max_buf = tcon->ses->server->maxBuf;
1359 if (!max_buf)
1360 return -EINVAL;
1361
1362 max_num = (max_buf - sizeof(struct smb_hdr)) /
1363 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001364 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1365 if (!buf)
1366 return -ENOMEM;
1367
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001368 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001369 for (i = 0; i < 2; i++) {
1370 cur = buf;
1371 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001372 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001373 if (flock->fl_start > li->offset ||
1374 (flock->fl_start + length) <
1375 (li->offset + li->length))
1376 continue;
1377 if (current->tgid != li->pid)
1378 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001379 if (types[i] != li->type)
1380 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001381 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001382 /*
1383 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001384 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001385 */
1386 list_del(&li->llist);
1387 cifs_del_lock_waiters(li);
1388 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001389 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001390 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001391 cur->Pid = cpu_to_le16(li->pid);
1392 cur->LengthLow = cpu_to_le32((u32)li->length);
1393 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1394 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1395 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1396 /*
1397 * We need to save a lock here to let us add it again to
1398 * the file's list if the unlock range request fails on
1399 * the server.
1400 */
1401 list_move(&li->llist, &tmp_llist);
1402 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001403 stored_rc = cifs_lockv(xid, tcon,
1404 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001405 li->type, num, 0, buf);
1406 if (stored_rc) {
1407 /*
1408 * We failed on the unlock range
1409 * request - add all locks from the tmp
1410 * list to the head of the file's list.
1411 */
1412 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001413 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001414 rc = stored_rc;
1415 } else
1416 /*
1417 * The unlock range request succeeded -
1418 * free the tmp list.
1419 */
1420 cifs_free_llist(&tmp_llist);
1421 cur = buf;
1422 num = 0;
1423 } else
1424 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001425 }
1426 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001427 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001428 types[i], num, 0, buf);
1429 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001430 cifs_move_llist(&tmp_llist,
1431 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001432 rc = stored_rc;
1433 } else
1434 cifs_free_llist(&tmp_llist);
1435 }
1436 }
1437
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001438 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001439 kfree(buf);
1440 return rc;
1441}
1442
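/*
 * Handle F_SETLK/F_SETLKW: take the POSIX path when the server supports
 * it; otherwise send a mandatory lock or unlock request and keep the
 * local lock list in sync with the server's view.
 */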
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001443static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001444cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001445 bool wait_flag, bool posix_lck, int lock, int unlock,
1446 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001447{
1448 int rc = 0;
1449 __u64 length = 1 + flock->fl_end - flock->fl_start;
1450 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1451 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001452 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001453 struct inode *inode = cfile->dentry->d_inode;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001454
1455 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001456 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001457
1458 rc = cifs_posix_lock_set(file, flock);
1459 if (rc <= 0)
1460 return rc;
1461
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001462 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001463 posix_lock_type = CIFS_RDLCK;
1464 else
1465 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001466
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001467 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001468 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001469
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001470 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1471 current->tgid, flock->fl_start, length,
1472 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001473 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001474 }
1475
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001476 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001477 struct cifsLockInfo *lock;
1478
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001479 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001480 if (!lock)
1481 return -ENOMEM;
1482
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001483 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001484 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001485 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001486 return rc;
1487 }
1488 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001489 goto out;
1490
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001491 /*
1492 * Windows 7 server can delay breaking lease from read to None
1493 * if we set a byte-range lock on a file - break it explicitly
1494 * before sending the lock to the server to be sure the next
1495 * read won't conflict with non-overlapping locks due to
1496 * page reading.
1497 */
1498 if (!CIFS_I(inode)->clientCanCacheAll &&
1499 CIFS_I(inode)->clientCanCacheRead) {
1500 cifs_invalidate_mapping(inode);
1501 cFYI(1, "Set no oplock for inode=%p due to mand locks",
1502 inode);
1503 CIFS_I(inode)->clientCanCacheRead = false;
1504 }
1505
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001506 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1507 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001508 if (rc) {
1509 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001510 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001511 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001512
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001513 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001514 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001515 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001516
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001517out:
1518 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001519 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001520 return rc;
1521}
1522
1523int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1524{
1525 int rc, xid;
1526 int lock = 0, unlock = 0;
1527 bool wait_flag = false;
1528 bool posix_lck = false;
1529 struct cifs_sb_info *cifs_sb;
1530 struct cifs_tcon *tcon;
1531 struct cifsInodeInfo *cinode;
1532 struct cifsFileInfo *cfile;
1533 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001534 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001535
1536 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001537 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001538
1539 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1540 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1541 flock->fl_start, flock->fl_end);
1542
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001543 cfile = (struct cifsFileInfo *)file->private_data;
1544 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001545
1546 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1547 tcon->ses->server);
1548
1549 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001550 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001551 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001552
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001553 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001554 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1555 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1556 posix_lck = true;
1557 /*
1558 * BB add code here to normalize offset and length to account for
1559 * negative length, which we cannot accept over the wire.
1560 */
1561 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001562 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001563 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001564 return rc;
1565 }
1566
1567 if (!lock && !unlock) {
1568 /*
1569 * if this is neither a lock nor an unlock request, there is
1570 * nothing to do since we do not know what it is
1571 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001572 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001573 return -EOPNOTSUPP;
1574 }
1575
1576 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1577 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001578 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 return rc;
1580}
1581
Jeff Layton597b0272012-03-23 14:40:56 -04001582/*
1583 * update the file size (if needed) after a write. Should be called with
1584 * the inode->i_lock held
1585 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001586void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001587cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1588 unsigned int bytes_written)
1589{
1590 loff_t end_of_write = offset + bytes_written;
1591
1592 if (end_of_write > cifsi->server_eof)
1593 cifsi->server_eof = end_of_write;
1594}
1595
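/*
 * Write data to the server at the given offset, reopening an
 * invalidated handle and retrying on -EAGAIN, then update the cached
 * server EOF and the inode size. Returns bytes written or an error.
 */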
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001596static ssize_t
1597cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1598 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599{
1600 int rc = 0;
1601 unsigned int bytes_written = 0;
1602 unsigned int total_written;
1603 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001604 struct cifs_tcon *tcon;
1605 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001606 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001607 struct dentry *dentry = open_file->dentry;
1608 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001609 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
Jeff Layton7da4b492010-10-15 15:34:00 -04001611 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612
Joe Perchesb6b38f72010-04-21 03:50:45 +00001613 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001614 *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001616 tcon = tlink_tcon(open_file->tlink);
1617 server = tcon->ses->server;
1618
1619 if (!server->ops->sync_write)
1620 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001621
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001622 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 for (total_written = 0; write_size > total_written;
1625 total_written += bytes_written) {
1626 rc = -EAGAIN;
1627 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001628 struct kvec iov[2];
1629 unsigned int len;
1630
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632 /* we could deadlock if we called
1633 filemap_fdatawait from here, so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001634 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 the server now */
Jeff Layton15886172010-10-15 15:33:59 -04001636 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 if (rc != 0)
1638 break;
1639 }
Steve French3e844692005-10-03 13:37:24 -07001640
Jeff Laytonca83ce32011-04-12 09:13:44 -04001641 len = min((size_t)cifs_sb->wsize,
1642 write_size - total_written);
1643 /* iov[0] is reserved for smb header */
1644 iov[1].iov_base = (char *)write_data + total_written;
1645 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001646 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001647 io_parms.tcon = tcon;
1648 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001649 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001650 rc = server->ops->sync_write(xid, open_file, &io_parms,
1651 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 }
1653 if (rc || (bytes_written == 0)) {
1654 if (total_written)
1655 break;
1656 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001657 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658 return rc;
1659 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001660 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001661 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001662 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001663 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001664 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001665 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 }
1667
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001668 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669
Jeff Layton7da4b492010-10-15 15:34:00 -04001670 if (total_written > 0) {
1671 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001672 if (*offset > dentry->d_inode->i_size)
1673 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001674 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001676 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001677 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 return total_written;
1679}
1680
Jeff Layton6508d902010-09-29 19:51:11 -04001681struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1682 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001683{
1684 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001685 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1686
1687 /* only filter by fsuid on multiuser mounts */
1688 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1689 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001690
Jeff Layton44772882010-10-15 15:34:03 -04001691 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001692 /* we could simply take the first list entry, since write-only entries
1693 are always at the end of the list; but the first entry might
1694 have a close pending, so we go through the whole list */
1695 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001696 if (fsuid_only && open_file->uid != current_fsuid())
1697 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001698 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001699 if (!open_file->invalidHandle) {
1700 /* found a good file */
1701 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001702 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001703 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001704 return open_file;
1705 } /* else might as well continue, and look for
1706 another, or simply have the caller reopen it
1707 again rather than trying to fix this handle */
1708 } else /* write only file */
1709 break; /* write only files are last so must be done */
1710 }
Jeff Layton44772882010-10-15 15:34:03 -04001711 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001712 return NULL;
1713}
Steve French630f3f0c2007-10-25 21:17:17 +00001714
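/*
 * Find an open handle on this inode usable for writing, preferring one
 * opened by the current task; as a last resort try to reopen an
 * invalidated handle (up to MAX_REOPEN_ATT attempts).
 */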
Jeff Layton6508d902010-09-29 19:51:11 -04001715struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1716 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001717{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001718 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001719 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001720 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001721 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001722 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001723
Steve French60808232006-04-22 15:53:05 +00001724 /* Having a null inode here (because mapping->host was set to zero by
1725 the VFS or MM) should not happen but we had reports of an oops (due to
1726 it being zero) during stress test cases so we need to check for it */
1727
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001728 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001729 cERROR(1, "Null inode passed to cifs_writeable_file");
Steve French60808232006-04-22 15:53:05 +00001730 dump_stack();
1731 return NULL;
1732 }
1733
Jeff Laytond3892292010-11-02 16:22:50 -04001734 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1735
Jeff Layton6508d902010-09-29 19:51:11 -04001736 /* only filter by fsuid on multiuser mounts */
1737 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1738 fsuid_only = false;
1739
Jeff Layton44772882010-10-15 15:34:03 -04001740 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001741refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001742 if (refind > MAX_REOPEN_ATT) {
1743 spin_unlock(&cifs_file_list_lock);
1744 return NULL;
1745 }
Steve French6148a742005-10-05 12:23:19 -07001746 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001747 if (!any_available && open_file->pid != current->tgid)
1748 continue;
1749 if (fsuid_only && open_file->uid != current_fsuid())
1750 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001751 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001752 if (!open_file->invalidHandle) {
1753 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001754 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001755 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001756 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001757 } else {
1758 if (!inv_file)
1759 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001760 }
Steve French6148a742005-10-05 12:23:19 -07001761 }
1762 }
Jeff Layton2846d382008-09-22 21:33:33 -04001763 /* couldn't find a usable FH with the same pid, try any available */
1764 if (!any_available) {
1765 any_available = true;
1766 goto refind_writable;
1767 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001768
1769 if (inv_file) {
1770 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001771 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001772 }
1773
Jeff Layton44772882010-10-15 15:34:03 -04001774 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001775
1776 if (inv_file) {
1777 rc = cifs_reopen_file(inv_file, false);
1778 if (!rc)
1779 return inv_file;
1780 else {
1781 spin_lock(&cifs_file_list_lock);
1782 list_move_tail(&inv_file->flist,
1783 &cifs_inode->openFileList);
1784 spin_unlock(&cifs_file_list_lock);
1785 cifsFileInfo_put(inv_file);
1786 spin_lock(&cifs_file_list_lock);
1787 ++refind;
1788 goto refind_writable;
1789 }
1790 }
1791
Steve French6148a742005-10-05 12:23:19 -07001792 return NULL;
1793}
1794
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1796{
1797 struct address_space *mapping = page->mapping;
1798 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1799 char *write_data;
1800 int rc = -EFAULT;
1801 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001803 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804
1805 if (!mapping || !mapping->host)
1806 return -EFAULT;
1807
1808 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
1810 offset += (loff_t)from;
1811 write_data = kmap(page);
1812 write_data += from;
1813
1814 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1815 kunmap(page);
1816 return -EIO;
1817 }
1818
1819 /* racing with truncate? */
1820 if (offset > mapping->host->i_size) {
1821 kunmap(page);
1822 return 0; /* don't care */
1823 }
1824
1825 /* check to make sure that we are not extending the file */
1826 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001827 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828
Jeff Layton6508d902010-09-29 19:51:11 -04001829 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001830 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001831 bytes_written = cifs_write(open_file, open_file->pid,
1832 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001833 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001835 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001836 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001837 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001838 else if (bytes_written < 0)
1839 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001840 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001841 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 rc = -EIO;
1843 }
1844
1845 kunmap(page);
1846 return rc;
1847}
1848
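/*
 * Writeback: gather runs of contiguous dirty pages (up to wsize worth)
 * into a cifs_writedata and hand it to the server's async write op,
 * redirtying the pages if the send fails with -EAGAIN.
 */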
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001850 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001852 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1853 bool done = false, scanned = false, range_whole = false;
1854 pgoff_t end, index;
1855 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001856 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001857 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001858 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001859
Steve French37c0eb42005-10-05 14:50:29 -07001860 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001861 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001862 * one page at a time via cifs_writepage
1863 */
1864 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1865 return generic_writepages(mapping, wbc);
1866
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001867 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001868 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001869 end = -1;
1870 } else {
1871 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1872 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1873 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001874 range_whole = true;
1875 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001876 }
1877retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001878 while (!done && index <= end) {
1879 unsigned int i, nr_pages, found_pages;
1880 pgoff_t next = 0, tofind;
1881 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001882
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001883 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1884 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001885
Jeff Laytonc2e87642012-03-23 14:40:55 -04001886 wdata = cifs_writedata_alloc((unsigned int)tofind,
1887 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001888 if (!wdata) {
1889 rc = -ENOMEM;
1890 break;
1891 }
1892
1893 /*
1894 * find_get_pages_tag seems to return a max of 256 on each
1895 * iteration, so we must call it several times in order to
1896 * fill the array or the wsize is effectively limited to
1897 * 256 * PAGE_CACHE_SIZE.
1898 */
1899 found_pages = 0;
1900 pages = wdata->pages;
1901 do {
1902 nr_pages = find_get_pages_tag(mapping, &index,
1903 PAGECACHE_TAG_DIRTY,
1904 tofind, pages);
1905 found_pages += nr_pages;
1906 tofind -= nr_pages;
1907 pages += nr_pages;
1908 } while (nr_pages && tofind && index <= end);
1909
1910 if (found_pages == 0) {
1911 kref_put(&wdata->refcount, cifs_writedata_release);
1912 break;
1913 }
1914
1915 nr_pages = 0;
1916 for (i = 0; i < found_pages; i++) {
1917 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001918 /*
1919 * At this point we hold neither mapping->tree_lock nor
1920 * lock on the page itself: the page may be truncated or
1921 * invalidated (changing page->mapping to NULL), or even
1922 * swizzled back from swapper_space to tmpfs file
1923 * mapping
1924 */
1925
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001926 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001927 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001928 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001929 break;
1930
1931 if (unlikely(page->mapping != mapping)) {
1932 unlock_page(page);
1933 break;
1934 }
1935
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001936 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001937 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001938 unlock_page(page);
1939 break;
1940 }
1941
1942 if (next && (page->index != next)) {
1943 /* Not next consecutive page */
1944 unlock_page(page);
1945 break;
1946 }
1947
1948 if (wbc->sync_mode != WB_SYNC_NONE)
1949 wait_on_page_writeback(page);
1950
1951 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001952 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001953 unlock_page(page);
1954 break;
1955 }
Steve French84d2f072005-10-12 15:32:05 -07001956
Linus Torvaldscb876f42006-12-23 16:19:07 -08001957 /*
1958 * This actually clears the dirty bit in the radix tree.
1959 * See cifs_writepage() for more commentary.
1960 */
1961 set_page_writeback(page);
1962
Jeff Layton3a98b862012-11-26 09:48:41 -05001963 if (page_offset(page) >= i_size_read(mapping->host)) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001964 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001965 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001966 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001967 break;
1968 }
1969
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001970 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001971 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001972 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001973 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001974
1975 /* reset index to refind any pages skipped */
1976 if (nr_pages == 0)
1977 index = wdata->pages[0]->index + 1;
1978
1979 /* put any pages we aren't going to use */
1980 for (i = nr_pages; i < found_pages; i++) {
1981 page_cache_release(wdata->pages[i]);
1982 wdata->pages[i] = NULL;
1983 }
1984
1985 /* nothing to write? */
1986 if (nr_pages == 0) {
1987 kref_put(&wdata->refcount, cifs_writedata_release);
1988 continue;
1989 }
1990
1991 wdata->sync_mode = wbc->sync_mode;
1992 wdata->nr_pages = nr_pages;
1993 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001994 wdata->pagesz = PAGE_CACHE_SIZE;
1995 wdata->tailsz =
Jeff Layton3a98b862012-11-26 09:48:41 -05001996 min(i_size_read(mapping->host) -
1997 page_offset(wdata->pages[nr_pages - 1]),
Jeff Laytoneddb0792012-09-18 16:20:35 -07001998 (loff_t)PAGE_CACHE_SIZE);
1999 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
2000 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002001
2002 do {
2003 if (wdata->cfile != NULL)
2004 cifsFileInfo_put(wdata->cfile);
2005 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
2006 false);
2007 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00002008 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07002009 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002010 break;
Steve French37c0eb42005-10-05 14:50:29 -07002011 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04002012 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002013 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2014 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002015 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07002016
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002017 for (i = 0; i < nr_pages; ++i)
2018 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05002019
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002020 /* send failure -- clean up the mess */
2021 if (rc != 0) {
2022 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05002023 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002024 redirty_page_for_writepage(wbc,
2025 wdata->pages[i]);
2026 else
2027 SetPageError(wdata->pages[i]);
2028 end_page_writeback(wdata->pages[i]);
2029 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002030 }
Jeff Layton941b8532011-01-11 07:24:01 -05002031 if (rc != -EAGAIN)
2032 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002033 }
2034 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002035
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002036 wbc->nr_to_write -= nr_pages;
2037 if (wbc->nr_to_write <= 0)
2038 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002039
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002040 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002041 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002042
Steve French37c0eb42005-10-05 14:50:29 -07002043 if (!scanned && !done) {
2044 /*
2045 * We hit the last page and there is more work to be done: wrap
2046 * back to the start of the file
2047 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002048 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002049 index = 0;
2050 goto retry;
2051 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002052
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002053 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002054 mapping->writeback_index = index;
2055
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 return rc;
2057}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002059static int
2060cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002062 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002063 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002065 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066/* BB add check for wbc flags */
2067 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002068 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00002069 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002070
2071 /*
2072 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2073 *
2074 * A writepage() implementation always needs to do either this,
2075 * or re-dirty the page with "redirty_page_for_writepage()" in
2076 * the case of a failure.
2077 *
2078 * Just unlocking the page will cause the radix tree tag-bits
2079 * to fail to update with the state of the page correctly.
2080 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002081 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002082retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002084 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2085 goto retry_write;
2086 else if (rc == -EAGAIN)
2087 redirty_page_for_writepage(wbc, page);
2088 else if (rc != 0)
2089 SetPageError(page);
2090 else
2091 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002092 end_page_writeback(page);
2093 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002094 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 return rc;
2096}
2097
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002098static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2099{
2100 int rc = cifs_writepage_locked(page, wbc);
2101 unlock_page(page);
2102 return rc;
2103}
2104
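/*
 * Called after data has been copied into a pagecache page: mark an
 * up-to-date page dirty, or write the copied range synchronously to
 * the server if the page is not up to date, then update i_size.
 */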
Nick Piggind9414772008-09-24 11:32:59 -04002105static int cifs_write_end(struct file *file, struct address_space *mapping,
2106 loff_t pos, unsigned len, unsigned copied,
2107 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108{
Nick Piggind9414772008-09-24 11:32:59 -04002109 int rc;
2110 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002111 struct cifsFileInfo *cfile = file->private_data;
2112 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2113 __u32 pid;
2114
2115 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2116 pid = cfile->pid;
2117 else
2118 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119
Joe Perchesb6b38f72010-04-21 03:50:45 +00002120 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2121 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002122
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002123 if (PageChecked(page)) {
2124 if (copied == len)
2125 SetPageUptodate(page);
2126 ClearPageChecked(page);
2127 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002128 SetPageUptodate(page);
2129
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002131 char *page_data;
2132 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002133 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002134
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002135 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 /* this is probably better than directly calling
2137 partialpage_write since here the file handle is
2138 known, which we might as well leverage */
2139 /* BB check if anything else missing out of ppw
2140 such as updating last write time */
2141 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002142 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002143 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002145
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002146 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002147 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002148 rc = copied;
2149 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002150 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 }
2152
Nick Piggind9414772008-09-24 11:32:59 -04002153 if (rc > 0) {
2154 spin_lock(&inode->i_lock);
2155 if (pos > inode->i_size)
2156 i_size_write(inode, pos);
2157 spin_unlock(&inode->i_lock);
2158 }
2159
2160 unlock_page(page);
2161 page_cache_release(page);
2162
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 return rc;
2164}
2165
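/*
 * fsync for strict cache mode: flush dirty pages, invalidate the
 * mapping if we no longer hold a read oplock, then ask the server to
 * flush the file unless the mount disables server-side sync.
 */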
Josef Bacik02c24a82011-07-16 20:44:56 -04002166int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2167 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002169 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002171 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002172 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002173 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002174 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002175 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
Josef Bacik02c24a82011-07-16 20:44:56 -04002177 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2178 if (rc)
2179 return rc;
2180 mutex_lock(&inode->i_mutex);
2181
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002182 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183
Joe Perchesb6b38f72010-04-21 03:50:45 +00002184 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002185 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002186
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002187 if (!CIFS_I(inode)->clientCanCacheRead) {
2188 rc = cifs_invalidate_mapping(inode);
2189 if (rc) {
2190 cFYI(1, "rc: %d during invalidate phase", rc);
2191 rc = 0; /* don't care about it in fsync */
2192 }
2193 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002194
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002195 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002196 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2197 server = tcon->ses->server;
2198 if (server->ops->flush)
2199 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2200 else
2201 rc = -ENOSYS;
2202 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002203
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002204 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002205 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002206 return rc;
2207}
2208
Josef Bacik02c24a82011-07-16 20:44:56 -04002209int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002210{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002211 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002212 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002213 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002214 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002215 struct cifsFileInfo *smbfile = file->private_data;
2216 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002217 struct inode *inode = file->f_mapping->host;
2218
2219 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2220 if (rc)
2221 return rc;
2222 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002223
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002224 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002225
2226 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2227 file->f_path.dentry->d_name.name, datasync);
2228
2229 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002230 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2231 server = tcon->ses->server;
2232 if (server->ops->flush)
2233 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2234 else
2235 rc = -ENOSYS;
2236 }
Steve Frenchb298f222009-02-21 21:17:43 +00002237
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002238 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002239 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 return rc;
2241}
2242
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243/*
2244 * As file closes, flush all cached write data for this inode checking
2245 * for write behind errors.
2246 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002247int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248{
Al Viro496ad9a2013-01-23 17:07:38 -05002249 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 int rc = 0;
2251
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002252 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002253 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002254
Joe Perchesb6b38f72010-04-21 03:50:45 +00002255 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256
2257 return rc;
2258}
2259
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002260static int
2261cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2262{
2263 int rc = 0;
2264 unsigned long i;
2265
2266 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002267 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002268 if (!pages[i]) {
2269 /*
2270 * save number of pages we have already allocated and
2271 * return with ENOMEM error
2272 */
2273 num_pages = i;
2274 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002275 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002276 }
2277 }
2278
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002279 if (rc) {
2280 for (i = 0; i < num_pages; i++)
2281 put_page(pages[i]);
2282 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002283 return rc;
2284}
2285
2286static inline
2287size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2288{
2289 size_t num_pages;
2290 size_t clen;
2291
2292 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002293 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002294
2295 if (cur_len)
2296 *cur_len = clen;
2297
2298 return num_pages;
2299}
2300
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002301static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002302cifs_uncached_writev_complete(struct work_struct *work)
2303{
2304 int i;
2305 struct cifs_writedata *wdata = container_of(work,
2306 struct cifs_writedata, work);
2307 struct inode *inode = wdata->cfile->dentry->d_inode;
2308 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2309
2310 spin_lock(&inode->i_lock);
2311 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2312 if (cifsi->server_eof > inode->i_size)
2313 i_size_write(inode, cifsi->server_eof);
2314 spin_unlock(&inode->i_lock);
2315
2316 complete(&wdata->done);
2317
2318 if (wdata->result != -EAGAIN) {
2319 for (i = 0; i < wdata->nr_pages; i++)
2320 put_page(wdata->pages[i]);
2321 }
2322
2323 kref_put(&wdata->refcount, cifs_writedata_release);
2324}
2325
2326/* attempt to send write to server, retry on any -EAGAIN errors */
2327static int
2328cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2329{
2330 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002331 struct TCP_Server_Info *server;
2332
2333 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002334
2335 do {
2336 if (wdata->cfile->invalidHandle) {
2337 rc = cifs_reopen_file(wdata->cfile, false);
2338 if (rc != 0)
2339 continue;
2340 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002341 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002342 } while (rc == -EAGAIN);
2343
2344 return rc;
2345}
2346
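/*
 * Uncached write: copy the user iovec into freshly allocated pages,
 * send the data in wsize-sized chunks via the async write op, then
 * collect the replies in order of increasing offset.
 */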
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If those writes succeed, we'll end up
	 * returning whatever was written. If one of them fails, we'll get a
	 * new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}

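/*
 * Write directly to the server, bypassing the page cache. On success the
 * cached mapping is marked invalid so stale data is not read back later, and
 * the file position is advanced.
 */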
ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = file_inode(iocb->ki_filp);

	/*
	 * BB - optimize for the case when signing is disabled: we could drop
	 * the extra memory-to-memory copying and use the iovec buffers
	 * directly when constructing the write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

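/*
 * Cached write path: take lock_sem shared so that no conflicting mandatory
 * byte-range lock can be set while the write is in flight, then go through
 * the generic cached write path and sync the result if required.
 */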
static ssize_t
cifs_writev(struct kiocb *iocb, const struct iovec *iov,
	    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc = -EACCES;

	BUG_ON(iocb->ki_pos != pos);

	sb_start_write(inode->i_sb);

	/*
	 * We need to hold the sem to be sure nobody modifies the lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP)) {
		mutex_lock(&inode->i_mutex);
		rc = __generic_file_aio_write(iocb, iov, nr_segs,
					      &iocb->ki_pos);
		mutex_unlock(&inode->i_mutex);
	}

	if (rc > 0 || rc == -EIOCBQUEUED) {
		ssize_t err;

		err = generic_write_sync(file, pos, rc);
		if (err < 0 && rc > 0)
			rc = err;
	}

	up_read(&cinode->lock_sem);
	sb_end_write(inode->i_sb);
	return rc;
}

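/*
 * Strict cache mode write: use the page cache only while we hold an
 * exclusive oplock; otherwise write through to the server and drop any
 * level II oplock so stale data is not read back from the cache.
 */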
ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	if (cinode->clientCanCacheAll) {
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP &
		     le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
		    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
			return generic_file_aio_write(iocb, iov, nr_segs, pos);
		return cifs_writev(iocb, iov, nr_segs, pos);
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the
	 * data to the server exactly from pos to pos+len-1 rather than flush
	 * all affected pages, because flushing may cause an error with
	 * mandatory locks on those pages but not on the region from pos to
	 * pos+len-1.
	 */
	written = cifs_user_writev(iocb, iov, nr_segs, pos);
	if (written > 0 && cinode->clientCanCacheRead) {
		/*
		 * A Windows 7 server can delay breaking a level II oplock
		 * when a write request comes in - break it on the client to
		 * prevent reading old data.
		 */
		cifs_invalidate_mapping(inode);
		cFYI(1, "Set no oplock for inode=%p after a write operation",
		     inode);
		cinode->clientCanCacheRead = false;
	}
	return written;
}

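/*
 * Allocate a readdata structure with room for nr_pages page pointers and
 * initialize its refcount, list head, completion, and work function.
 * Returns NULL on allocation failure.
 */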
static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

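/*
 * Final kref release: drop the reference to the open file and free the
 * readdata itself. Any pages still attached must be released by the caller
 * beforehand (see cifs_uncached_readdata_release).
 */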
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}

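/*
 * Allocate nr_pages pages for a readdata response, releasing whatever was
 * already allocated if any allocation fails.
 */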
static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		/*
		 * Only walk the pages that were actually allocated; the
		 * entries past i are still NULL and must not be passed to
		 * put_page().
		 */
		unsigned int nr_allocated = i;

		for (i = 0; i < nr_allocated; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

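/*
 * kref release for the uncached read path: put every page attached to the
 * readdata, then hand off to cifs_readdata_release to free the rest.
 */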
static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}

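/* attempt to send the read to the server, retry on any -EAGAIN errors */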
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
		     unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
					       (int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}

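/*
 * Completion work for an uncached read: signal the waiter and drop the
 * reference taken when the read was issued.
 */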
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

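/*
 * Receive @len bytes from the socket into the readdata's pages, one kvec per
 * page. A partially filled tail page is zeroed past the received data, and
 * pages beyond the data are released rather than held.
 */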
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
			     i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: iov_base=%p iov_len=%zu",
			     i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

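/*
 * Uncached read path: split the request into rsize-sized async reads, then
 * wait for each reply in order of increasing offset and copy the received
 * pages into the caller's iovec. Returns the number of bytes read, or the
 * first error if nothing was read.
 */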
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			/*
			 * Bail out directly: jumping to the error label
			 * would kref_put() a NULL rdata pointer.
			 */
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request was successfully sent, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							  nr_segs, *poffset,
							  &copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	return total_read ? total_read : rc;
}

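/*
 * Read directly from the server, bypassing the page cache, and advance the
 * file position by the amount actually read.
 */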
ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

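/*
 * Strict cache mode read: serve from the page cache only while we hold a
 * level II oplock; otherwise go to the server, honouring any mandatory
 * byte-range locks.
 */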
ssize_t
cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
		  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server every time if
	 * we don't have a level II oplock, because the server can delay the
	 * mtime change, so we can't decide whether to invalidate the inode's
	 * cached pages. Reading cached pages can also fail if there are
	 * mandatory locks on pages affected by this read but not on the
	 * region from pos to pos+len-1.
	 */
	if (!cinode->clientCanCacheRead)
		return cifs_user_readv(iocb, iov, nr_segs, pos);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * We need to hold the sem to be sure nobody modifies the lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
	up_read(&cinode->lock_sem);
	return rc;
}

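/*
 * Synchronous read: pull at most rsize bytes per iteration from the server
 * into the supplied buffer, reopening the file and retrying on -EAGAIN,
 * until the request is satisfied or the server returns no more data.
 */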
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, "attempting read on write only file instance");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For Windows ME and 9x we do not want to request more than
		 * was negotiated, since the server will refuse the read
		 * otherwise.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
						  CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

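/*
 * mmap for strict cache mode: unless we hold a read oplock, drop any stale
 * cached pages first, then fall through to the generic mmap path with our
 * vm_ops.
 */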
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file_inode(file);

	xid = get_xid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			/* don't leak the xid on this early return */
			free_xid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

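/*
 * mmap for the regular cached path: revalidate the file against the server
 * first, then use the generic mmap path with our vm_ops.
 */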
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cFYI(1, "Validation prior to mmap failed, error=%d", rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

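/*
 * Completion work for a readahead request: add each page to the LRU, mark it
 * up to date and push it to fscache if the read succeeded, then unlock the
 * page and release our reference to it.
 */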
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i;
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

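/*
 * Receive @len bytes from the socket into the pages of a readahead request.
 * A partially filled tail page is zeroed past the received data, pages that
 * lie beyond the server's EOF are zero-filled and marked up to date without
 * a transfer, and any remaining pages are released.
 */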
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
			     i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
			     i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
			       '\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

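/*
 * ->readpages: after first trying to satisfy the request from fscache, batch
 * contiguous pages from the VFS list into rsize-sized async read requests.
 */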
static int cifs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Read as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
	     mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						     page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}

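/*
 * Fill a single page, first from fscache and otherwise with a synchronous
 * read from the server, updating atime and pushing the result back into
 * fscache.
 */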
static int cifs_readpage_worker(struct file *file, struct page *page,
				loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, "Bytes read %d", rc);

	file_inode(file)->i_atime =
		current_fs_time(file_inode(file)->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

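/* ->readpage: read one page into the cache; the page is unlocked on return */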
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cFYI(1, "readpage %p at offset %d 0x%x",
	     page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	free_xid(xid);
	return rc;
}

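/* return 1 if any filehandle on this inode is currently open for write */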
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes open for
 * write, to avoid races with writepage extending the file. In the future we
 * could consider allowing a refresh of the inode only on increases in the
 * file size, but this is tricky to do without racing with writebehind page
 * caching in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/*
			 * Since there is no page cache to corrupt on
			 * directio, we can change the size safely.
			 */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

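/*
 * ->write_begin: grab and prepare a page cache page for a write. The page is
 * read in (or zeroed) first unless it is already up to date, the write
 * covers the whole page, or the existing data is not going to be needed.
 */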
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cFYI(1, "write_begin from %lld len %d", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/*
		 * We could try using another file handle if there is one, but
		 * how would we lock it to prevent a close of that handle
		 * racing with this read? In any case this will be written out
		 * by write_end, so it is fine to leave the page as-is here.
		 */
	}
out:
	*pagep = page;
	return rc;
}

static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

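/*
 * ->launder_page: synchronously write back a dirty page before it is
 * released, and drop it from fscache.
 */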
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cFYI(1, "Launder page: %p", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

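/*
 * Worker run when the server breaks our oplock: flush dirty pages, drop
 * caching rights as appropriate, push cached byte-range locks to the server,
 * and acknowledge the break unless it was cancelled by a reconnect.
 */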
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead &&
	    cifs_has_mand_locks(cinode)) {
		cFYI(1, "Reset oplock to None for inode=%p due to mand locks",
		     inode);
		cinode->clientCanCacheRead = false;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_invalidate_mapping(inode);
		}
		cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cERROR(1, "Push locks rc = %d", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the smb
	 * session, using a now incorrect file handle, is not a data integrity
	 * issue, but do not bother sending an oplock release if the session
	 * to the server is still disconnected, since the server has already
	 * released the oplock in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cFYI(1, "Oplock release rc = %d", rc);
	}
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};