/******************************************************************************

(c) 2007 Network Appliance, Inc. All Rights Reserved.
(c) 2009 NetApp. All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
#define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocation elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
	return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots);
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_add(n, &xprt->bc_free_slots);
	xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
	atomic_sub(n, &xprt->bc_free_slots);
	return xprt->bc_alloc_count -= n;
}
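
/*
 * In short: bc_alloc_count counts the rpc_rqsts currently parked on
 * xprt->bc_pa_list, while bc_free_slots counts the backchannel slots
 * the transport has promised and not yet consumed.  xprt_need_to_requeue()
 * is true when the list holds fewer entries than there are free slots,
 * i.e. when a request being released should be parked for reuse rather
 * than freed outright.  (This is a reading of the code, offered for
 * orientation.)
 */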

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
	struct xdr_buf *xbufp;

	dprintk("RPC: free allocations for req= %p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_rcv_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	xbufp = &req->rq_snd_buf;
	free_page((unsigned long)xbufp->head[0].iov_base);
	kfree(req);
}

static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags)
{
	struct page *page;
	/* Preallocate one XDR receive buffer */
	page = alloc_page(gfp_flags);
	if (page == NULL)
		return -ENOMEM;
	buf->head[0].iov_base = page_address(page);
	buf->head[0].iov_len = PAGE_SIZE;
	buf->tail[0].iov_base = NULL;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->len = 0;
	buf->buflen = PAGE_SIZE;
	return 0;
}
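
/*
 * The resulting layout, for illustration: on success the xdr_buf
 * describes exactly one page and nothing more,
 *
 *	buf->head[0] = { page_address(page), PAGE_SIZE }
 *	buf->page_len = 0, buf->tail[0] = { NULL, 0 }
 *
 * which matches the one-page (4096 byte) ceiling the client advertises
 * for callback requests (see the comment above xprt_setup_backchannel).
 */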

static
struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Pre-allocate one backchannel rpc_rqst */
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
		return NULL;

	req->rq_xprt = xprt;
	INIT_LIST_HEAD(&req->rq_list);
	INIT_LIST_HEAD(&req->rq_bc_list);

	/* Preallocate one XDR receive buffer */
	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc receive xbuf\n");
		goto out_free;
	}
	req->rq_rcv_buf.len = PAGE_SIZE;

	/* Preallocate one XDR send buffer */
	if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) {
		printk(KERN_ERR "Failed to create bc snd xbuf\n");
		goto out_free;
	}
	return req;
out_free:
	xprt_free_allocation(req);
	return NULL;
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel. This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt. The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt. Any one of these resources may be used by an
 * incoming callback request. It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large. For example, a pNFS server
 * using multiple deviceids. The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests. Each deviceID is 16 bytes, so allocate one page
 * for the arguments to have enough room to receive a number of these
 * deviceIDs. The NFS client indicates to the pNFS server that its
 * callback requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	if (!xprt->ops->bc_setup)
		return 0;
	return xprt->ops->bc_setup(xprt, min_reqs);
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
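
/*
 * A sketch of a caller (illustrative only; the identifiers below are
 * taken from the NFSv4.1 client and may differ across kernel versions,
 * so treat this as a sketch rather than the exact call site):
 *
 *	rc = xprt_setup_backchannel(clp->cl_rpcclient->cl_xprt,
 *				    NFS41_BC_MIN_CALLBACKS);
 *	if (rc < 0)
 *		goto out_err;
 */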

int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
	struct rpc_rqst *req;
	struct list_head tmp_list;
	int i;

	dprintk("RPC: setup backchannel transport\n");

	/*
	 * We use a temporary list to keep track of the preallocated
	 * buffers. Once we're done building the list we splice it
	 * into the backchannel preallocation list off of the rpc_xprt
	 * struct. This helps minimize the amount of time the list
	 * lock is held on the rpc_xprt struct. It also makes cleanup
	 * easier in case of memory allocation errors.
	 */
	INIT_LIST_HEAD(&tmp_list);
	for (i = 0; i < min_reqs; i++) {
		/* Pre-allocate one backchannel rpc_rqst */
		req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
		if (req == NULL) {
			printk(KERN_ERR "Failed to create bc rpc_rqst\n");
			goto out_free;
		}

		/* Add the allocated buffer to the tmp list */
		dprintk("RPC: adding req= %p\n", req);
		list_add(&req->rq_bc_pa_list, &tmp_list);
	}

	/*
	 * Add the temporary list to the backchannel preallocation list
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	list_splice(&tmp_list, &xprt->bc_pa_list);
	xprt_inc_alloc_count(xprt, min_reqs);
	spin_unlock_bh(&xprt->bc_pa_lock);

	dprintk("RPC: setup backchannel transport done\n");
	return 0;

out_free:
	/*
	 * Memory allocation failed, free the temporary list
	 */
	while (!list_empty(&tmp_list)) {
		req = list_first_entry(&tmp_list,
				struct rpc_rqst,
				rq_bc_pa_list);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
	}

	dprintk("RPC: setup backchannel transport failed\n");
	return -ENOMEM;
}
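
/*
 * Note on locking: bc_pa_lock is taken with spin_lock_bh() here and in
 * xprt_destroy_bc() because the same list is also manipulated from
 * soft interrupt context by xprt_lookup_bc_request() and
 * xprt_complete_bc_request() below, which may therefore use the plain
 * spin_lock() variant.
 */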

/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt: the transport holding the preallocated structures
 * @max_reqs: the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	if (xprt->ops->bc_destroy)
		xprt->ops->bc_destroy(xprt, max_reqs);
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);

void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
{
	struct rpc_rqst *req = NULL, *tmp = NULL;

	dprintk("RPC: destroy backchannel transport\n");

	if (max_reqs == 0)
		goto out;

	spin_lock_bh(&xprt->bc_pa_lock);
	xprt_dec_alloc_count(xprt, max_reqs);
	list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		dprintk("RPC: req=%p\n", req);
		list_del(&req->rq_bc_pa_list);
		xprt_free_allocation(req);
		if (--max_reqs == 0)
			break;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);

out:
	dprintk("RPC: backchannel list empty= %s\n",
		list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
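
/*
 * Note that xprt_dec_alloc_count() runs before the list walk above, so
 * bc_free_slots drops first; a concurrent xprt_free_bc_rqst() then
 * sees xprt_need_to_requeue() fail and frees its entry rather than
 * parking it on the shrinking list.  (This is a reading of the code,
 * offered for illustration.)
 */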

static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req = NULL;

	dprintk("RPC: allocate a backchannel request\n");
	if (atomic_read(&xprt->bc_free_slots) <= 0)
		goto not_found;
	if (list_empty(&xprt->bc_pa_list)) {
		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
		if (!req)
			goto not_found;
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
	}
	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
				rq_bc_pa_list);
	req->rq_reply_bytes_recvd = 0;
	req->rq_bytes_sent = 0;
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
not_found:
	dprintk("RPC: backchannel req=%p\n", req);
	return req;
}
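
/*
 * Unlike the GFP_KERNEL allocations made at setup time, the dynamic
 * allocation above must use GFP_ATOMIC: this function is reached from
 * xprt_lookup_bc_request() in soft interrupt context, with bc_pa_lock
 * held, where sleeping is not allowed.
 */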

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this callback request.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->ops->bc_free_rqst(req);
}

void xprt_free_bc_rqst(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: free backchannel req=%p\n", req);

	req->rq_connect_cookie = xprt->connect_cookie - 1;
	smp_mb__before_atomic();
	clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
	smp_mb__after_atomic();

	/*
	 * Return it to the list of preallocations so that it
	 * may be reused by a new callback request.
	 */
	spin_lock_bh(&xprt->bc_pa_lock);
	if (xprt_need_to_requeue(xprt)) {
		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
		req = NULL;
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
	if (req != NULL) {
		/*
		 * The last remaining session was destroyed while this
		 * entry was in use. Free the entry and don't attempt
		 * to add back to the list because there is no need to
		 * have any more preallocated entries.
		 */
		dprintk("RPC: Last session removed req=%p\n", req);
		xprt_free_allocation(req);
		return;
	}
}
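
/*
 * On the barriers above: clear_bit() implies no memory barrier of its
 * own, so smp_mb__before_atomic() orders the rq_connect_cookie update
 * before RPC_BC_PA_IN_USE is cleared, and smp_mb__after_atomic()
 * orders the clear before the request reappears on bc_pa_list.
 */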

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup. Buffer space for the send and private XDR buffers
 * has been preallocated as well. Use xprt_alloc_bc_request to allocate
 * one of these to an incoming callback, and xprt_free_bc_request to
 * return it.
 *
 * We know that we're called in soft interrupt context, so we can take
 * the plain spin_lock; there is no need for the bottom-half
 * spin_lock_bh variant.
 *
 * Returns an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->bc_pa_lock);
	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
		if (req->rq_connect_cookie != xprt->connect_cookie)
			continue;
		if (req->rq_xid == xid)
			goto found;
	}
	req = xprt_alloc_bc_request(xprt, xid);
found:
	spin_unlock(&xprt->bc_pa_lock);
	return req;
}
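
/*
 * For context (a sketch; the exact call site varies by transport and
 * kernel version): a transport's receive path calls
 * xprt_lookup_bc_request() with the xid of an incoming RPC that turned
 * out to be a call rather than a reply, then hands the returned
 * rpc_rqst to xprt_complete_bc_request() once the call data has been
 * copied in.
 */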

/*
 * Add callback request to callback list. The callback
 * service sleeps on the sv_cb_waitq waiting for new
 * requests. Wake it up after enqueuing the request.
 */
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct svc_serv *bc_serv = xprt->bc_serv;

	spin_lock(&xprt->bc_pa_lock);
	list_del(&req->rq_bc_pa_list);
	xprt_dec_alloc_count(xprt, 1);
	spin_unlock(&xprt->bc_pa_lock);

	req->rq_private_buf.len = copied;
	set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);

	dprintk("RPC: add callback request to list\n");
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
	wake_up(&bc_serv->sv_cb_waitq);
	spin_unlock(&bc_serv->sv_cb_lock);
}
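
/*
 * The consumer side, for illustration only (paraphrased from the
 * NFSv4.1 callback service rather than this file; details differ
 * across kernel versions): a callback thread sleeps on sv_cb_waitq
 * and drains sv_cb_list, roughly
 *
 *	spin_lock_bh(&serv->sv_cb_lock);
 *	if (!list_empty(&serv->sv_cb_list)) {
 *		req = list_first_entry(&serv->sv_cb_list,
 *				struct rpc_rqst, rq_bc_list);
 *		list_del(&req->rq_bc_list);
 *		... process the callback, then release the rqst ...
 *	}
 *	spin_unlock_bh(&serv->sv_cb_lock);
 */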