/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>

#include "rds.h"
#include "rdma.h"
#include "ib.h"


/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device	*device;
	struct rds_ib_mr_pool	*pool;
	struct ib_fmr		*fmr;
	struct list_head	list;
	unsigned int		remap_count;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	u64			*dma;
	int			sg_dma_len;
};
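
/*
 * sg/sg_len track the pinned user pages backing the current mapping and
 * sg_dma_len the number of DMA-mapped entries; remap_count counts how
 * often the FMR has been remapped, and once it reaches
 * fmr_attr.max_maps the MR is retired via the pool's drop_list.
 */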

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex		flush_lock;	/* serialize fmr invalidate */
	struct work_struct	flush_worker;	/* flush worker */

	spinlock_t		list_lock;	/* protect variables below */
	atomic_t		item_count;	/* total # of MRs */
	atomic_t		dirty_count;	/* # of dirty MRs */
	struct list_head	drop_list;	/* MRs that have reached their max_maps limit */
	struct list_head	free_list;	/* unused MRs */
	struct list_head	clean_list;	/* unused & unmapped MRs */
	atomic_t		free_pinned;	/* memory pinned by free MRs */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	struct ib_fmr_attr	fmr_attr;
};
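
/*
 * How an MR moves through the pool below:
 *
 *   rds_ib_alloc_fmr()     reuses an MR from clean_list, or allocates a
 *                          fresh FMR while under max_items
 *   rds_ib_free_mr()       parks it on free_list, or on drop_list once
 *                          remap_count hits max_maps; either way it
 *                          counts toward dirty_count
 *   rds_ib_flush_mr_pool() batch-invalidates the dirty MRs, destroying
 *                          some and returning the rest to clean_list
 */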

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		spin_lock_irq(&rds_ibdev->spinlock);
		list_for_each_entry(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				spin_unlock_irq(&rds_ibdev->spinlock);
				return rds_ibdev;
			}
		}
		spin_unlock_irq(&rds_ibdev->spinlock);
	}

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr, *next;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_safe(i_ipaddr, next, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del(&i_ipaddr->list);
			kfree(i_ipaddr);
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (rds_ibdev_old)
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);

	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	/* Take the nested device lock without touching the irq state;
	 * a spin_unlock_irq() here would re-enable interrupts while
	 * ib_nodev_conns_lock is still held. */
	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
}

void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->drop_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * Once we exceed max_items_soft, we start freeing items
	 * more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2.
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;
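
	/*
	 * Worked example with assumed numbers (no particular HCA): a
	 * device reporting max_fmrs = 4096 gives max_items = 4096 and
	 * max_items_soft = 3072; with a fmr_message_size of 257 pages,
	 * max_free_pinned = 4096 * 257 / 4 = 263168 pages may sit pinned
	 * behind free MRs before rds_ib_free_mr() queues flush work.
	 */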

	return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	flush_workqueue(rds_wq);
	rds_ib_flush_mr_pool(pool, 1);
	BUG_ON(atomic_read(&pool->item_count));
	BUG_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
		list_del_init(&ibmr->list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}

static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the
		 * driver tells us we can't use more than N fmrs, we
		 * shouldn't start arguing with it. */
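
		/* Concretely: each pass first tries to reuse a clean MR,
		 * then to allocate under max_items; a failed pass flushes
		 * the pool and retries, and after two flush-and-retry
		 * rounds (iter > 2) we give up with -EAGAIN. */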
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
	       struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				   DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		/* Only the first segment may begin off a page boundary,
		 * and only the last may end off one; anything else cannot
		 * be covered by a single FMR page list. */
		if (dma_addr & ~PAGE_MASK) {
			if (i > 0) {
				/* Don't leak the DMA mapping on failure */
				ib_dma_unmap_sg(dev, sg, nents,
						DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1) {
				ib_dma_unmap_sg(dev, sg, nents,
						DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}

		len += dma_len;
	}

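	/* Worked example (hypothetical addresses, 4K pages): a mapped
	 * scatterlist {0x1200+0xe00, 0x2000+0x1000, 0x3000+0x800} passes
	 * the checks above -- only its first segment starts, and only its
	 * last ends, off a page boundary -- and the loop below hands the
	 * FMR the page list {0x1000, 0x2000, 0x3000}. */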
	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			dma_pages, page_cnt, io_addr);
	if (ret) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		goto out;
	}

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

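	/* The mapping in rds_ib_map_fmr() is created DMA_BIDIRECTIONAL,
	 * so both sync paths below pass DMA_BIDIRECTIONAL as well,
	 * whatever direction the caller asked for. */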
	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
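
/*
 * All dirty MRs are strung onto a single list and invalidated with one
 * ib_unmap_fmr() call; unmapping is the expensive part of the FMR
 * scheme, which is why it is deferred to this function and batched
 * rather than done in rds_ib_free_mr() itself.
 */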
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
{
	struct rds_ib_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list. */
	list_splice_init(&pool->free_list, &unmap_list);
	list_splice_init(&pool->drop_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &unmap_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, list)
		list_add(&ibmr->fmr->list, &fmr_list);
	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
		ncleaned++;
	}

	spin_lock_irqsave(&pool->list_lock, flags);
	list_splice(&unmap_list, &pool->clean_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);

	rds_ib_flush_mr_pool(pool, 0);
}

void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	unsigned long flags;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	spin_lock_irqsave(&pool->list_lock, flags);
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		list_add(&ibmr->list, &pool->drop_list);
	else
		list_add(&ibmr->list, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created an MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0);
	}
}

void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	ibmr->device = rds_ibdev;

 out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}
637}