#ifndef DEF_RDMA_VT_H
#define DEF_RDMA_VT_H

/*
 * Copyright(c) 2015 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Structure that low level drivers will populate in order to register with the
 * rdmavt layer.
 */

#include <linux/spinlock.h>
#include <linux/list.h>
#include "ib_verbs.h"

#define RVT_MULTICAST_LID_BASE	0xC000
#define RVT_PERMISSIVE_LID	0xFFFF

/*
 * For some of the IBTA objects there will likely be some
 * initializations required. We need flags to determine whether it is OK
 * for rdmavt to do this or not. This does not imply any functions of a
 * particular IBTA object are overridden.
 */
#define RVT_FLAG_MR_INIT_DRIVER BIT(1)
#define RVT_FLAG_QP_INIT_DRIVER BIT(2)
#define RVT_FLAG_CQ_INIT_DRIVER BIT(3)
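
/*
 * For example, a driver that does its own QP and CQ setup might, as a
 * sketch, pass:
 *
 *	rdi->flags = RVT_FLAG_QP_INIT_DRIVER | RVT_FLAG_CQ_INIT_DRIVER;
 */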

/*
 * For Memory Regions. This stuff should probably be moved into rdmavt/mr.h once
 * drivers no longer need access to the MR directly.
 */

/*
 * A segment is a linear region of low physical memory.
 * Used by the verbs layer.
 */
struct rvt_seg {
	void *vaddr;
	size_t length;
};

/* The number of rvt_segs that fit in a page. */
#define RVT_SEGSZ	(PAGE_SIZE / sizeof(struct rvt_seg))
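
/*
 * As a worked example: on a 64-bit kernel with 4 KiB pages,
 * sizeof(struct rvt_seg) is 16 bytes (an 8-byte pointer plus an
 * 8-byte size_t), so RVT_SEGSZ is 4096 / 16 = 256 segments per map page.
 */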

struct rvt_segarray {
	struct rvt_seg segs[RVT_SEGSZ];
};

struct rvt_mregion {
	struct ib_pd *pd;	/* shares refcnt of ibmr.pd */
	u64 user_base;		/* User's address for this region */
	u64 iova;		/* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;		/* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;		/* number of rvt_segs in all the arrays */
	u32 mapsz;		/* size of the map array */
	u8  page_shift;		/* 0 - non-uniform or non-power-of-2 sizes */
	u8  lkey_published;	/* in global table */
	struct completion comp;	/* complete when refcount goes to zero */
	atomic_t refcount;
	struct rvt_segarray *map[0];	/* the segments */
};

#define RVT_MAX_LKEY_TABLE_BITS 23

struct rvt_lkey_table {
	spinlock_t lock;	/* protect changes in this struct */
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
	u32 max;		/* size of the table */
	struct rvt_mregion __rcu **table;
};
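
/*
 * A sketch of how an lkey is mapped to its rvt_mregion, mirroring the
 * lookup in the qib/hfi1 drivers (the top lkey_table_size bits of the
 * key index the table; the low bits carry a generation count so stale
 * keys miss). Illustrative only:
 *
 *	rcu_read_lock();
 *	mr = rcu_dereference(rkt->table[lkey >> (32 - ilog2(rkt->max))]);
 *	if (!mr || mr->lkey != lkey)
 *		goto bail;	(stale or unpublished key)
 *	rvt_get_mr(mr);
 *	rcu_read_unlock();
 */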

/* End Memory Region */

/*
 * Things needed for the Queue Pair definition. Like the MR stuff above, the
 * following should probably get moved to qp.h once drivers stop trying to make
 * and manipulate their own QPs. For the few instances where a driver may need
 * to look into a queue pair, there should be a pointer to a driver-private
 * data structure that it can look at.
 */

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct rvt_sge {
	struct rvt_mregion *mr;
	void *vaddr;		/* kernel virtual address of segment */
	u32 sge_length;		/* length of the SGE */
	u32 length;		/* remaining length of the segment */
	u16 m;			/* current index: mr->map[m] */
	u16 n;			/* current index: mr->map[m]->segs[n] */
};
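
/*
 * A sketch of how the m/n indices walk an MR's segment arrays as data
 * is copied, mirroring the SGE-advance helpers in the qib/hfi1 drivers;
 * the function name is illustrative, not part of the rdmavt API.
 */
static inline void rvt_example_advance_sge(struct rvt_sge *sge, u32 length)
{
	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0)
		return;		/* SGE exhausted; caller picks the next one */
	if (sge->length == 0 && sge->mr->lkey) {
		/* Move to the next segment within the region. */
		if (++sge->n >= RVT_SEGSZ) {
			sge->n = 0;
			if (++sge->m >= sge->mr->mapsz)
				return;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}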

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;	/* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct rvt_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead (a sketch follows struct rvt_rq below).
 */
struct rvt_rwq {
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receives pull requests from here. */
	struct rvt_rwqe wq[0];
};

struct rvt_rq {
	struct rvt_rwq *wq;
	u32 size;		/* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};
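
/*
 * Because rvt_rwqe entries are variable size, the N'th entry must be
 * located arithmetically; a sketch of the get_rwqe_ptr() helper the
 * comment above refers to, mirroring the qib/hfi1 versions (the
 * function name here is illustrative):
 */
static inline struct rvt_rwqe *rvt_example_get_rwqe_ptr(struct rvt_rq *rq,
							unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->wq->wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}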

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made. The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

#define RVT_MAX_RDMA_ATOMIC	16

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct rvt_sge rdma_sge;
		u64 atomic_data;
	};
};

struct rvt_sge_state {
	struct rvt_sge *sg_list;	/* next SGE to be used if any */
	struct rvt_sge sge;	/* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
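
/*
 * A sketch of that locking order (illustrative only):
 *
 *	spin_lock_irq(&qp->r_rq.lock);
 *	spin_lock(&qp->s_lock);
 *	... touch state shared by both sides ...
 *	spin_unlock(&qp->s_lock);
 *	spin_unlock_irq(&qp->r_rq.lock);
 */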
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv;		/* Driver private data */
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;	/* linked list for QPN hash table */
	struct rvt_swqe *s_wq;	/* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;	/* computed from timeout */

	enum ib_mtu path_mtu;
	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	u32 remote_qpn;
	u32 pmtu;		/* decoded from path_mtu */
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 s_ahgpsn;		/* set to the psn in the copy of the header */

	u8 state;		/* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;		/* Alternate path timeout for this QP */
	u8 timeout;		/* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_alt_pkey_index;	/* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry s_ack_queue[RVT_MAX_RDMA_ATOMIC + 1]
		____cacheline_aligned_in_smp;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;	/* used for APM */
	unsigned long r_aflags;
	u64 r_wr_id;		/* ID for current receive WQE */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u32 r_msn;		/* message sequence number */

	u8 r_state;		/* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */

	struct list_head rspwait;	/* link for waiting to respond */

	struct rvt_sge_state r_sge;	/* current receive data */
	struct rvt_rq r_rq;	/* receive work queue */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	struct rvt_sge_state *s_cur_sge;
	u32 s_flags;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;	/* current send request data */
	struct rvt_mregion *s_rdma_mr;
	struct sdma_engine *s_sde;	/* current sde */
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_sending_psn;	/* lowest PSN that is being sent */
	u32 s_sending_hpsn;	/* highest PSN that is being sent */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_acked;		/* last un-ACK'ed entry */
	u32 s_last;		/* last completed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	s8 s_ahgidx;
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[0]	/* verified SGEs */
		____cacheline_aligned_in_smp;
};
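
/*
 * As with receive WQEs, send WQEs are variable size; a sketch of the
 * per-entry arithmetic, mirroring the get_swqe_ptr() helpers in the
 * qib/hfi1 drivers (the function name here is illustrative):
 */
static inline struct rvt_swqe *rvt_example_get_swqe_ptr(struct rvt_qp *qp,
							unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				   (sizeof(struct rvt_swqe) +
				    qp->s_max_sge *
				    sizeof(struct rvt_sge)) * n);
}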

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

/* End QP section */

struct rvt_ibport {
	struct rvt_qp __rcu *qp[2];
	struct ib_mad_agent *send_agent;	/* agent for SMI (traps) */
	struct rb_root mcast_tree;
	spinlock_t lock;	/* protect changes in this struct */

	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;
	unsigned long trap_timeout;
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;
	u64 tid;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 mkey_lease_period;
	u16 sm_lid;
	u8 sm_sl;
	u8 mkeyprot;
	u8 subnet_timeout;
	u8 vl_high_limit;

	/*
	 * Driver is expected to keep these up to date. These
	 * counters are informational only and not required to be
	 * completely accurate.
	 */
	u64 n_rc_resends;
	u64 n_seq_naks;
	u64 n_rdma_seq;
	u64 n_rnr_naks;
	u64 n_other_naks;
	u64 n_loop_pkts;
	u64 n_pkt_drops;
	u64 n_vl15_dropped;
	u64 n_rc_timeouts;
	u64 n_dmawait;
	u64 n_unaligned;
	u64 n_rc_dupreq;
	u64 n_rc_seqnak;
	u16 pkey_violations;
	u16 qkey_violations;
	u16 mkey_violations;

	/* Hot-path per-CPU counters to avoid cacheline trading to update */
	u64 z_rc_acks;
	u64 z_rc_qacks;
	u64 z_rc_delayed_comp;
	u64 __percpu *rc_acks;
	u64 __percpu *rc_qacks;
	u64 __percpu *rc_delayed_comp;

	void *priv;		/* driver private data */

	/* TODO: Move sm_ah and smi_ah into here as well */
};
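
/*
 * The per-CPU counters above are meant to be bumped locklessly on the
 * hot path and summed only when queried; a sketch, assuming the z_
 * fields hold a baseline subtracted at query time:
 *
 *	this_cpu_inc(*ibp->rc_acks);		(hot path)
 *
 *	u64 sum = 0;
 *	for_each_possible_cpu(cpu)		(query time)
 *		sum += *per_cpu_ptr(ibp->rc_acks, cpu);
 *	sum -= ibp->z_rc_acks;
 */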

/*
 * Things that are driver specific, module parameters in hfi1 and qib
 */
struct rvt_driver_params {
	/*
	 * driver required fields:
	 *	node_guid
	 *	phys_port_cnt
	 *	dma_device
	 *	owner
	 * driver optional fields (rvt will provide generic value if blank):
	 *	name
	 *	node_desc
	 * rvt fields, driver value ignored:
	 *	uverbs_abi_ver
	 *	node_type
	 *	num_comp_vectors
	 *	uverbs_cmd_mask
	 */
	struct ib_device_attr props;

	/*
	 * Drivers will need to support a number of notifications to rvt in
	 * accordance with certain events. This structure should contain a mask
	 * of the supported events. Such events that the rvt may need to know
	 * about include:
	 *	port errors
	 *	port active
	 *	lid change
	 *	sm change
	 *	client reregister
	 *	pkey change
	 *
	 * There may also be other events that the rvt layer needs to know
	 * about; this is not an exhaustive list. For some events, such as a
	 * completion queue error, rvt does not need to rely on the driver.
	 */
	int rvt_signal_supported;

	/*
	 * Anything driver specific that is not covered by props, for instance
	 * special module parameters, goes here.
	 */
	unsigned int lkey_table_size;
	int nports;
};

/* Protection domain */
struct rvt_pd {
	struct ib_pd ibpd;
	int user;		/* non-zero if created from user space */
};

/* Address handle */
struct rvt_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
	atomic_t refcount;
	u8 vl;
	u8 log_pmtu;
};

struct rvt_dev_info;
struct rvt_driver_provided {
	/*
	 * The work to create port files in /sys/class/infiniband is different
	 * depending on the driver. This should not be extracted away and
	 * instead drivers are responsible for setting the correct callback for
	 * this.
	 */

	/*--------------------*/
	/* Required functions */
	/*--------------------*/
	int (*port_callback)(struct ib_device *, u8, struct kobject *);
	const char * (*get_card_name)(struct rvt_dev_info *rdi);
	struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);

	/*--------------------*/
	/* Optional functions */
	/*--------------------*/
	int (*check_ah)(struct ib_device *, struct ib_ah_attr *);
	void (*notify_new_ah)(struct ib_device *, struct ib_ah_attr *,
			      struct rvt_ah *);
};

struct rvt_dev_info {
	struct ib_device ibdev;	/* Keep this first. Nothing above here */

	/*
	 * Prior to calling for registration the driver will be responsible for
	 * allocating space for this structure.
	 *
	 * The driver will also be responsible for filling in certain members of
	 * dparms.props
	 */

	/* Driver specific properties */
	struct rvt_driver_params dparms;

	struct rvt_mregion __rcu *dma_mr;
	struct rvt_lkey_table lkey_table;

	/* PKey Table goes here */

	/* Driver specific helper functions */
	struct rvt_driver_provided driver_f;

	/* Internal use */
	int n_pds_allocated;
	spinlock_t n_pds_lock;	/* Protect pd allocated count */

	int n_ahs_allocated;
	spinlock_t n_ahs_lock;	/* Protect ah allocated count */

	int flags;
	struct rvt_ibport **ports;
};

static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct rvt_pd, ibpd);
}

static inline struct rvt_ah *ibah_to_rvtah(struct ib_ah *ibah)
{
	return container_of(ibah, struct rvt_ah, ibah);
}

static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
{
	return container_of(ibdev, struct rvt_dev_info, ibdev);
}
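
/*
 * These container_of() helpers recover rdmavt structures from the ib_*
 * handles that verbs callbacks receive; typical use, as a sketch:
 *
 *	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
 *	struct rvt_pd *pd = ibpd_to_rvtpd(ibpd);
 */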

static inline void rvt_put_mr(struct rvt_mregion *mr)
{
	if (unlikely(atomic_dec_and_test(&mr->refcount)))
		complete(&mr->comp);
}

static inline void rvt_get_mr(struct rvt_mregion *mr)
{
	atomic_inc(&mr->refcount);
}
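
/*
 * The completion lets MR teardown wait for outstanding holders; a
 * sketch of the pattern (illustrative, not a helper this header
 * provides):
 *
 *	rvt_put_mr(mr);			(drop the allocation's reference)
 *	wait_for_completion(&mr->comp);	(block until refcount hits zero)
 */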

static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}

int rvt_register_device(struct rvt_dev_info *rvd);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
void rvt_attach_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
		     int portnum);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct ib_sge *sge, int acc);
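
/*
 * A minimal registration sketch for a driver built on rdmavt; the
 * "my_*" names are hypothetical and only fields defined above are
 * assumed:
 *
 *	struct rvt_dev_info *rdi = &my_dev->rdi;
 *
 *	rdi->dparms.props.node_guid = my_dev->node_guid;
 *	rdi->dparms.nports = my_dev->num_ports;
 *	rdi->driver_f.port_callback = my_create_port_files;
 *	rdi->driver_f.get_card_name = my_get_card_name;
 *	rdi->driver_f.get_pci_dev = my_get_pci_dev;
 *
 *	ret = rvt_register_device(rdi);
 *	if (ret)
 *		goto bail;	(and rvt_unregister_device() on teardown)
 */
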
#endif          /* DEF_RDMA_VT_H */