#ifndef DEF_RDMA_VT_H
#define DEF_RDMA_VT_H

/*
 * Copyright(c) 2015 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Structure that low level drivers will populate in order to register with the
 * rdmavt layer.
 */

#include "ib_verbs.h"

#define RVT_MULTICAST_LID_BASE 0xC000
#define RVT_PERMISSIVE_LID 0xFFFF

/*
 * Some of the IBTA objects will likely require initialization. These
 * flags determine whether it is OK for rdmavt to do that initialization
 * or whether the driver will. They do not imply that any functions of a
 * particular IBTA object are overridden.
 */
#define RVT_FLAG_MR_INIT_DRIVER BIT(1)
#define RVT_FLAG_QP_INIT_DRIVER BIT(2)
#define RVT_FLAG_CQ_INIT_DRIVER BIT(3)
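
/*
 * Illustrative sketch only (rvt_generic_qp_init() is a hypothetical
 * name used to show the intent of the flags, not a real rdmavt
 * function): at registration time rdmavt can test these bits to decide
 * who initializes a given object:
 *
 *	if (!(rdi->flags & RVT_FLAG_QP_INIT_DRIVER))
 *		rvt_generic_qp_init(rdi);
 */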

/*
 * For Memory Regions. This stuff should probably be moved into rdmavt/mr.h once
 * drivers no longer need access to the MR directly.
 */

/*
 * A segment is a linear region of low physical memory.
 * Used by the verbs layer.
 */
struct rvt_seg {
	void *vaddr;
	size_t length;
};

/* The number of rvt_segs that fit in a page. */
#define RVT_SEGSZ (PAGE_SIZE / sizeof(struct rvt_seg))

struct rvt_segarray {
	struct rvt_seg segs[RVT_SEGSZ];
};

struct rvt_mregion {
	struct ib_pd *pd;	/* shares refcnt of ibmr.pd */
	u64 user_base;		/* User's address for this region */
	u64 iova;		/* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;		/* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;		/* number of rvt_segs in all the arrays */
	u32 mapsz;		/* size of the map array */
	u8 page_shift;		/* 0 - non-uniform/non-power-of-2 sizes */
	u8 lkey_published;	/* in global table */
	struct completion comp;	/* complete when refcount goes to zero */
	atomic_t refcount;
	struct rvt_segarray *map[0];	/* the segments */
};
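
/*
 * How the two-level map is meant to be walked (a sketch modeled on the
 * lookup code in the qib/hfi1 drivers, shown only to document what
 * map/mapsz hold; not a real rdmavt helper). Given a byte offset 'off'
 * into the region:
 *
 *	u32 m = 0, n = 0;
 *
 *	while (off >= mr->map[m]->segs[n].length) {
 *		off -= mr->map[m]->segs[n].length;
 *		if (++n == RVT_SEGSZ) {
 *			m++;
 *			n = 0;
 *		}
 *	}
 *
 * mr->map[m]->segs[n].vaddr + off is then the kernel virtual address.
 */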

#define RVT_MAX_LKEY_TABLE_BITS 23

struct rvt_lkey_table {
	spinlock_t lock;	/* protect changes in this struct */
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
	u32 max;		/* size of the table */
	struct rvt_mregion __rcu **table;
};

/* End Memory Region */

/*
 * Things needed for the Queue Pair definition. Like the MR stuff above,
 * the following should probably get moved to qp.h once drivers stop
 * trying to make and manipulate their own QPs. For the few instances
 * where a driver may need to look into a queue pair, there should be a
 * pointer to a driver-private data structure that it can look at.
 */

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct rvt_sge {
	struct rvt_mregion *mr;
	void *vaddr;		/* kernel virtual address of segment */
	u32 sge_length;		/* length of the SGE */
	u32 length;		/* remaining length of the segment */
	u16 m;			/* current index: mr->map[m] */
	u16 n;			/* current index: mr->map[m]->segs[n] */
};
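
/*
 * What m and n are for: after copying 'len' bytes, a consumer advances
 * the SGE roughly as below (a sketch after the update-sge logic in the
 * qib/hfi1 drivers; dropping of MR references is omitted):
 *
 *	sge->vaddr += len;
 *	sge->length -= len;
 *	sge->sge_length -= len;
 *	if (sge->length == 0 && sge->mr->lkey) {
 *		if (++sge->n >= RVT_SEGSZ) {
 *			if (++sge->m >= sge->mr->mapsz)
 *				return;
 *			sge->n = 0;
 *		}
 *		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
 *		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
 *	}
 */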

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;	/* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct rvt_sge sg_list[0];
};
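
/*
 * Because sg_list is a variable-length array, one send WQE occupies
 * sizeof(struct rvt_swqe) + s_max_sge * sizeof(struct rvt_sge) bytes,
 * so s_wq cannot be indexed directly. A sketch of the stride-based
 * lookup, after the get_swqe_ptr() pattern in qib/hfi1:
 *
 *	static inline struct rvt_swqe *get_swqe_ptr(struct rvt_qp *qp,
 *						    unsigned n)
 *	{
 *		return (struct rvt_swqe *)((char *)qp->s_wq +
 *					   (sizeof(struct rvt_swqe) +
 *					    qp->s_max_sge *
 *					    sizeof(struct rvt_sge)) * n);
 *	}
 */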

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct rvt_rwq {
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receives pull requests from here. */
	struct rvt_rwqe wq[0];
};
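
/*
 * The get_rwqe_ptr() referred to above amounts to the following
 * (a sketch of the pattern used by qib/hfi1; 'rq' is the rvt_rq that
 * owns this rvt_rwq):
 *
 *	static inline struct rvt_rwqe *get_rwqe_ptr(struct rvt_rq *rq,
 *						    unsigned n)
 *	{
 *		return (struct rvt_rwqe *)
 *			((char *)rq->wq->wq +
 *			 (sizeof(struct rvt_rwqe) +
 *			  rq->max_sge * sizeof(struct ib_sge)) * n);
 *	}
 */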

struct rvt_rq {
	struct rvt_rwq *wq;
	u32 size;		/* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made. The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

#define RVT_MAX_RDMA_ATOMIC 16

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or an atomic operation.
 */
struct rvt_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	u32 lpsn;
	union {
		struct rvt_sge rdma_sge;
		u64 atomic_data;
	};
};

struct rvt_sge_state {
	struct rvt_sge *sg_list;	/* next SGE to be used if any */
	struct rvt_sge sge;	/* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock in that order
 * which only happens in modify_qp() or changing the QP 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv;		/* Driver private data */
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;	/* linked list for QPN hash table */
	struct rvt_swqe *s_wq;	/* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies;	/* computed from timeout */

	enum ib_mtu path_mtu;
	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	u32 remote_qpn;
	u32 pmtu;		/* decoded from path_mtu */
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 s_ahgpsn;		/* set to the psn in the copy of the header */

	u8 state;		/* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;		/* Alternate path timeout for this QP */
	u8 timeout;		/* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_alt_pkey_index;	/* Alternate path PKEY index to use */
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry s_ack_queue[RVT_MAX_RDMA_ATOMIC + 1]
		____cacheline_aligned_in_smp;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;	/* used for APM */
	unsigned long r_aflags;
	u64 r_wr_id;		/* ID for current receive WQE */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u32 r_msn;		/* message sequence number */

	u8 r_state;		/* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */

	struct list_head rspwait;	/* link for waiting to respond */

	struct rvt_sge_state r_sge;	/* current receive data */
	struct rvt_rq r_rq;		/* receive work queue */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	struct rvt_sge_state *s_cur_sge;
	u32 s_flags;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;	/* current send request data */
	struct rvt_mregion *s_rdma_mr;
	struct sdma_engine *s_sde;	/* current sde */
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_sending_psn;	/* lowest PSN that is being sent */
	u32 s_sending_hpsn;	/* highest PSN that is being sent */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_acked;		/* last un-ACK'ed entry */
	u32 s_last;		/* last completed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	s8 s_ahgidx;
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[0]	/* verified SGEs */
		____cacheline_aligned_in_smp;
};
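
/*
 * Getting from the ib_qp embedded above back to the rvt_qp, and from
 * there to a driver's per-QP state, uses the usual container_of()
 * pattern. A sketch (the helper name and the priv cast are
 * illustrative, not part of this header):
 *
 *	static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
 *	{
 *		return container_of(ibqp, struct rvt_qp, ibqp);
 *	}
 *
 *	struct my_qp_priv *priv = ibqp_to_rvtqp(ibqp)->priv;
 */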

/* End QP section */

/*
 * Things that are driver specific (module parameters in hfi1 and qib)
 */
struct rvt_driver_params {
	/*
	 * driver required fields:
	 *	node_guid
	 *	phys_port_cnt
	 *	dma_device
	 *	owner
	 * driver optional fields (rvt will provide a generic value if blank):
	 *	name
	 *	node_desc
	 * rvt fields, driver value ignored:
	 *	uverbs_abi_ver
	 *	node_type
	 *	num_comp_vectors
	 *	uverbs_cmd_mask
	 */
	struct ib_device_attr props;

	/*
	 * Drivers will need to support a number of notifications to rvt in
	 * accordance with certain events. This field should contain a mask
	 * of the supported events. Events that rvt may need to know about
	 * include:
	 *	port errors
	 *	port active
	 *	lid change
	 *	sm change
	 *	client reregister
	 *	pkey change
	 *
	 * This is not an exhaustive list; there may be other events the rvt
	 * layer needs to learn about from the driver. There are also events,
	 * such as a completion queue error, for which rvt does not need to
	 * rely on the driver at all.
	 */
	int rvt_signal_supported;

	/*
	 * Anything driver specific that is not covered by props, for
	 * instance special module parameters, goes here.
	 */
	unsigned int lkey_table_size;
};
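
/*
 * A minimal sketch of a driver filling in dparms before registration
 * (the values are made up; max_qp and max_sge are standard
 * ib_device_attr members):
 *
 *	rdi->dparms.props.max_qp = 16384;
 *	rdi->dparms.props.max_sge = 8;
 *	rdi->dparms.lkey_table_size = 16;
 */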

/*
 * Functions that drivers are required to support
 */
struct rvt_dev_info;
struct rvt_driver_provided {
	/*
	 * The work to create port files in /sys/class/infiniband is
	 * different depending on the driver. This should not be abstracted
	 * away; instead, drivers are responsible for setting the correct
	 * callback for this.
	 */
	int (*port_callback)(struct ib_device *, u8, struct kobject *);
	const char * (*get_card_name)(struct rvt_dev_info *rdi);
	struct pci_dev * (*get_pci_dev)(struct rvt_dev_info *rdi);
};
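
/*
 * Wiring these up in a driver might look like the following (all names
 * here are hypothetical driver code, not rdmavt API):
 *
 *	static const char *my_get_card_name(struct rvt_dev_info *rdi)
 *	{
 *		return "my_hca0";
 *	}
 *
 *	rdi->driver_f.port_callback = my_create_port_files;
 *	rdi->driver_f.get_card_name = my_get_card_name;
 *	rdi->driver_f.get_pci_dev = my_get_pci_dev;
 */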

/* Protection domain */
struct rvt_pd {
	struct ib_pd ibpd;
	int user;		/* non-zero if created from user space */
};

struct rvt_dev_info {
	struct ib_device ibdev;	/* Keep this first. Nothing above here */

	/*
	 * Prior to calling for registration the driver will be responsible
	 * for allocating space for this structure.
	 *
	 * The driver will also be responsible for filling in certain members
	 * of dparms.props.
	 */

	/* Driver specific properties */
	struct rvt_driver_params dparms;

	struct rvt_mregion __rcu *dma_mr;
	struct rvt_lkey_table lkey_table;

	/* PKey Table goes here */

	/* Driver specific helper functions */
	struct rvt_driver_provided driver_f;

	/* Internal use */
	int n_pds_allocated;
	spinlock_t n_pds_lock;	/* Protect pd allocated count */

	int flags;
};

static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct rvt_pd, ibpd);
}

static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
{
	return container_of(ibdev, struct rvt_dev_info, ibdev);
}

static inline void rvt_put_mr(struct rvt_mregion *mr)
{
	if (unlikely(atomic_dec_and_test(&mr->refcount)))
		complete(&mr->comp);
}

static inline void rvt_get_mr(struct rvt_mregion *mr)
{
	atomic_inc(&mr->refcount);
}
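
/*
 * Expected reference lifecycle (a sketch; the surrounding alloc/free
 * code belongs to the MR owner, not this header): the refcount is
 * initialized to one for the creating reference, users bracket access
 * with rvt_get_mr()/rvt_put_mr(), and the destroyer drops the last
 * reference and then sleeps on the completion:
 *
 *	rvt_put_mr(mr);
 *	wait_for_completion(&mr->comp);
 *
 * after which it is safe to free the MR.
 */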

int rvt_register_device(struct rvt_dev_info *rvd);
void rvt_unregister_device(struct rvt_dev_info *rvd);
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct ib_sge *sge, int acc);

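/*
 * Typical usage from a driver's probe and remove paths (a hedged
 * sketch: 'dd' and the embedding of rvt_dev_info in a driver-private
 * structure are assumptions, and error handling is omitted):
 *
 *	struct rvt_dev_info *rdi = &dd->rdi;
 *
 *	(fill in rdi->dparms and rdi->driver_f as shown above)
 *	ret = rvt_register_device(rdi);
 *	if (ret)
 *		goto bail;
 *
 * and later, when the device goes away:
 *
 *	rvt_unregister_device(rdi);
 */
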
#endif /* DEF_RDMA_VT_H */