1 /*
2  * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 #ifndef _C2_WR_H_
34 #define _C2_WR_H_
35 
36 #ifdef CCDEBUG
37 #define CCWR_MAGIC		0xb07700b0
38 #endif
39 
40 #define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
41 
42 /* Maximum allowed size in bytes of private_data exchange
43  * on connect.
44  */
45 #define C2_MAX_PRIVATE_DATA_SIZE 200
46 
47 /*
48  * These types are shared among the adapter, host, and CCIL consumer.
49  */
/* CQ arm/notification request types (shared host/adapter values). */
enum c2_cq_notification_type {
	C2_CQ_NOTIFICATION_TYPE_NONE = 1,	/* no notification requested */
	C2_CQ_NOTIFICATION_TYPE_NEXT,		/* notify on next completion */
	C2_CQ_NOTIFICATION_TYPE_NEXT_SE		/* notify on next solicited event */
};
55 
/* Sub-commands for CCWR_RNIC_SETCONFIG (c2wr_rnic_setconfig_req.option). */
enum c2_setconfig_cmd {
	C2_CFG_ADD_ADDR = 1,	/* payload is a struct c2_netaddr */
	C2_CFG_DEL_ADDR = 2,	/* payload is a struct c2_netaddr */
	C2_CFG_ADD_ROUTE = 3,	/* payload is a struct c2_route */
	C2_CFG_DEL_ROUTE = 4	/* payload is a struct c2_route */
};
62 
/* Sub-commands for CCWR_RNIC_GETCONFIG (c2wr_rnic_getconfig_req.option). */
enum c2_getconfig_cmd {
	C2_GETCONFIG_ROUTES = 1,
	C2_GETCONFIG_ADDRS
};
67 
68 /*
69  *  CCIL Work Request Identifiers
70  */
/*
 * Work request identifiers, carried in c2wr_hdr.id.  Two disjoint
 * numbering spaces share this enum: verbs-queue ids (CCWR_RNIC_OPEN
 * .. CCWR_LAST) and user SQ/RQ ids (CCWR_SEND .. CCWR_UNIMPL, which
 * restart at 1).  The numeric values are part of the host/adapter
 * ABI and must never be reordered or removed.
 */
enum c2wr_ids {
	CCWR_RNIC_OPEN = 1,
	CCWR_RNIC_QUERY,
	CCWR_RNIC_SETCONFIG,
	CCWR_RNIC_GETCONFIG,
	CCWR_RNIC_CLOSE,
	CCWR_CQ_CREATE,
	CCWR_CQ_QUERY,
	CCWR_CQ_MODIFY,
	CCWR_CQ_DESTROY,
	CCWR_QP_CONNECT,
	CCWR_PD_ALLOC,
	CCWR_PD_DEALLOC,
	CCWR_SRQ_CREATE,
	CCWR_SRQ_QUERY,
	CCWR_SRQ_MODIFY,
	CCWR_SRQ_DESTROY,
	CCWR_QP_CREATE,
	CCWR_QP_QUERY,
	CCWR_QP_MODIFY,
	CCWR_QP_DESTROY,
	CCWR_NSMR_STAG_ALLOC,
	CCWR_NSMR_REGISTER,
	CCWR_NSMR_PBL,
	CCWR_STAG_DEALLOC,
	CCWR_NSMR_REREGISTER,
	CCWR_SMR_REGISTER,
	CCWR_MR_QUERY,
	CCWR_MW_ALLOC,
	CCWR_MW_QUERY,
	CCWR_EP_CREATE,
	CCWR_EP_GETOPT,
	CCWR_EP_SETOPT,
	CCWR_EP_DESTROY,
	CCWR_EP_BIND,
	CCWR_EP_CONNECT,
	CCWR_EP_LISTEN,
	CCWR_EP_SHUTDOWN,
	CCWR_EP_LISTEN_CREATE,
	CCWR_EP_LISTEN_DESTROY,
	CCWR_EP_QUERY,
	CCWR_CR_ACCEPT,
	CCWR_CR_REJECT,
	CCWR_CONSOLE,
	CCWR_TERM,
	CCWR_FLASH_INIT,
	CCWR_FLASH,
	CCWR_BUF_ALLOC,
	CCWR_BUF_FREE,
	CCWR_FLASH_WRITE,
	CCWR_INIT,		/* WARNING: Don't move this ever again! */



	/* Add new IDs here */



	/*
	 * WARNING: CCWR_LAST must always be the last verbs id defined!
	 *          All the preceding IDs are fixed, and must not change.
	 *          You can add new IDs, but must not remove or reorder
	 *          any IDs. If you do, YOU will ruin any hope of
	 *          compatibility between versions.
	 */
	CCWR_LAST,

	/*
	 * Start over at 1 so that arrays indexed by user wr id's
	 * begin at 1.  This is OK since the verbs and user wr id's
	 * are always used on disjoint sets of queues.
	 */
	/*
	 * The order of the CCWR_SEND_XX verbs must
	 * match the order of the RDMA_OPs
	 */
	CCWR_SEND = 1,
	CCWR_SEND_INV,
	CCWR_SEND_SE,
	CCWR_SEND_SE_INV,
	CCWR_RDMA_WRITE,
	CCWR_RDMA_READ,
	CCWR_RDMA_READ_INV,
	CCWR_MW_BIND,
	CCWR_NSMR_FASTREG,
	CCWR_STAG_INVALIDATE,
	CCWR_RECV,
	CCWR_NOP,
	CCWR_UNIMPL,
/* WARNING: This must always be the last user wr id defined! */
};
162 #define RDMA_SEND_OPCODE_FROM_WR_ID(x)   (x+2)
163 
164 /*
165  * SQ/RQ Work Request Types
166  */
/* SQ/RQ work request types; aliases of the user wr ids above. */
enum c2_wr_type {
	C2_WR_TYPE_SEND = CCWR_SEND,
	C2_WR_TYPE_SEND_SE = CCWR_SEND_SE,
	C2_WR_TYPE_SEND_INV = CCWR_SEND_INV,
	C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV,
	C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE,
	C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ,
	C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV,
	C2_WR_TYPE_BIND_MW = CCWR_MW_BIND,
	C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG,
	C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE,
	C2_WR_TYPE_RECV = CCWR_RECV,
	C2_WR_TYPE_NOP = CCWR_NOP,
};
181 
/* IPv4 interface address; payload for C2_CFG_ADD_ADDR/C2_CFG_DEL_ADDR. */
struct c2_netaddr {
	__be32 ip_addr;		/* interface address, network byte order */
	__be32 netmask;		/* network byte order */
	u32 mtu;
};
187 
/* Route entry; payload for C2_CFG_ADD_ROUTE/C2_CFG_DEL_ROUTE. */
struct c2_route {
	u32 ip_addr;		/* 0 indicates the default route */
	u32 netmask;		/* netmask associated with dst */
	u32 flags;
	union {
		u32 ipaddr;	/* address of the nexthop interface */
		u8 enaddr[6];	/* or the nexthop ethernet MAC address */
	} nexthop;
};
197 
198 /*
199  * A Scatter Gather Entry.
200  */
/*
 * A Scatter Gather Entry: an STag-qualified (stag, offset, length)
 * memory reference.
 */
struct c2_data_addr {
	__be32 stag;		/* steering tag for the region */
	__be32 length;		/* length of this segment in bytes */
	__be64 to;		/* tagged offset within the region */
};
206 
207 /*
208  * MR and MW flags used by the consumer, RI, and RNIC.
209  */
/* MR/MW access and state flag bits (may be OR'd together). */
enum c2_mm_flags {
	MEM_REMOTE = 0x0001,	/* allow mw binds with remote access. */
	MEM_VA_BASED = 0x0002,	/* Not Zero-based */
	MEM_PBL_COMPLETE = 0x0004,	/* PBL array is complete in this msg */
	MEM_LOCAL_READ = 0x0008,	/* allow local reads */
	MEM_LOCAL_WRITE = 0x0010,	/* allow local writes */
	MEM_REMOTE_READ = 0x0020,	/* allow remote reads */
	MEM_REMOTE_WRITE = 0x0040,	/* allow remote writes */
	MEM_WINDOW_BIND = 0x0080,	/* binds allowed */
	MEM_SHARED = 0x0100,	/* set if MR is shared */
	MEM_STAG_VALID = 0x0200	/* set if STAG is in valid state */
};
222 
223 /*
224  * CCIL API ACF flags defined in terms of the low level mem flags.
225  * This minimizes translation needed in the user API
226  */
/* Access Control Flags, defined as aliases of the mem flags above. */
enum c2_acf {
	C2_ACF_LOCAL_READ = MEM_LOCAL_READ,
	C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE,
	C2_ACF_REMOTE_READ = MEM_REMOTE_READ,
	C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE,
	C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND
};
234 
235 /*
236  * Image types of objects written to flash
237  */
238 #define C2_FLASH_IMG_BITFILE 1
239 #define C2_FLASH_IMG_OPTION_ROM 2
240 #define C2_FLASH_IMG_VPD 3
241 
242 /*
243  *  to fix bug 1815 we define the max size allowable of the
244  *  terminate message (per the IETF spec).Refer to the IETF
245  *  protocol specification, section 12.1.6, page 64)
246  *  The message is prefixed by 20 types of DDP info.
247  *
248  *  Then the message has 6 bytes for the terminate control
249  *  and DDP segment length info plus a DDP header (either
250  *  14 or 18 byts) plus 28 bytes for the RDMA header.
251  *  Thus the max size in:
252  *  20 + (6 + 18 + 28) = 72
253  */
254 #define C2_MAX_TERMINATE_MESSAGE_SIZE (72)
255 
256 /*
257  * Build String Length.  It must be the same as C2_BUILD_STR_LEN in ccil_api.h
258  */
259 #define WR_BUILD_STR_LEN 64
260 
261 /*
262  * WARNING:  All of these structs need to align any 64bit types on
263  * 64 bit boundaries!  64bit types include u64 and u64.
264  */
265 
266 /*
267  * Clustercore Work Request Header.  Be sensitive to field layout
268  * and alignment.
269  */
struct c2wr_hdr {
	/* wqe_count is part of the cqe.  It is put here so the
	 * adapter can write to it while the wr is pending without
	 * clobbering part of the wr.  This word need not be dma'd
	 * from the host to adapter by libccil, but we copy it anyway
	 * to make the memcpy to the adapter better aligned.
	 */
	__be32 wqe_count;

	/* Put these fields next so that later 32- and 64-bit
	 * quantities are naturally aligned.
	 */
	u8 id;			/* WR id (enum c2wr_ids) */
	u8 result;		/* adapter -> host */
	u8 sge_count;		/* host -> adapter */
	u8 flags;		/* host -> adapter */

	u64 context;		/* opaque host cookie echoed in the reply/cqe */
#ifdef CCMSGMAGIC
	/* Debug-only trailer, present when built with CCMSGMAGIC. */
	u32 magic;
	u32 pad;
#endif
} __attribute__((packed));
293 
294 /*
295  *------------------------ RNIC ------------------------
296  */
297 
298 /*
299  * WR_RNIC_OPEN
300  */
301 
302 /*
303  * Flags for the RNIC WRs
304  */
/* Flag bits for the RNIC WRs (c2wr_rnic_open_req.flags and query flags). */
enum c2_rnic_flags {
	RNIC_IRD_STATIC = 0x0001,
	RNIC_ORD_STATIC = 0x0002,
	RNIC_QP_STATIC = 0x0004,
	RNIC_SRQ_SUPPORTED = 0x0008,
	RNIC_PBL_BLOCK_MODE = 0x0010,
	RNIC_SRQ_MODEL_ARRIVAL = 0x0020,
	RNIC_CQ_OVF_DETECTED = 0x0040,
	RNIC_PRIV_MODE = 0x0080
};
315 
/*
 * CCWR_RNIC_OPEN: open the RNIC.  user_context is returned in
 * RNIC-affiliated AEs (see c2wr_ae_hdr).
 */
struct c2wr_rnic_open_req {
	struct c2wr_hdr hdr;
	u64 user_context;
	__be16 flags;		/* See enum c2_rnic_flags */
	__be16 port_num;
} __attribute__((packed));

/* Reply: rnic_handle identifies the RNIC in all later requests. */
struct c2wr_rnic_open_rep {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
} __attribute__((packed));

union c2wr_rnic_open {
	struct c2wr_rnic_open_req req;
	struct c2wr_rnic_open_rep rep;
} __attribute__((packed));
332 
/*
 * WR_RNIC_QUERY: report adapter identity, firmware version, and
 * resource limits.
 */
struct c2wr_rnic_query_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
} __attribute__((packed));

struct c2wr_rnic_query_rep {
	struct c2wr_hdr hdr;
	u64 user_context;
	__be32 vendor_id;
	__be32 part_number;
	__be32 hw_version;
	__be32 fw_ver_major;
	__be32 fw_ver_minor;
	__be32 fw_ver_patch;
	char fw_ver_build_str[WR_BUILD_STR_LEN];
	__be32 max_qps;
	__be32 max_qp_depth;
	u32 max_srq_depth;
	u32 max_send_sgl_depth;
	u32 max_rdma_sgl_depth;
	__be32 max_cqs;
	__be32 max_cq_depth;
	u32 max_cq_event_handlers;
	__be32 max_mrs;
	u32 max_pbl_depth;
	__be32 max_pds;
	__be32 max_global_ird;
	u32 max_global_ord;
	__be32 max_qp_ird;
	__be32 max_qp_ord;
	u32 flags;		/* see enum c2_rnic_flags */
	__be32 max_mws;
	u32 pbe_range_low;
	u32 pbe_range_high;
	u32 max_srqs;
	u32 page_size;
} __attribute__((packed));

union c2wr_rnic_query {
	struct c2wr_rnic_query_req req;
	struct c2wr_rnic_query_rep rep;
} __attribute__((packed));
378 
379 /*
380  * WR_RNIC_GETCONFIG
381  */
382 
/* CCWR_RNIC_GETCONFIG: read back address/route configuration. */
struct c2wr_rnic_getconfig_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 option;		/* see enum c2_getconfig_cmd */
	u64 reply_buf;		/* reply buffer address -- presumably DMA-able; confirm with caller */
	u32 reply_buf_len;
} __attribute__((packed)) ;

struct c2wr_rnic_getconfig_rep {
	struct c2wr_hdr hdr;
	u32 option;		/* see enum c2_getconfig_cmd */
	u32 count_len;		/* length of the number of addresses configured */
} __attribute__((packed)) ;

union c2wr_rnic_getconfig {
	struct c2wr_rnic_getconfig_req req;
	struct c2wr_rnic_getconfig_rep rep;
} __attribute__((packed)) ;
401 
402 /*
403  * WR_RNIC_SETCONFIG
404  */
/* CCWR_RNIC_SETCONFIG: add/remove an address or route on the adapter. */
struct c2wr_rnic_setconfig_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	__be32 option;		/* See enum c2_setconfig_cmd */
	/* variable data and pad. See c2_netaddr and c2_route */
	u8 data[0];
} __attribute__((packed)) ;

struct c2wr_rnic_setconfig_rep {
	struct c2wr_hdr hdr;	/* status returned in hdr.result */
} __attribute__((packed)) ;

union c2wr_rnic_setconfig {
	struct c2wr_rnic_setconfig_req req;
	struct c2wr_rnic_setconfig_rep rep;
} __attribute__((packed)) ;
421 
422 /*
423  * WR_RNIC_CLOSE
424  */
/* CCWR_RNIC_CLOSE: release the RNIC handle obtained by CCWR_RNIC_OPEN. */
struct c2wr_rnic_close_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
} __attribute__((packed)) ;

struct c2wr_rnic_close_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed)) ;

union c2wr_rnic_close {
	struct c2wr_rnic_close_req req;
	struct c2wr_rnic_close_rep rep;
} __attribute__((packed)) ;
438 
439 /*
440  *------------------------ CQ ------------------------
441  */
/* CCWR_CQ_CREATE: create a completion queue. */
struct c2wr_cq_create_req {
	struct c2wr_hdr hdr;
	__be64 shared_ht;	/* host address shared with the adapter -- TODO confirm layout */
	u64 user_context;
	__be64 msg_pool;	/* host address of the CQ message pool */
	u32 rnic_handle;
	__be32 msg_size;	/* size of each CQ message */
	__be32 depth;		/* requested number of entries */
} __attribute__((packed)) ;

struct c2wr_cq_create_rep {
	struct c2wr_hdr hdr;
	__be32 mq_index;	/* adapter message-queue index for this CQ */
	__be32 adapter_shared;
	u32 cq_handle;		/* handle used in later CQ requests */
} __attribute__((packed)) ;

union c2wr_cq_create {
	struct c2wr_cq_create_req req;
	struct c2wr_cq_create_rep rep;
} __attribute__((packed)) ;
463 
/* CCWR_CQ_MODIFY: resize a CQ / supply a new message pool. */
struct c2wr_cq_modify_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 cq_handle;
	u32 new_depth;
	u64 new_msg_pool;
} __attribute__((packed)) ;

struct c2wr_cq_modify_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed)) ;

union c2wr_cq_modify {
	struct c2wr_cq_modify_req req;
	struct c2wr_cq_modify_rep rep;
} __attribute__((packed)) ;
480 
/* CCWR_CQ_DESTROY: destroy a CQ created by CCWR_CQ_CREATE. */
struct c2wr_cq_destroy_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 cq_handle;
} __attribute__((packed)) ;

struct c2wr_cq_destroy_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed)) ;

union c2wr_cq_destroy {
	struct c2wr_cq_destroy_req req;
	struct c2wr_cq_destroy_rep rep;
} __attribute__((packed)) ;
495 
496 /*
497  *------------------------ PD ------------------------
498  */
/*
 * CCWR_PD_ALLOC / CCWR_PD_DEALLOC: protection domains.  The host
 * chooses pd_id; there is no handle in the reply.
 */
struct c2wr_pd_alloc_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 pd_id;
} __attribute__((packed)) ;

struct c2wr_pd_alloc_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed)) ;

union c2wr_pd_alloc {
	struct c2wr_pd_alloc_req req;
	struct c2wr_pd_alloc_rep rep;
} __attribute__((packed)) ;

struct c2wr_pd_dealloc_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 pd_id;
} __attribute__((packed)) ;

struct c2wr_pd_dealloc_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed)) ;

union c2wr_pd_dealloc {
	struct c2wr_pd_dealloc_req req;
	struct c2wr_pd_dealloc_rep rep;
} __attribute__((packed)) ;
528 
529 /*
530  *------------------------ SRQ ------------------------
531  */
/* CCWR_SRQ_CREATE / CCWR_SRQ_DESTROY: shared receive queues. */
struct c2wr_srq_create_req {
	struct c2wr_hdr hdr;
	u64 shared_ht;
	u64 user_context;
	u32 rnic_handle;
	u32 srq_depth;		/* requested depth */
	u32 srq_limit;		/* low-water limit */
	u32 sgl_depth;
	u32 pd_id;
} __attribute__((packed)) ;

/* Reply reports the actual (possibly adjusted) depths/sizes. */
struct c2wr_srq_create_rep {
	struct c2wr_hdr hdr;
	u32 srq_depth;
	u32 sgl_depth;
	u32 msg_size;
	u32 mq_index;
	u32 mq_start;
	u32 srq_handle;
} __attribute__((packed)) ;

union c2wr_srq_create {
	struct c2wr_srq_create_req req;
	struct c2wr_srq_create_rep rep;
} __attribute__((packed)) ;

struct c2wr_srq_destroy_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 srq_handle;
} __attribute__((packed)) ;

struct c2wr_srq_destroy_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed)) ;

union c2wr_srq_destroy {
	struct c2wr_srq_destroy_req req;
	struct c2wr_srq_destroy_rep rep;
} __attribute__((packed)) ;
572 
573 /*
574  *------------------------ QP ------------------------
575  */
/* QP capability/state flag bits (c2wr_qp_create_req.flags etc.). */
enum c2wr_qp_flags {
	QP_RDMA_READ = 0x00000001,	/* RDMA read enabled? */
	QP_RDMA_WRITE = 0x00000002,	/* RDMA write enabled? */
	QP_MW_BIND = 0x00000004,	/* MWs enabled */
	QP_ZERO_STAG = 0x00000008,	/* zero stag enabled? */
	QP_REMOTE_TERMINATION = 0x00000010,	/* remote end terminated */
	QP_RDMA_READ_RESPONSE = 0x00000020	/* Remote RDMA read enabled? */
};
585 
/* CCWR_QP_CREATE: create a queue pair bound to the given CQs and PD. */
struct c2wr_qp_create_req {
	struct c2wr_hdr hdr;
	__be64 shared_sq_ht;
	__be64 shared_rq_ht;
	u64 user_context;
	u32 rnic_handle;
	u32 sq_cq_handle;
	u32 rq_cq_handle;
	__be32 sq_depth;
	__be32 rq_depth;
	u32 srq_handle;		/* 0 unless SRQ-attached -- TODO confirm */
	u32 srq_limit;
	__be32 flags;		/* see enum c2wr_qp_flags */
	__be32 send_sgl_depth;
	__be32 recv_sgl_depth;
	__be32 rdma_write_sgl_depth;
	__be32 ord;		/* outbound RDMA read depth */
	__be32 ird;		/* inbound RDMA read depth */
	u32 pd_id;
} __attribute__((packed)) ;

/* Reply echoes the granted depths and the MQ placement for SQ/RQ. */
struct c2wr_qp_create_rep {
	struct c2wr_hdr hdr;
	__be32 sq_depth;
	__be32 rq_depth;
	u32 send_sgl_depth;
	u32 recv_sgl_depth;
	u32 rdma_write_sgl_depth;
	u32 ord;
	u32 ird;
	__be32 sq_msg_size;
	__be32 sq_mq_index;
	__be32 sq_mq_start;
	__be32 rq_msg_size;
	__be32 rq_mq_index;
	__be32 rq_mq_start;
	u32 qp_handle;
} __attribute__((packed)) ;

union c2wr_qp_create {
	struct c2wr_qp_create_req req;
	struct c2wr_qp_create_rep rep;
} __attribute__((packed)) ;
629 
/* CCWR_QP_QUERY: query QP attributes and connection endpoints. */
struct c2wr_qp_query_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 qp_handle;
} __attribute__((packed)) ;

struct c2wr_qp_query_rep {
	struct c2wr_hdr hdr;
	u64 user_context;
	u32 rnic_handle;
	u32 sq_depth;
	u32 rq_depth;
	u32 send_sgl_depth;
	u32 rdma_write_sgl_depth;
	u32 recv_sgl_depth;
	u32 ord;
	u32 ird;
	u16 qp_state;		/* see enum c2_qp_state */
	u16 flags;		/* see enum c2wr_qp_flags */
	u32 qp_id;
	u32 local_addr;
	u32 remote_addr;
	u16 local_port;
	u16 remote_port;
	u32 terminate_msg_length;	/* 0 if not present */
	u8 data[0];
	/* Terminate Message in-line here. */
} __attribute__((packed)) ;

union c2wr_qp_query {
	struct c2wr_qp_query_req req;
	struct c2wr_qp_query_rep rep;
} __attribute__((packed)) ;
663 
/*
 * CCWR_QP_MODIFY: change QP state/attributes.  Fields set to
 * C2_QP_NO_ATTR_CHANGE are left as-is -- presumably; confirm with callers.
 */
struct c2wr_qp_modify_req {
	struct c2wr_hdr hdr;
	u64 stream_msg;
	u32 stream_msg_length;
	u32 rnic_handle;
	u32 qp_handle;
	__be32 next_qp_state;	/* see enum c2_qp_state */
	__be32 ord;
	__be32 ird;
	__be32 sq_depth;
	__be32 rq_depth;
	u32 llp_ep_handle;
} __attribute__((packed)) ;

/* Reply reports resulting attributes and SQ/RQ MQ placement. */
struct c2wr_qp_modify_rep {
	struct c2wr_hdr hdr;
	u32 ord;
	u32 ird;
	u32 sq_depth;
	u32 rq_depth;
	u32 sq_msg_size;
	u32 sq_mq_index;
	u32 sq_mq_start;
	u32 rq_msg_size;
	u32 rq_mq_index;
	u32 rq_mq_start;
} __attribute__((packed)) ;

union c2wr_qp_modify {
	struct c2wr_qp_modify_req req;
	struct c2wr_qp_modify_rep rep;
} __attribute__((packed)) ;
696 
/* CCWR_QP_DESTROY: destroy a QP created by CCWR_QP_CREATE. */
struct c2wr_qp_destroy_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 qp_handle;
} __attribute__((packed)) ;

struct c2wr_qp_destroy_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed)) ;

union c2wr_qp_destroy {
	struct c2wr_qp_destroy_req req;
	struct c2wr_qp_destroy_rep rep;
} __attribute__((packed)) ;
711 
712 /*
713  * The CCWR_QP_CONNECT msg is posted on the verbs request queue.  It can
714  * only be posted when a QP is in IDLE state.  After the connect request is
715  * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state.
716  * No synchronous reply from adapter to this WR.  The results of
717  * connection are passed back in an async event CCAE_ACTIVE_CONNECT_RESULTS
718  * See c2wr_ae_active_connect_results_t
719  */
/* See the CCWR_QP_CONNECT description above: no synchronous reply;
 * results arrive via the CCAE_ACTIVE_CONNECT_RESULTS AE. */
struct c2wr_qp_connect_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 qp_handle;
	__be32 remote_addr;	/* network byte order */
	__be16 remote_port;	/* network byte order */
	u16 pad;
	__be32 private_data_length;	/* <= C2_MAX_PRIVATE_DATA_SIZE */
	u8 private_data[0];	/* Private data in-line. */
} __attribute__((packed)) ;

struct c2wr_qp_connect {
	struct c2wr_qp_connect_req req;
	/* no synchronous reply.         */
} __attribute__((packed)) ;
735 
736 
737 /*
738  *------------------------ MM ------------------------
739  */
740 
/* CCWR_NSMR_STAG_ALLOC: allocate an STag (and PBL space) for an NSMR. */
struct c2wr_nsmr_stag_alloc_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 pbl_depth;		/* requested page-buffer-list depth */
	u32 pd_id;
	u32 flags;		/* see enum c2_mm_flags */
} __attribute__((packed)) ;

struct c2wr_nsmr_stag_alloc_rep {
	struct c2wr_hdr hdr;
	u32 pbl_depth;		/* granted depth */
	u32 stag_index;
} __attribute__((packed)) ;

union c2wr_nsmr_stag_alloc {
	struct c2wr_nsmr_stag_alloc_req req;
	struct c2wr_nsmr_stag_alloc_rep rep;
} __attribute__((packed)) ;
759 
/*
 * CCWR_NSMR_REGISTER: register a non-shared memory region.  If the
 * PBL does not fit in one message, the remainder follows via
 * CCWR_NSMR_PBL requests (MEM_PBL_COMPLETE marks the last chunk).
 */
struct c2wr_nsmr_register_req {
	struct c2wr_hdr hdr;
	__be64 va;		/* virtual address of the region */
	u32 rnic_handle;
	__be16 flags;		/* see enum c2_mm_flags */
	u8 stag_key;
	u8 pad;
	u32 pd_id;
	__be32 pbl_depth;
	__be32 pbe_size;	/* size of each page buffer entry */
	__be32 fbo;		/* first byte offset into the first page */
	__be32 length;		/* region length in bytes */
	__be32 addrs_length;	/* number of paddrs in this message */
	/* array of paddrs (must be aligned on a 64bit boundary) */
	__be64 paddrs[0];
} __attribute__((packed)) ;

struct c2wr_nsmr_register_rep {
	struct c2wr_hdr hdr;
	u32 pbl_depth;
	__be32 stag_index;
} __attribute__((packed)) ;

union c2wr_nsmr_register {
	struct c2wr_nsmr_register_req req;
	struct c2wr_nsmr_register_rep rep;
} __attribute__((packed)) ;
787 
/* CCWR_NSMR_PBL: continuation message carrying more PBL entries for
 * a registration in progress (identified by stag_index). */
struct c2wr_nsmr_pbl_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	__be32 flags;		/* MEM_PBL_COMPLETE marks the final chunk */
	__be32 stag_index;
	__be32 addrs_length;	/* number of paddrs in this message */
	/* array of paddrs (must be aligned on a 64bit boundary) */
	__be64 paddrs[0];
} __attribute__((packed)) ;

struct c2wr_nsmr_pbl_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed)) ;

union c2wr_nsmr_pbl {
	struct c2wr_nsmr_pbl_req req;
	struct c2wr_nsmr_pbl_rep rep;
} __attribute__((packed)) ;
806 
/* CCWR_MR_QUERY: query memory-region attributes by stag index. */
struct c2wr_mr_query_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 stag_index;
} __attribute__((packed)) ;

struct c2wr_mr_query_rep {
	struct c2wr_hdr hdr;
	u8 stag_key;
	u8 pad[3];
	u32 pd_id;
	u32 flags;		/* see enum c2_mm_flags */
	u32 pbl_depth;
} __attribute__((packed)) ;

union c2wr_mr_query {
	struct c2wr_mr_query_req req;
	struct c2wr_mr_query_rep rep;
} __attribute__((packed)) ;
826 
/* CCWR_MW_QUERY: query memory-window attributes by stag index. */
struct c2wr_mw_query_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 stag_index;
} __attribute__((packed)) ;

struct c2wr_mw_query_rep {
	struct c2wr_hdr hdr;
	u8 stag_key;
	u8 pad[3];
	u32 pd_id;
	u32 flags;		/* see enum c2_mm_flags */
} __attribute__((packed)) ;

union c2wr_mw_query {
	struct c2wr_mw_query_req req;
	struct c2wr_mw_query_rep rep;
} __attribute__((packed)) ;
845 
846 
/* CCWR_STAG_DEALLOC: release an STag (MR or MW). */
struct c2wr_stag_dealloc_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	__be32 stag_index;
} __attribute__((packed)) ;

struct c2wr_stag_dealloc_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed)) ;

union c2wr_stag_dealloc {
	struct c2wr_stag_dealloc_req req;
	struct c2wr_stag_dealloc_rep rep;
} __attribute__((packed)) ;
861 
/* CCWR_NSMR_REREGISTER: re-register an existing NSMR with new
 * attributes/PBL.  Layout mirrors c2wr_nsmr_register_req. */
struct c2wr_nsmr_reregister_req {
	struct c2wr_hdr hdr;
	u64 va;
	u32 rnic_handle;
	u16 flags;		/* see enum c2_mm_flags */
	u8 stag_key;
	u8 pad;
	u32 stag_index;
	u32 pd_id;
	u32 pbl_depth;
	u32 pbe_size;
	u32 fbo;
	u32 length;
	u32 addrs_length;
	u32 pad1;		/* keeps paddrs[] on a 64bit boundary */
	/* array of paddrs (must be aligned on a 64bit boundary) */
	u64 paddrs[0];
} __attribute__((packed)) ;

struct c2wr_nsmr_reregister_rep {
	struct c2wr_hdr hdr;
	u32 pbl_depth;
	u32 stag_index;
} __attribute__((packed)) ;

union c2wr_nsmr_reregister {
	struct c2wr_nsmr_reregister_req req;
	struct c2wr_nsmr_reregister_rep rep;
} __attribute__((packed)) ;
891 
/* CCWR_SMR_REGISTER: register a shared memory region based on an
 * existing registration (stag_index). */
struct c2wr_smr_register_req {
	struct c2wr_hdr hdr;
	u64 va;
	u32 rnic_handle;
	u16 flags;		/* see enum c2_mm_flags */
	u8 stag_key;
	u8 pad;
	u32 stag_index;		/* stag of the region being shared */
	u32 pd_id;
} __attribute__((packed)) ;

struct c2wr_smr_register_rep {
	struct c2wr_hdr hdr;
	u32 stag_index;		/* stag of the new shared registration */
} __attribute__((packed)) ;

union c2wr_smr_register {
	struct c2wr_smr_register_req req;
	struct c2wr_smr_register_rep rep;
} __attribute__((packed)) ;
912 
/* CCWR_MW_ALLOC: allocate a memory window in the given PD. */
struct c2wr_mw_alloc_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 pd_id;
} __attribute__((packed)) ;

struct c2wr_mw_alloc_rep {
	struct c2wr_hdr hdr;
	u32 stag_index;
} __attribute__((packed)) ;

union c2wr_mw_alloc {
	struct c2wr_mw_alloc_req req;
	struct c2wr_mw_alloc_rep rep;
} __attribute__((packed)) ;
928 
929 /*
930  *------------------------ WRs -----------------------
931  */
932 
/* Header common to all user (SQ/RQ) work requests. */
struct c2wr_user_hdr {
	struct c2wr_hdr hdr;		/* Has status and WR Type */
} __attribute__((packed)) ;
936 
/* QP states as reported/driven by the adapter (bit-valued). */
enum c2_qp_state {
	C2_QP_STATE_IDLE = 0x01,
	C2_QP_STATE_CONNECTING = 0x02,
	C2_QP_STATE_RTS = 0x04,		/* ready to send */
	C2_QP_STATE_CLOSING = 0x08,
	C2_QP_STATE_TERMINATE = 0x10,
	C2_QP_STATE_ERROR = 0x20,
};
945 
946 /* Completion queue entry. */
947 struct c2wr_ce {
948 	struct c2wr_hdr hdr;		/* Has status and WR Type */
949 	u64 qp_user_context;	/* c2_user_qp_t * */
950 	u32 qp_state;		/* Current QP State */
951 	u32 handle;		/* QPID or EP Handle */
952 	__be32 bytes_rcvd;		/* valid for RECV WCs */
953 	u32 stag;
954 } __attribute__((packed)) ;
955 
956 
957 /*
958  * Flags used for all post-sq WRs.  These must fit in the flags
959  * field of the struct c2wr_hdr (eight bits).
960  */
/* Post-SQ flag bits; must fit in c2wr_hdr.flags (eight bits). */
enum {
	SQ_SIGNALED = 0x01,
	SQ_READ_FENCE = 0x02,
	SQ_FENCE = 0x04,
};
966 
967 /*
968  * Common fields for all post-sq WRs.  Namely the standard header and a
969  * secondary header with fields common to all post-sq WRs.
970  */
/* Header common to all post-sq WRs (currently just the user header). */
struct c2_sq_hdr {
	struct c2wr_user_hdr user_hdr;
} __attribute__((packed));

/*
 * Same as above but for post-rq WRs.
 */
struct c2_rq_hdr {
	struct c2wr_user_hdr user_hdr;
} __attribute__((packed));
981 
982 /*
983  * use the same struct for all sends.
984  */
/*
 * Post-SQ send request; one struct serves all four CCWR_SEND_* types
 * (the type is in the header id).
 */
struct c2wr_send_req {
	struct c2_sq_hdr sq_hdr;
	__be32 sge_len;		/* total byte length of the SGE array */
	__be32 remote_stag;	/* used by the *_INV variants -- TODO confirm */
	u8 data[0];		/* SGE array */
} __attribute__((packed));

union c2wr_send {
	struct c2wr_send_req req;
	struct c2wr_ce rep;	/* completion arrives as a CQE */
} __attribute__((packed));
996 
/* Post-SQ RDMA write request. */
struct c2wr_rdma_write_req {
	struct c2_sq_hdr sq_hdr;
	__be64 remote_to;	/* remote tagged offset */
	__be32 remote_stag;
	__be32 sge_len;		/* total byte length of the SGE array */
	u8 data[0];		/* SGE array */
} __attribute__((packed));

union c2wr_rdma_write {
	struct c2wr_rdma_write_req req;
	struct c2wr_ce rep;
} __attribute__((packed));
1009 
/* Post-SQ RDMA read request: pull `length` bytes from remote to local. */
struct c2wr_rdma_read_req {
	struct c2_sq_hdr sq_hdr;
	__be64 local_to;	/* local tagged offset (destination) */
	__be64 remote_to;	/* remote tagged offset (source) */
	__be32 local_stag;
	__be32 remote_stag;
	__be32 length;
} __attribute__((packed));

union c2wr_rdma_read {
	struct c2wr_rdma_read_req req;
	struct c2wr_ce rep;
} __attribute__((packed));
1023 
/* Post-SQ memory-window bind: bind MW (mw_stag_index) to a range of
 * the MR (mr_stag_index). */
struct c2wr_mw_bind_req {
	struct c2_sq_hdr sq_hdr;
	u64 va;
	u8 stag_key;
	u8 pad[3];
	u32 mw_stag_index;
	u32 mr_stag_index;
	u32 length;
	u32 flags;		/* see enum c2_mm_flags */
} __attribute__((packed));

union c2wr_mw_bind {
	struct c2wr_mw_bind_req req;
	struct c2wr_ce rep;
} __attribute__((packed));
1039 
/* Post-SQ fast registration of an NSMR with an in-line PBL. */
struct c2wr_nsmr_fastreg_req {
	struct c2_sq_hdr sq_hdr;
	u64 va;
	u8 stag_key;
	u8 pad[3];
	u32 stag_index;
	u32 pbe_size;
	u32 fbo;		/* first byte offset into the first page */
	u32 length;
	u32 addrs_length;	/* number of paddrs below */
	/* array of paddrs (must be aligned on a 64bit boundary) */
	u64 paddrs[0];
} __attribute__((packed));

union c2wr_nsmr_fastreg {
	struct c2wr_nsmr_fastreg_req req;
	struct c2wr_ce rep;
} __attribute__((packed));
1058 
/* Post-SQ local STag invalidate. */
struct c2wr_stag_invalidate_req {
	struct c2_sq_hdr sq_hdr;
	u8 stag_key;
	u8 pad[3];
	u32 stag_index;
} __attribute__((packed));

union c2wr_stag_invalidate {
	struct c2wr_stag_invalidate_req req;
	struct c2wr_ce rep;
} __attribute__((packed));
1070 
/* Union of every post-sq WR shape, for sizing/aliasing SQ slots. */
union c2wr_sqwr {
	struct c2_sq_hdr sq_hdr;
	struct c2wr_send_req send;
	struct c2wr_send_req send_se;
	struct c2wr_send_req send_inv;
	struct c2wr_send_req send_se_inv;
	struct c2wr_rdma_write_req rdma_write;
	struct c2wr_rdma_read_req rdma_read;
	struct c2wr_mw_bind_req mw_bind;
	struct c2wr_nsmr_fastreg_req nsmr_fastreg;
	struct c2wr_stag_invalidate_req stag_inv;
} __attribute__((packed));
1083 
1084 
1085 /*
1086  * RQ WRs
1087  */
/* Post-RQ (receive) work request: header plus in-line SGE array. */
struct c2wr_rqwr {
	struct c2_rq_hdr rq_hdr;
	u8 data[0];		/* array of SGEs */
} __attribute__((packed));

union c2wr_recv {
	struct c2wr_rqwr req;
	struct c2wr_ce rep;	/* completion arrives as a CQE */
} __attribute__((packed));
1097 
1098 /*
1099  * All AEs start with this header.  Most AEs only need to convey the
1100  * information in the header.  Some, like LLP connection events, need
 * more info.  The union c2wr_ae has all the possible AEs.
1102  *
1103  * hdr.context is the user_context from the rnic_open WR.  NULL If this
1104  * is not affiliated with an rnic
1105  *
1106  * hdr.id is the AE identifier (eg;  CCAE_REMOTE_SHUTDOWN,
1107  * CCAE_LLP_CLOSE_COMPLETE)
1108  *
1109  * resource_type is one of:  C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ
1110  *
1111  * user_context is the context passed down when the host created the resource.
1112  */
/* Common header for all asynchronous events (see block comment above). */
struct c2wr_ae_hdr {
	struct c2wr_hdr hdr;
	u64 user_context;	/* user context for this res. */
	__be32 resource_type;	/* see enum c2_resource_indicator */
	__be32 resource;	/* handle for resource */
	__be32 qp_state;	/* current QP State */
} __attribute__((packed));
1120 
1121 /*
1122  * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ,
1123  * the adapter moves the QP into RTS state
1124  */
/* CCAE_ACTIVE_CONNECT_RESULTS payload (see block comment above). */
struct c2wr_ae_active_connect_results {
	struct c2wr_ae_hdr ae_hdr;
	__be32 laddr;		/* local IPv4 address */
	__be32 raddr;		/* remote IPv4 address */
	__be16 lport;		/* local port */
	__be16 rport;		/* remote port */
	__be32 private_data_length;
	u8 private_data[0];	/* data is in-line in the msg. */
} __attribute__((packed));
1134 
1135 /*
1136  * When connections are established by the stack (and the private data
1137  * MPA frame is received), the adapter will generate an event to the host.
1138  * The details of the connection, any private data, and the new connection
1139  * request handle is passed up via the CCAE_CONNECTION_REQUEST msg on the
1140  * AE queue:
1141  */
/* CCAE_CONNECTION_REQUEST payload (see block comment above). */
struct c2wr_ae_connection_request {
	struct c2wr_ae_hdr ae_hdr;
	u32 cr_handle;		/* connreq handle (sock ptr) */
	__be32 laddr;		/* local IPv4 address */
	__be32 raddr;		/* remote IPv4 address */
	__be16 lport;		/* local port */
	__be16 rport;		/* remote port */
	__be32 private_data_length;
	u8 private_data[0];	/* data is in-line in the msg. */
} __attribute__((packed));
1152 
/* Union of all AE message shapes delivered on the AE queue. */
union c2wr_ae {
	struct c2wr_ae_hdr ae_generic;
	struct c2wr_ae_active_connect_results ae_active_connect_results;
	struct c2wr_ae_connection_request ae_connection_request;
} __attribute__((packed));
1158 
/* CCWR_INIT: tell the adapter where the host-side queue structures
 * live (queues 0..2 shared pointers and message pools). */
struct c2wr_init_req {
	struct c2wr_hdr hdr;
	__be64 hint_count;
	__be64 q0_host_shared;
	__be64 q1_host_shared;
	__be64 q1_host_msg_pool;
	__be64 q2_host_shared;
	__be64 q2_host_msg_pool;
} __attribute__((packed));

struct c2wr_init_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

union c2wr_init {
	struct c2wr_init_req req;
	struct c2wr_init_rep rep;
} __attribute__((packed));
1177 
1178 /*
1179  * For upgrading flash.
1180  */
1181 
/* CCWR_FLASH_INIT: begin a flash-upgrade session; reply reports the
 * adapter-side staging buffer location and flash size. */
struct c2wr_flash_init_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
} __attribute__((packed));

struct c2wr_flash_init_rep {
	struct c2wr_hdr hdr;
	u32 adapter_flash_buf_offset;
	u32 adapter_flash_len;
} __attribute__((packed));

union c2wr_flash_init {
	struct c2wr_flash_init_req req;
	struct c2wr_flash_init_rep rep;
} __attribute__((packed));
1197 
/* Flash operation request: 'len' bytes (staged via the buffer above). */
struct c2wr_flash_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 len;
} __attribute__((packed));
1203 
/* Flash operation reply with an explicit status word. */
struct c2wr_flash_rep {
	struct c2wr_hdr hdr;
	u32 status;
} __attribute__((packed));
1208 
/* Request/reply overlay for the flash operation. */
union c2wr_flash {
	struct c2wr_flash_req req;
	struct c2wr_flash_rep rep;
} __attribute__((packed));
1213 
/* Ask the adapter to allocate 'size' bytes of adapter-side memory. */
struct c2wr_buf_alloc_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 size;
} __attribute__((packed));
1219 
/* Reply: offset/size of the allocated region; both 0 on failure. */
struct c2wr_buf_alloc_rep {
	struct c2wr_hdr hdr;
	u32 offset;		/* 0 if mem not available */
	u32 size;		/* 0 if mem not available */
} __attribute__((packed));
1225 
/* Request/reply overlay for adapter-buffer allocation. */
union c2wr_buf_alloc {
	struct c2wr_buf_alloc_req req;
	struct c2wr_buf_alloc_rep rep;
} __attribute__((packed));
1230 
/* Free a region previously returned by buf_alloc. */
struct c2wr_buf_free_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 offset;		/* Must match value from alloc */
	u32 size;		/* Must match value from alloc */
} __attribute__((packed));
1237 
/* Buf-free reply: header only; status is in hdr.result. */
struct c2wr_buf_free_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));
1241 
/*
 * Request/reply overlay for buf-free.
 * NOTE(review): the rep member is struct c2wr_ce, not c2wr_buf_free_rep --
 * looks deliberate (reply arrives as a completion event?) but confirm
 * against the consumer of this union.
 */
union c2wr_buf_free {
	struct c2wr_buf_free_req req;
	struct c2wr_ce rep;
} __attribute__((packed));
1246 
/* Commit staged data to flash at the given offset. */
struct c2wr_flash_write_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 offset;
	u32 size;
	u32 type;		/* NOTE(review): type/flags values not visible here */
	u32 flags;
} __attribute__((packed));
1255 
/* Flash-write reply with an explicit status word. */
struct c2wr_flash_write_rep {
	struct c2wr_hdr hdr;
	u32 status;
} __attribute__((packed));
1260 
/* Request/reply overlay for flash-write. */
union c2wr_flash_write {
	struct c2wr_flash_write_req req;
	struct c2wr_flash_write_rep rep;
} __attribute__((packed));
1265 
1266 /*
1267  * Messages for LLP connection setup.
1268  */
1269 
1270 /*
1271  * Listen Request.  This allocates a listening endpoint to allow passive
1272  * connection setup.  Newly established LLP connections are passed up
1273  * via an AE.  See c2wr_ae_connection_request_t
1274  */
1275 struct c2wr_ep_listen_create_req {
1276 	struct c2wr_hdr hdr;
1277 	u64 user_context;	/* returned in AEs. */
1278 	u32 rnic_handle;
1279 	__be32 local_addr;		/* local addr, or 0  */
1280 	__be16 local_port;		/* 0 means "pick one" */
1281 	u16 pad;
1282 	__be32 backlog;		/* tradional tcp listen bl */
1283 } __attribute__((packed));
1284 
/* Listen-create reply: the new endpoint handle and the bound port. */
struct c2wr_ep_listen_create_rep {
	struct c2wr_hdr hdr;
	u32 ep_handle;		/* handle to new listening ep */
	u16 local_port;		/* resulting port... */
	u16 pad;		/* alignment filler */
} __attribute__((packed));
1291 
/* Request/reply overlay for listen-create. */
union c2wr_ep_listen_create {
	struct c2wr_ep_listen_create_req req;
	struct c2wr_ep_listen_create_rep rep;
} __attribute__((packed));
1296 
/* Tear down a listening endpoint created above. */
struct c2wr_ep_listen_destroy_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 ep_handle;
} __attribute__((packed));
1302 
/* Listen-destroy reply: header only; status is in hdr.result. */
struct c2wr_ep_listen_destroy_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));
1306 
/* Request/reply overlay for listen-destroy. */
union c2wr_ep_listen_destroy {
	struct c2wr_ep_listen_destroy_req req;
	struct c2wr_ep_listen_destroy_rep rep;
} __attribute__((packed));
1311 
/* Query the attributes of an LLP endpoint. */
struct c2wr_ep_query_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 ep_handle;
} __attribute__((packed));
1317 
/*
 * Endpoint query reply.
 * NOTE(review): unlike sibling structs, the address/port fields here are
 * plain u32/u16 rather than __be32/__be16 -- byte order is ambiguous;
 * confirm against whoever consumes this reply.
 */
struct c2wr_ep_query_rep {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 local_addr;
	u32 remote_addr;
	u16 local_port;
	u16 remote_port;
} __attribute__((packed));
1326 
/* Request/reply overlay for endpoint query. */
union c2wr_ep_query {
	struct c2wr_ep_query_req req;
	struct c2wr_ep_query_rep rep;
} __attribute__((packed));
1331 
1332 
1333 /*
1334  * The host passes this down to indicate acceptance of a pending iWARP
1335  * connection.  The cr_handle was obtained from the CONNECTION_REQUEST
1336  * AE passed up by the adapter.  See c2wr_ae_connection_request_t.
1337  */
1338 struct c2wr_cr_accept_req {
1339 	struct c2wr_hdr hdr;
1340 	u32 rnic_handle;
1341 	u32 qp_handle;		/* QP to bind to this LLP conn */
1342 	u32 ep_handle;		/* LLP  handle to accept */
1343 	__be32 private_data_length;
1344 	u8 private_data[0];	/* data in-line in msg. */
1345 } __attribute__((packed));
1346 
1347 /*
1348  * adapter sends reply when private data is successfully submitted to
1349  * the LLP.
1350  */
1351 struct c2wr_cr_accept_rep {
1352 	struct c2wr_hdr hdr;
1353 } __attribute__((packed));
1354 
/* Request/reply overlay for connection-request accept. */
union c2wr_cr_accept {
	struct c2wr_cr_accept_req req;
	struct c2wr_cr_accept_rep rep;
} __attribute__((packed));
1359 
1360 /*
1361  * The host sends this down if a given iWARP connection request was
1362  * rejected by the consumer.  The cr_handle was obtained from a
1363  * previous c2wr_ae_connection_request_t AE sent by the adapter.
1364  */
1365 struct  c2wr_cr_reject_req {
1366 	struct c2wr_hdr hdr;
1367 	u32 rnic_handle;
1368 	u32 ep_handle;		/* LLP handle to reject */
1369 } __attribute__((packed));
1370 
1371 /*
1372  * Dunno if this is needed, but we'll add it for now.  The adapter will
1373  * send the reject_reply after the LLP endpoint has been destroyed.
1374  */
1375 struct  c2wr_cr_reject_rep {
1376 	struct c2wr_hdr hdr;
1377 } __attribute__((packed));
1378 
/* Request/reply overlay for connection-request reject. */
union c2wr_cr_reject {
	struct c2wr_cr_reject_req req;
	struct c2wr_cr_reject_rep rep;
} __attribute__((packed));
1383 
1384 /*
1385  * console command.  Used to implement a debug console over the verbs
1386  * request and reply queues.
1387  */
1388 
1389 /*
1390  * Console request message.  It contains:
1391  *	- message hdr with id = CCWR_CONSOLE
1392  *	- the physaddr/len of host memory to be used for the reply.
1393  *	- the command string.  eg:  "netstat -s" or "zoneinfo"
1394  */
1395 struct c2wr_console_req {
1396 	struct c2wr_hdr hdr;		/* id = CCWR_CONSOLE */
1397 	u64 reply_buf;		/* pinned host buf for reply */
1398 	u32 reply_buf_len;	/* length of reply buffer */
1399 	u8 command[0];		/* NUL terminated ascii string */
1400 	/* containing the command req */
1401 } __attribute__((packed));
1402 
1403 /*
1404  * flags used in the console reply.
1405  */
1406 enum c2_console_flags {
1407 	CONS_REPLY_TRUNCATED = 0x00000001	/* reply was truncated */
1408 } __attribute__((packed));
1409 
1410 /*
1411  * Console reply message.
1412  * hdr.result contains the c2_status_t error if the reply was _not_ generated,
1413  * or C2_OK if the reply was generated.
1414  */
1415 struct c2wr_console_rep {
1416 	struct c2wr_hdr hdr;		/* id = CCWR_CONSOLE */
1417 	u32 flags;
1418 } __attribute__((packed));
1419 
/* Request/reply overlay for the debug console. */
union c2wr_console {
	struct c2wr_console_req req;
	struct c2wr_console_rep rep;
} __attribute__((packed));
1424 
1425 
1426 /*
1427  * Giant union with all WRs.  Makes life easier...
1428  */
1429 union c2wr {
1430 	struct c2wr_hdr hdr;
1431 	struct c2wr_user_hdr user_hdr;
1432 	union c2wr_rnic_open rnic_open;
1433 	union c2wr_rnic_query rnic_query;
1434 	union c2wr_rnic_getconfig rnic_getconfig;
1435 	union c2wr_rnic_setconfig rnic_setconfig;
1436 	union c2wr_rnic_close rnic_close;
1437 	union c2wr_cq_create cq_create;
1438 	union c2wr_cq_modify cq_modify;
1439 	union c2wr_cq_destroy cq_destroy;
1440 	union c2wr_pd_alloc pd_alloc;
1441 	union c2wr_pd_dealloc pd_dealloc;
1442 	union c2wr_srq_create srq_create;
1443 	union c2wr_srq_destroy srq_destroy;
1444 	union c2wr_qp_create qp_create;
1445 	union c2wr_qp_query qp_query;
1446 	union c2wr_qp_modify qp_modify;
1447 	union c2wr_qp_destroy qp_destroy;
1448 	struct c2wr_qp_connect qp_connect;
1449 	union c2wr_nsmr_stag_alloc nsmr_stag_alloc;
1450 	union c2wr_nsmr_register nsmr_register;
1451 	union c2wr_nsmr_pbl nsmr_pbl;
1452 	union c2wr_mr_query mr_query;
1453 	union c2wr_mw_query mw_query;
1454 	union c2wr_stag_dealloc stag_dealloc;
1455 	union c2wr_sqwr sqwr;
1456 	struct c2wr_rqwr rqwr;
1457 	struct c2wr_ce ce;
1458 	union c2wr_ae ae;
1459 	union c2wr_init init;
1460 	union c2wr_ep_listen_create ep_listen_create;
1461 	union c2wr_ep_listen_destroy ep_listen_destroy;
1462 	union c2wr_cr_accept cr_accept;
1463 	union c2wr_cr_reject cr_reject;
1464 	union c2wr_console console;
1465 	union c2wr_flash_init flash_init;
1466 	union c2wr_flash flash;
1467 	union c2wr_buf_alloc buf_alloc;
1468 	union c2wr_buf_free buf_free;
1469 	union c2wr_flash_write flash_write;
1470 } __attribute__((packed));
1471 
1472 
1473 /*
1474  * Accessors for the wr fields that are packed together tightly to
1475  * reduce the wr message size.  The wr arguments are void* so that
1476  * either a struct c2wr*, a struct c2wr_hdr*, or a pointer to any of the types
1477  * in the struct c2wr union can be passed in.
1478  */
c2_wr_get_id(void * wr)1479 static __inline__ u8 c2_wr_get_id(void *wr)
1480 {
1481 	return ((struct c2wr_hdr *) wr)->id;
1482 }
/* Store the message id into the WR's header. */
static __inline__ void c2_wr_set_id(void *wr, u8 id)
{
	struct c2wr_hdr *hdr = (struct c2wr_hdr *) wr;

	hdr->id = id;
}
c2_wr_get_result(void * wr)1487 static __inline__ u8 c2_wr_get_result(void *wr)
1488 {
1489 	return ((struct c2wr_hdr *) wr)->result;
1490 }
/* Store the result/status byte into the WR's header. */
static __inline__ void c2_wr_set_result(void *wr, u8 result)
{
	struct c2wr_hdr *hdr = (struct c2wr_hdr *) wr;

	hdr->result = result;
}
c2_wr_get_flags(void * wr)1495 static __inline__ u8 c2_wr_get_flags(void *wr)
1496 {
1497 	return ((struct c2wr_hdr *) wr)->flags;
1498 }
/* Store the flags byte into the WR's header. */
static __inline__ void c2_wr_set_flags(void *wr, u8 flags)
{
	struct c2wr_hdr *hdr = (struct c2wr_hdr *) wr;

	hdr->flags = flags;
}
c2_wr_get_sge_count(void * wr)1503 static __inline__ u8 c2_wr_get_sge_count(void *wr)
1504 {
1505 	return ((struct c2wr_hdr *) wr)->sge_count;
1506 }
/* Store the scatter/gather element count into the WR's header. */
static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
{
	struct c2wr_hdr *hdr = (struct c2wr_hdr *) wr;

	hdr->sge_count = sge_count;
}
c2_wr_get_wqe_count(void * wr)1511 static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
1512 {
1513 	return ((struct c2wr_hdr *) wr)->wqe_count;
1514 }
/*
 * Store wqe_count into the WR's header.
 * NOTE(review): the parameter is u32 while the field is __be32 and no
 * byte swap happens here -- callers apparently pass an already
 * big-endian value; confirm at the call sites.
 */
static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count)
{
	struct c2wr_hdr *hdr = (struct c2wr_hdr *) wr;

	hdr->wqe_count = wqe_count;
}
1519 
1520 #endif				/* _C2_WR_H_ */
1521