/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#ifndef __ECORE_RDMA_API_H__
#define __ECORE_RDMA_API_H__

#ifndef LINUX_REMOVE
#define ETH_ALEN 6
#endif

#ifndef __EXTRACT__LINUX__

enum ecore_roce_ll2_tx_dest
{
	ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
	ECORE_ROCE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
	ECORE_ROCE_LL2_TX_DEST_MAX
};

/* HW/FW RoCE limitations (external; for internal limits see ecore_roce.h) */
/* CNQ size limitation:
 * The CNQ size should be set to twice the number of CQs, since for each CQ
 * one element may be inserted into the CNQ, and another element is used per
 * CQ to accommodate a possible race in the arm mechanism.
 * The FW supports a CNQ of at most 64k-1 elements, and this is apparently an
 * issue - note that the number of QPs can reach 32k, giving 64k CQs and 128k
 * CNQ elements.
 * Luckily the FW can buffer CNQ elements, avoiding an overflow at the expense
 * of performance.
 */
#define ECORE_RDMA_MAX_CNQ_SIZE               (0xFFFF) /* 2^16 - 1 */
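
/* Example (illustrative sketch, not part of the ecore API): sizing a CNQ as
 * twice the number of CQs, clamped to the FW limit above. "num_cqs" is a
 * hypothetical driver-side variable.
 *
 *	u32 cnq_size = 2 * num_cqs;
 *
 *	if (cnq_size > ECORE_RDMA_MAX_CNQ_SIZE)
 *		cnq_size = ECORE_RDMA_MAX_CNQ_SIZE;
 */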

/* rdma interface */
enum ecore_rdma_tid_type
{
	ECORE_RDMA_TID_REGISTERED_MR,
	ECORE_RDMA_TID_FMR,
	ECORE_RDMA_TID_MW_TYPE1,
	ECORE_RDMA_TID_MW_TYPE2A
};

enum ecore_roce_qp_state {
	ECORE_ROCE_QP_STATE_RESET, /* Reset */
	ECORE_ROCE_QP_STATE_INIT,  /* Initialized */
	ECORE_ROCE_QP_STATE_RTR,   /* Ready to Receive */
	ECORE_ROCE_QP_STATE_RTS,   /* Ready to Send */
	ECORE_ROCE_QP_STATE_SQD,   /* Send Queue Draining */
	ECORE_ROCE_QP_STATE_ERR,   /* Error */
	ECORE_ROCE_QP_STATE_SQE    /* Send Queue Error */
};

typedef
void (*affiliated_event_t)(void	*context,
			   u8	fw_event_code,
			   void	*fw_handle);

typedef
void (*unaffiliated_event_t)(void *context,
			     u8   event_code);

struct ecore_rdma_events {
	void			*context;
	affiliated_event_t	affiliated_event;
	unaffiliated_event_t	unaffiliated_event;
};
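
/* Example (illustrative sketch): wiring up event callbacks before calling
 * ecore_rdma_start(). The callback names and "my_dev" are hypothetical
 * upper-layer objects.
 *
 *	static void my_affiliated_cb(void *ctx, u8 code, void *fw_handle);
 *	static void my_unaffiliated_cb(void *ctx, u8 code);
 *
 *	struct ecore_rdma_events events = {
 *		.context		= my_dev,
 *		.affiliated_event	= my_affiliated_cb,
 *		.unaffiliated_event	= my_unaffiliated_cb,
 *	};
 */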

struct ecore_rdma_device {
	/* Vendor specific information */
	u32	vendor_id;
	u32	vendor_part_id;
	u32	hw_ver;
	u64	fw_ver;

	u64	node_guid; /* node GUID */
	u64	sys_image_guid; /* System image GUID */

	u8	max_cnq;
	u8	max_sge; /* The maximum number of scatter/gather entries
			  * per Work Request supported
			  */
	u8	max_srq_sge; /* The maximum number of scatter/gather entries
			      * per Work Request supported for SRQ
			      */
	u16	max_inline;
	u32	max_wqe; /* The maximum number of outstanding work
			  * requests on any Work Queue supported
			  */
	u32	max_srq_wqe; /* The maximum number of outstanding work
			      * requests on any Work Queue supported for SRQ
			      */
	u8	max_qp_resp_rd_atomic_resc; /* The maximum number of RDMA Reads
					     * & atomic operations that can be
					     * outstanding per QP
					     */

	u8	max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
					    * initiation of RDMA Read
					    * & atomic operations
					    */
	u64	max_dev_resp_rd_atomic_resc;
	u32	max_cq;
	u32	max_qp;
	u32	max_srq; /* Maximum number of SRQs */
	u32	max_mr; /* Maximum number of MRs supported by this device */
	u64	max_mr_size; /* Size (in bytes) of the largest contiguous memory
			      * block that can be registered by this device
			      */
	u32	max_cqe;
	u32	max_mw; /* The maximum number of memory windows supported */
	u32	max_fmr;
	u32	max_mr_mw_fmr_pbl;
	u64	max_mr_mw_fmr_size;
	u32	max_pd; /* The maximum number of protection domains supported */
	u32	max_ah;
	u8	max_pkey;
	u16	max_srq_wr; /* Maximum number of WRs per SRQ */
	u8	max_stats_queues; /* Maximum number of statistics queues */
	u32	dev_caps;

	/* Ability to support RNR-NAK generation */
#define ECORE_RDMA_DEV_CAP_RNR_NAK_MASK				0x1
#define ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT			0
	/* Ability to support shutdown port */
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT			1
	/* Ability to support port active event */
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT		2
	/* Ability to support port change event */
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT		3
	/* Ability to support system image GUID */
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_SHIFT			4
	/* Ability to support a bad P_Key counter */
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT			5
	/* Ability to support atomic operations */
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK			0x1
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT			6
	/* Ability to support CQ resizing */
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_SHIFT			7
	/* Ability to support modifying the maximum number of
	 * outstanding work requests per QP
	 */
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT			8
	/* Ability to support automatic path migration */
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK			0x1
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT			9
	/* Ability to support the base memory management extensions */
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT		10
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT			11
	/* Ability to support multiple page sizes per memory region */
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT		12
	/* Ability to support block list physical buffer list */
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_SHIFT			13
	/* Ability to support zero-based virtual addresses */
#define ECORE_RDMA_DEV_CAP_ZBVA_MASK				0x1
#define ECORE_RDMA_DEV_CAP_ZBVA_SHIFT				14
	/* Ability to support local invalidate fencing */
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT		15
	/* Ability to support loopback on QP */
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_SHIFT			16
	u64	page_size_caps;
	u8	dev_ack_delay;
	u32	reserved_lkey; /* Value of reserved L_key */
	u32	bad_pkey_counter; /* Bad P_key counter support indicator */
	struct ecore_rdma_events events;
};

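/* Example (illustrative sketch): testing a capability bit in dev_caps with
 * the MASK/SHIFT pairs above. "rdma_cxt" is assumed to be a valid ecore RDMA
 * context.
 *
 *	struct ecore_rdma_device *dev = ecore_rdma_query_device(rdma_cxt);
 *	bool atomics_supported =
 *	    (dev->dev_caps >> ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT) &
 *	    ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK;
 */
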
enum ecore_port_state {
	ECORE_RDMA_PORT_UP,
	ECORE_RDMA_PORT_DOWN,
};

enum ecore_roce_capability {
	ECORE_ROCE_V1	= 1 << 0,
	ECORE_ROCE_V2	= 1 << 1,
};

struct ecore_rdma_port {
	enum ecore_port_state port_state;
	int	link_speed;
	u64	max_msg_size;
	u8	source_gid_table_len;
	void	*source_gid_table_ptr;
	u8	pkey_table_len;
	void	*pkey_table_ptr;
	u32	pkey_bad_counter;
	enum ecore_roce_capability capability;
};

struct ecore_rdma_cnq_params
{
	u8  num_pbl_pages; /* Number of pages in the PBL allocated
			    * for this queue
			    */
	u64 pbl_ptr; /* Address of the first entry of the queue PBL */
};

/* The CQ mode affects the CQ doorbell transaction size.
 * 64/32-bit machines should configure to 32/16 bits, respectively.
 */
enum ecore_rdma_cq_mode {
	ECORE_RDMA_CQ_MODE_16_BITS,
	ECORE_RDMA_CQ_MODE_32_BITS,
};

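/* Example (illustrative sketch): choosing the CQ mode from the native word
 * size, per the rule above.
 *
 *	enum ecore_rdma_cq_mode cq_mode =
 *	    (sizeof(void *) == 8) ? ECORE_RDMA_CQ_MODE_32_BITS
 *				  : ECORE_RDMA_CQ_MODE_16_BITS;
 */
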
struct ecore_roce_dcqcn_params {
	u8	notification_point;
	u8	reaction_point;

	/* fields for notification point */
	u32	cnp_send_timeout;

	/* fields for reaction point */
	u32	rl_bc_rate;  /* Byte Counter Limit. */
	u16	rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
	u16	rl_r_ai;     /* Active increase rate */
	u16	rl_r_hai;    /* Hyper active increase rate */
	u16	dcqcn_g;     /* Alpha update gain in 1/64K resolution */
	u32	dcqcn_k_us;  /* Alpha update interval */
	u32	dcqcn_timeout_us;
};

#ifdef CONFIG_ECORE_IWARP

#define ECORE_MPA_RTR_TYPE_NONE		0 /* No RTR type */
#define ECORE_MPA_RTR_TYPE_ZERO_SEND	(1 << 0)
#define ECORE_MPA_RTR_TYPE_ZERO_WRITE	(1 << 1)
#define ECORE_MPA_RTR_TYPE_ZERO_READ	(1 << 2)

enum ecore_mpa_rev {
	ECORE_MPA_REV1,
	ECORE_MPA_REV2,
};

struct ecore_iwarp_params {
	u32				rcv_wnd_size;
	u16				ooo_num_rx_bufs;
#define ECORE_IWARP_TS_EN (1 << 0)
#define ECORE_IWARP_DA_EN (1 << 1)
	u8				flags;
	u8				crc_needed;
	enum ecore_mpa_rev		mpa_rev;
	u8				mpa_rtr;
	u8				mpa_peer2peer;
};

#endif

struct ecore_roce_params {
	enum ecore_rdma_cq_mode		cq_mode;
	struct ecore_roce_dcqcn_params	dcqcn_params;
	u8				ll2_handle; /* required for UD QPs */
};

struct ecore_rdma_start_in_params {
	struct ecore_rdma_events	*events;
	struct ecore_rdma_cnq_params	cnq_pbl_list[128];
	u8				desired_cnq;
	u16				max_mtu;
	u8				mac_addr[ETH_ALEN];
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_params	iwarp;
#endif
	struct ecore_roce_params	roce;
};

struct ecore_rdma_add_user_out_params {
	/* output variables (given to miniport) */
	u16	dpi;
	u64	dpi_addr;
	u64	dpi_phys_addr;
	u32	dpi_size;
	u16	wid_count;
};

/* Returns the CQ CID or zero in case of failure */
struct ecore_rdma_create_cq_in_params {
	/* input variables (given by miniport) */
	u32	cq_handle_lo; /* CQ handle to be written in CNQ */
	u32	cq_handle_hi;
	u32	cq_size;
	u16	dpi;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CQ pages
				    */
	u8	cnq_id;
	u16	int_timeout;
};

#endif

struct ecore_rdma_resize_cq_in_params {
	/* input variables (given by miniport) */

	u16	icid;
	u32	cq_size;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CQ pages
				    */
};

#ifndef __EXTRACT__LINUX__

enum roce_mode
{
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

struct ecore_rdma_create_qp_in_params {
	/* input variables (given by miniport) */
	u32	qp_handle_lo; /* QP handle to be written in CQE */
	u32	qp_handle_hi;
	u32	qp_handle_async_lo; /* QP handle to be written in async event */
	u32	qp_handle_async_hi;
	bool	use_srq;
	bool	signal_all;
	bool	fmr_and_reserved_lkey;
	u16	pd;
	u16	dpi;
	u16	sq_cq_id;
	u16	sq_num_pages;
	u64	sq_pbl_ptr;	/* Not relevant for iWARP */
	u8	max_sq_sges;
	u16	rq_cq_id;
	u16	rq_num_pages;
	u64	rq_pbl_ptr;	/* Not relevant for iWARP */
	u16	srq_id;
	u8	stats_queue;
};

struct ecore_rdma_create_qp_out_params {
	/* output variables (given to miniport) */
	u32		qp_id;
	u16		icid;
	void		*rq_pbl_virt;
	dma_addr_t	rq_pbl_phys;
	void		*sq_pbl_virt;
	dma_addr_t	sq_pbl_phys;
};

struct ecore_rdma_destroy_cq_in_params {
	/* input variables (given by miniport) */
	u16 icid;
};

struct ecore_rdma_destroy_cq_out_params {
	/* output variables, provided to the upper layer */

	/* Sequence number of completion notification sent for the CQ on
	 * the associated CNQ
	 */
	u16	num_cq_notif;
};

/* An ECORE GID can be used as an IPv4/6 address in RoCE v2 */
union ecore_gid {
	u8 bytes[16];
	u16 words[8];
	u32 dwords[4];
	u64 qwords[2];
	u32 ipv4_addr;
};

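/* Example (illustrative sketch): carrying an IPv4 address in a GID for
 * RoCE v2. "my_ipv4" is a hypothetical address value; its expected byte
 * order is an assumption of this sketch.
 *
 *	union ecore_gid gid = { { 0 } };
 *
 *	gid.ipv4_addr = my_ipv4;
 */
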
struct ecore_rdma_modify_qp_in_params {
	/* input variables (given by miniport) */
	u32		modify_flags;
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14

	enum ecore_roce_qp_state	new_state;
	u16		pkey;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	u32		dest_qp;
	u16		mtu;
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u32		flow_label; /* ignored in IPv4 */
	union ecore_gid	sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid	dgid; /* GRH DGID; IPv4/6 Destination IP */
	u16		udp_src_port; /* RoCEv2 only */

	u16		vlan_id;

	u32		rq_psn;
	u32		sq_psn;
	u8		max_rd_atomic_resp;
	u8		max_rd_atomic_req;
	u32		ack_timeout;
	u8		retry_cnt;
	u8		rnr_retry_cnt;
	u8		min_rnr_nak_timer;
	bool		sqd_async;
	u8		remote_mac_addr[6];
	u8		local_mac_addr[6];
	bool		use_local_mac;
	enum roce_mode	roce_mode;
};

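/* Example (illustrative sketch): marking a new QP state as valid in
 * modify_flags using the MASK/SHIFT pairs above, then requesting the RTS
 * state.
 *
 *	struct ecore_rdma_modify_qp_in_params mp = { 0 };
 *
 *	mp.modify_flags |= ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK <<
 *	    ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT;
 *	mp.new_state = ECORE_ROCE_QP_STATE_RTS;
 */
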
struct ecore_rdma_query_qp_out_params {
	/* output variables (given to miniport) */
	enum ecore_roce_qp_state	state;
	u32		rq_psn; /* responder */
	u32		sq_psn; /* requester */
	bool		draining; /* send queue is draining */
	u16		mtu;
	u32		dest_qp;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
	u32		flow_label; /* ignored in IPv4 */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u32		timeout;
	u8		rnr_retry;
	u8		retry_cnt;
	u8		min_rnr_nak_timer;
	u16		pkey_index;
	u8		max_rd_atomic;
	u8		max_dest_rd_atomic;
	bool		sqd_async;
};

struct ecore_rdma_register_tid_in_params {
	/* input variables (given by miniport) */
	u32	itid; /* index only, 18 bits long; lkey = itid << 8 | key */
	enum ecore_rdma_tid_type tid_type;
	u8	key;
	u16	pd;
	bool	local_read;
	bool	local_write;
	bool	remote_read;
	bool	remote_write;
	bool	remote_atomic;
	bool	mw_bind;
	u64	pbl_ptr;
	bool	pbl_two_level;
	u8	pbl_page_size_log; /* for the pages that contain the pointers
				    * to the MR pages
				    */
	u8	page_size_log; /* for the MR pages */
	u32	fbo;
	u64	length; /* only the lower 40 bits are valid */
	u64	vaddr;
	bool	zbva;
	bool	phy_mr;
	bool	dma_mr;

	/* DIF related fields */
	bool	dif_enabled;
	u64	dif_error_addr;
	u64	dif_runt_addr;
};

struct ecore_rdma_create_srq_in_params {
	u64 pbl_base_addr;
	u64 prod_pair_addr;
	u16 num_pages;
	u16 pd_id;
	u16 page_size;
};

struct ecore_rdma_create_srq_out_params {
	u16 srq_id;
};

struct ecore_rdma_destroy_srq_in_params {
	u16 srq_id;
};

struct ecore_rdma_modify_srq_in_params {
	u32 wqe_limit;
	u16 srq_id;
};
#endif

struct ecore_rdma_resize_cq_out_params {
	/* output variables, provided to the upper layer */
	u32 prod; /* CQ producer value on old PBL */
	u32 cons; /* CQ consumer value on old PBL */
};

struct ecore_rdma_resize_cnq_in_params {
	/* input variables (given by miniport) */
	u32	cnq_id;
	u32	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CNQ pages
				    */
	u64	pbl_ptr;
};

#ifndef __EXTRACT__LINUX__
struct ecore_rdma_stats_out_params {
	u64	sent_bytes;
	u64	sent_pkts;
	u64	rcv_bytes;
	u64	rcv_pkts;

	/* RoCE only */
	u64	icrc_errors;		/* wraps at 32 bits */
	u64	retransmit_events;	/* wraps at 32 bits */
	u64	silent_drops;		/* wraps at 16 bits */
	u64	rnr_nacks_sent;		/* wraps at 16 bits */

	/* iWARP only */
	u64	iwarp_tx_fast_rxmit_cnt;
	u64	iwarp_tx_slow_start_cnt;
	u64	unalign_rx_comp;
};

struct ecore_rdma_counters_out_params {
	u64	pd_count;
	u64	max_pd;
	u64	dpi_count;
	u64	max_dpi;
	u64	cq_count;
	u64	max_cq;
	u64	qp_count;
	u64	max_qp;
	u64	tid_count;
	u64	max_tid;
};
#endif

enum _ecore_status_t
ecore_rdma_add_user(void *rdma_cxt,
		    struct ecore_rdma_add_user_out_params *out_params);

enum _ecore_status_t
ecore_rdma_alloc_pd(void *rdma_cxt,
		    u16	*pd);

enum _ecore_status_t
ecore_rdma_alloc_tid(void *rdma_cxt,
		     u32 *tid);

enum _ecore_status_t
ecore_rdma_create_cq(void *rdma_cxt,
		     struct ecore_rdma_create_cq_in_params *params,
		     u16 *icid);

/* Returns a pointer to the responder's CID, which is also a pointer to the
 * ecore_qp_params struct. Returns NULL in case of failure.
 */
struct ecore_rdma_qp*
ecore_rdma_create_qp(void *rdma_cxt,
		     struct ecore_rdma_create_qp_in_params  *in_params,
		     struct ecore_rdma_create_qp_out_params *out_params);

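/* Example (illustrative sketch): creating a QP and checking for failure.
 * "rdma_cxt", "in" and "out" are assumed to have been prepared by the
 * caller.
 *
 *	struct ecore_rdma_qp *qp;
 *
 *	qp = ecore_rdma_create_qp(rdma_cxt, &in, &out);
 *	if (qp == NULL)
 *		return; // handle allocation failure
 */
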
enum _ecore_status_t
ecore_roce_create_ud_qp(void *rdma_cxt,
			struct ecore_rdma_create_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_deregister_tid(void *rdma_cxt,
			  u32		tid);

enum _ecore_status_t
ecore_rdma_destroy_cq(void *rdma_cxt,
		      struct ecore_rdma_destroy_cq_in_params  *in_params,
		      struct ecore_rdma_destroy_cq_out_params *out_params);

enum _ecore_status_t
ecore_rdma_destroy_qp(void *rdma_cxt,
		      struct ecore_rdma_qp *qp);

enum _ecore_status_t
ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid);

void
ecore_rdma_free_pd(void *rdma_cxt,
		   u16	pd);

void
ecore_rdma_free_tid(void *rdma_cxt,
		    u32	tid);

enum _ecore_status_t
ecore_rdma_modify_qp(void *rdma_cxt,
		     struct ecore_rdma_qp *qp,
		     struct ecore_rdma_modify_qp_in_params *params);

struct ecore_rdma_device*
ecore_rdma_query_device(void *rdma_cxt);

struct ecore_rdma_port*
ecore_rdma_query_port(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_qp(void *rdma_cxt,
		    struct ecore_rdma_qp		  *qp,
		    struct ecore_rdma_query_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_register_tid(void *rdma_cxt,
			struct ecore_rdma_register_tid_in_params *params);

void ecore_rdma_remove_user(void *rdma_cxt,
			    u16		dpi);

enum _ecore_status_t
ecore_rdma_resize_cnq(void *rdma_cxt,
		      struct ecore_rdma_resize_cnq_in_params *in_params);

/* Returns the CQ CID or zero in case of failure */
enum _ecore_status_t
ecore_rdma_resize_cq(void *rdma_cxt,
		     struct ecore_rdma_resize_cq_in_params  *in_params,
		     struct ecore_rdma_resize_cq_out_params *out_params);

/* Before calling ecore_rdma_start(), the upper layer (VBD/qed) should fill
 * in the page size and MTU in the hwfn context.
 */
enum _ecore_status_t
ecore_rdma_start(void *p_hwfn,
		 struct ecore_rdma_start_in_params *params);

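/* Example (illustrative sketch): a minimal start sequence. "p_hwfn",
 * "events", "my_cnq_pbl" and "my_mac" are hypothetical caller-owned
 * objects; error handling is elided.
 *
 *	struct ecore_rdma_start_in_params sp = { 0 };
 *
 *	sp.events = &events;
 *	sp.desired_cnq = 1;
 *	sp.cnq_pbl_list[0] = my_cnq_pbl;
 *	sp.max_mtu = 1500;
 *	memcpy(sp.mac_addr, my_mac, ETH_ALEN);
 *	(void)ecore_rdma_start(p_hwfn, &sp);
 */
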
enum _ecore_status_t
ecore_rdma_stop(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
		       struct ecore_rdma_stats_out_params *out_params);

enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
			  struct ecore_rdma_counters_out_params *out_params);

u32 ecore_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);

u32 ecore_rdma_query_cau_timer_res(void);

void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);

void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);

enum _ecore_status_t
ecore_rdma_create_srq(void *rdma_cxt,
		      struct ecore_rdma_create_srq_in_params *in_params,
		      struct ecore_rdma_create_srq_out_params *out_params);

enum _ecore_status_t
ecore_rdma_destroy_srq(void *rdma_cxt,
		       struct ecore_rdma_destroy_srq_in_params *in_params);

enum _ecore_status_t
ecore_rdma_modify_srq(void *rdma_cxt,
		      struct ecore_rdma_modify_srq_in_params *in_params);

#ifdef CONFIG_ECORE_IWARP

/* iWARP API */

#ifndef __EXTRACT__LINUX__

enum ecore_iwarp_event_type {
	ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
	ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
					     * (ACK on MPA response)
					     */
	ECORE_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
	ECORE_IWARP_EVENT_DISCONNECT,
	ECORE_IWARP_EVENT_CLOSE,
	ECORE_IWARP_EVENT_IRQ_FULL,
	ECORE_IWARP_EVENT_RQ_EMPTY,
	ECORE_IWARP_EVENT_LLP_TIMEOUT,
	ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	ECORE_IWARP_EVENT_CQ_OVERFLOW,
	ECORE_IWARP_EVENT_QP_CATASTROPHIC,
	ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY,
	ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	ECORE_IWARP_EVENT_TERMINATE_RECEIVED
};

enum ecore_tcp_ip_version
{
	ECORE_TCP_IPV4,
	ECORE_TCP_IPV6,
};

struct ecore_iwarp_cm_info {
	enum ecore_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_cm_event_params {
	enum ecore_iwarp_event_type event;
	const struct ecore_iwarp_cm_info *cm_info;
	void *ep_context; /* To be passed to accept call */
	int status;
};

typedef int (*iwarp_event_handler)(void *context,
				   struct ecore_iwarp_cm_event_params *event);

/* Active side connect flow:
 * The upper layer driver calls ecore_iwarp_connect(). The function is
 * blocking, i.e. it returns after the TCP connection is established.
 * After the MPA connection is established, the
 * ECORE_IWARP_EVENT_ACTIVE_COMPLETE event is passed to the upper layer
 * driver using the event_cb passed in ecore_iwarp_connect_in. Information
 * about the established connection is provided in the event data. (See the
 * example sketch after ecore_iwarp_connect_out below.)
 */
struct ecore_iwarp_connect_in {
	iwarp_event_handler event_cb;
	void *cb_context;
	struct ecore_rdma_qp *qp;
	struct ecore_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
};

struct ecore_iwarp_connect_out {
	void *ep_context;
};

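/* Example (illustrative sketch): an active side connect. "rdma_cxt",
 * "my_event_cb", "my_ctx" and "qp" are hypothetical caller-owned objects;
 * cm_info and MAC setup are elided.
 *
 *	struct ecore_iwarp_connect_in in = { 0 };
 *	struct ecore_iwarp_connect_out out = { 0 };
 *
 *	in.event_cb = my_event_cb;
 *	in.cb_context = my_ctx;
 *	in.qp = qp;
 *	(void)ecore_iwarp_connect(rdma_cxt, &in, &out);
 *	// ECORE_IWARP_EVENT_ACTIVE_COMPLETE arrives later via my_event_cb
 */
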
/* Passive side connect flow:
 * The upper layer driver calls ecore_iwarp_create_listen(). Once a SYN
 * packet arrives that matches an IP/port being listened on, ecore will
 * offload the TCP connection. After an MPA request is received on the
 * offloaded connection, the ECORE_IWARP_EVENT_MPA_REQUEST event is sent to
 * the upper layer driver using the event_cb passed below, with the event
 * data placed in the event parameter. After the upper layer driver
 * processes the event, ecore_iwarp_accept() or ecore_iwarp_reject() should
 * be called to continue MPA negotiation. Once negotiation completes, the
 * ECORE_IWARP_EVENT_PASSIVE_COMPLETE event is passed to the event_cb
 * originally passed in the ecore_iwarp_listen_in structure. (See the
 * example sketch after ecore_iwarp_listen_out below.)
 */
struct ecore_iwarp_listen_in {
	iwarp_event_handler event_cb; /* Callback func for delivering events */
	void *cb_context; /* passed to event_cb */
	u32 max_backlog; /* Max num of pending incoming connection requests */
	enum ecore_tcp_ip_version ip_version;
	u32 ip_addr[4];
	u16 port;
	u16 vlan;
};

struct ecore_iwarp_listen_out {
	void *handle; /* to be sent to destroy */
};

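/* Example (illustrative sketch): creating an IPv4 listener. "rdma_cxt",
 * "my_event_cb", "my_ctx" and "my_ipv4" are hypothetical caller-owned
 * objects; the expected byte order of ip_addr/port is an assumption of
 * this sketch.
 *
 *	struct ecore_iwarp_listen_in in = { 0 };
 *	struct ecore_iwarp_listen_out out = { 0 };
 *
 *	in.event_cb = my_event_cb;
 *	in.cb_context = my_ctx;
 *	in.max_backlog = 8;
 *	in.ip_version = ECORE_TCP_IPV4;
 *	in.ip_addr[0] = my_ipv4;
 *	in.port = 4321;
 *	(void)ecore_iwarp_create_listen(rdma_cxt, &in, &out);
 *	// out.handle is later passed to ecore_iwarp_destroy_listen()
 */
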
struct ecore_iwarp_accept_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	struct ecore_rdma_qp *qp;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_reject_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	const void *private_data;
	u16 private_data_len;
};

struct ecore_iwarp_send_rtr_in {
	void *ep_context;
};

struct ecore_iwarp_tcp_abort_in {
	void *ep_context;
};

#endif

enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
		    struct ecore_iwarp_connect_in *iparams,
		    struct ecore_iwarp_connect_out *oparams);

enum _ecore_status_t
ecore_iwarp_create_listen(void *rdma_cxt,
			  struct ecore_iwarp_listen_in *iparams,
			  struct ecore_iwarp_listen_out *oparams);

enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
		   struct ecore_iwarp_accept_in *iparams);

enum _ecore_status_t
ecore_iwarp_reject(void *rdma_cxt,
		   struct ecore_iwarp_reject_in *iparams);

enum _ecore_status_t
ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle);

enum _ecore_status_t
ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams);

enum _ecore_status_t
ecore_iwarp_tcp_abort(void *rdma_cxt, struct ecore_iwarp_tcp_abort_in *iparams);

#endif /* CONFIG_ECORE_IWARP */

#endif /* __ECORE_RDMA_API_H__ */