/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
#include <rte_bus_pci.h>

#include "hinic_compat.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_hwif.h"
#include "hinic_pmd_wq.h"
#include "hinic_pmd_mgmt.h"
#include "hinic_pmd_cmdq.h"
#include "hinic_pmd_cfg.h"
#include "hinic_pmd_niccfg.h"
#include "hinic_pmd_nicio.h"

#define WQ_PREFETCH_MAX			6
#define WQ_PREFETCH_MIN			1
#define WQ_PREFETCH_THRESHOLD		256

#define DEFAULT_RX_BUF_SIZE		((u16)0xB)

enum {
	RECYCLE_MODE_NIC = 0x0,
	RECYCLE_MODE_DPDK = 0x1,
};

/* Queue buffer related define */
enum hinic_rx_buf_size {
	HINIC_RX_BUF_SIZE_32B = 0x20,
	HINIC_RX_BUF_SIZE_64B = 0x40,
	HINIC_RX_BUF_SIZE_96B = 0x60,
	HINIC_RX_BUF_SIZE_128B = 0x80,
	HINIC_RX_BUF_SIZE_192B = 0xC0,
	HINIC_RX_BUF_SIZE_256B = 0x100,
	HINIC_RX_BUF_SIZE_384B = 0x180,
	HINIC_RX_BUF_SIZE_512B = 0x200,
	HINIC_RX_BUF_SIZE_768B = 0x300,
	HINIC_RX_BUF_SIZE_1K = 0x400,
	HINIC_RX_BUF_SIZE_1_5K = 0x600,
	HINIC_RX_BUF_SIZE_2K = 0x800,
	HINIC_RX_BUF_SIZE_3K = 0xC00,
	HINIC_RX_BUF_SIZE_4K = 0x1000,
	HINIC_RX_BUF_SIZE_8K = 0x2000,
	HINIC_RX_BUF_SIZE_16K = 0x4000,
};

const u32 hinic_hw_rx_buf_size[] = {
	HINIC_RX_BUF_SIZE_32B,
	HINIC_RX_BUF_SIZE_64B,
	HINIC_RX_BUF_SIZE_96B,
	HINIC_RX_BUF_SIZE_128B,
	HINIC_RX_BUF_SIZE_192B,
	HINIC_RX_BUF_SIZE_256B,
	HINIC_RX_BUF_SIZE_384B,
	HINIC_RX_BUF_SIZE_512B,
	HINIC_RX_BUF_SIZE_768B,
	HINIC_RX_BUF_SIZE_1K,
	HINIC_RX_BUF_SIZE_1_5K,
	HINIC_RX_BUF_SIZE_2K,
	HINIC_RX_BUF_SIZE_3K,
	HINIC_RX_BUF_SIZE_4K,
	HINIC_RX_BUF_SIZE_8K,
	HINIC_RX_BUF_SIZE_16K,
};
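
/*
 * The hardware does not take the RX buffer size in bytes; it takes the index
 * of the matching entry in the table above (see get_hw_rx_buf_size() below).
 * For example, index 0xB corresponds to HINIC_RX_BUF_SIZE_2K, which is why
 * DEFAULT_RX_BUF_SIZE is defined as 0xB ("default 2K").
 */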

struct hinic_qp_ctxt_header {
	u16	num_queues;
	u16	queue_type;
	u32	addr_offset;
};

struct hinic_sq_ctxt {
	u32	ceq_attr;

	u32	ci_owner;

	u32	wq_pfn_hi;
	u32	wq_pfn_lo;

	u32	pref_cache;
	u32	pref_owner;
	u32	pref_wq_pfn_hi_ci;
	u32	pref_wq_pfn_lo;

	u32	rsvd8;
	u32	rsvd9;

	u32	wq_block_pfn_hi;
	u32	wq_block_pfn_lo;
};

struct hinic_rq_ctxt {
	u32	ceq_attr;

	u32	pi_intr_attr;

	u32	wq_pfn_hi_ci;
	u32	wq_pfn_lo;

	u32	pref_cache;
	u32	pref_owner;

	u32	pref_wq_pfn_hi_ci;
	u32	pref_wq_pfn_lo;

	u32	pi_paddr_hi;
	u32	pi_paddr_lo;

	u32	wq_block_pfn_hi;
	u32	wq_block_pfn_lo;
};

struct hinic_sq_ctxt_block {
	struct hinic_qp_ctxt_header	cmdq_hdr;
	struct hinic_sq_ctxt		sq_ctxt[HINIC_Q_CTXT_MAX];
};

struct hinic_rq_ctxt_block {
	struct hinic_qp_ctxt_header	cmdq_hdr;
	struct hinic_rq_ctxt		rq_ctxt[HINIC_Q_CTXT_MAX];
};

struct hinic_clean_queue_ctxt {
	struct hinic_qp_ctxt_header	cmdq_hdr;
	u32				ctxt_size;
};


static void
hinic_qp_prepare_cmdq_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
			     enum hinic_qp_ctxt_type ctxt_type,
			     u16 num_queues, u16 max_queues, u16 q_id)
{
	qp_ctxt_hdr->queue_type = ctxt_type;
	qp_ctxt_hdr->num_queues = num_queues;

	if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)
		qp_ctxt_hdr->addr_offset =
				SQ_CTXT_OFFSET(max_queues, max_queues, q_id);
	else
		qp_ctxt_hdr->addr_offset =
				RQ_CTXT_OFFSET(max_queues, max_queues, q_id);

	qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);

	hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
}
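
/*
 * The cmdq header above is byte-swapped to big-endian before it is sent to
 * the device, and the SIZE_16BYTES() conversion suggests addr_offset ends up
 * expressed in 16-byte units rather than bytes.
 */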

static void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn,
			   struct hinic_sq_ctxt *sq_ctxt)
{
	struct hinic_wq *wq = sq->wq;
	u64 wq_page_addr;
	u64 wq_page_pfn, wq_block_pfn;
	u32 wq_page_pfn_hi, wq_page_pfn_lo;
	u32 wq_block_pfn_hi, wq_block_pfn_lo;
	u16 pi_start, ci_start;

	ci_start = (u16)(wq->cons_idx);
	pi_start = (u16)(wq->prod_idx);

	/* read the first page from the HW table */
	wq_page_addr = wq->queue_buf_paddr;

	wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
	wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
	wq_page_pfn_lo = lower_32_bits(wq_page_pfn);

	wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
	wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
	wq_block_pfn_lo = lower_32_bits(wq_block_pfn);

	/* must be configured with CEQ disabled */
	sq_ctxt->ceq_attr = SQ_CTXT_CEQ_ATTR_SET(global_qpn, GLOBAL_SQ_ID) |
				SQ_CTXT_CEQ_ATTR_SET(0, ARM) |
				SQ_CTXT_CEQ_ATTR_SET(0, CEQ_ID) |
				SQ_CTXT_CEQ_ATTR_SET(0, EN);

	sq_ctxt->ci_owner = SQ_CTXT_CI_SET(ci_start, IDX) |
				SQ_CTXT_CI_SET(1, OWNER);

	sq_ctxt->wq_pfn_hi =
			SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
			SQ_CTXT_WQ_PAGE_SET(pi_start, PI);

	sq_ctxt->wq_pfn_lo = wq_page_pfn_lo;

	sq_ctxt->pref_cache =
		SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
		SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
		SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);

	sq_ctxt->pref_owner = 1;

	sq_ctxt->pref_wq_pfn_hi_ci =
		SQ_CTXT_PREF_SET(ci_start, CI) |
		SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI);

	sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;

	sq_ctxt->wq_block_pfn_hi =
		SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);

	sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;

	hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
}
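
/*
 * The WQ page and WQ block addresses are programmed as page frame numbers
 * split into 32-bit halves (upper_32_bits()/lower_32_bits()), since each
 * context word is only 32 bits wide; the whole context is then converted to
 * big-endian, apparently because it is consumed by the NIC microcode.
 */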

static void hinic_rq_prepare_ctxt(struct hinic_rq *rq,
			struct hinic_rq_ctxt *rq_ctxt)
{
	struct hinic_wq *wq = rq->wq;
	u64 wq_page_addr;
	u64 wq_page_pfn, wq_block_pfn;
	u32 wq_page_pfn_hi, wq_page_pfn_lo;
	u32 wq_block_pfn_hi, wq_block_pfn_lo;
	u16 pi_start, ci_start;

	ci_start = (u16)(wq->cons_idx);
	pi_start = (u16)(wq->prod_idx);

	/* read the first page from the HW table */
	wq_page_addr = wq->queue_buf_paddr;

	wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
	wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
	wq_page_pfn_lo = lower_32_bits(wq_page_pfn);

	wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
	wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
	wq_block_pfn_lo = lower_32_bits(wq_block_pfn);

	/* must be configured with CEQ enabled but not generating CEQ events */
	rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(1, EN) |
			    RQ_CTXT_CEQ_ATTR_SET(1, OWNER);

	rq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) |
				RQ_CTXT_PI_SET(rq->msix_entry_idx, INTR) |
				RQ_CTXT_PI_SET(0, CEQ_ARM);

	rq_ctxt->wq_pfn_hi_ci = RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
				RQ_CTXT_WQ_PAGE_SET(ci_start, CI);

	rq_ctxt->wq_pfn_lo = wq_page_pfn_lo;

	rq_ctxt->pref_cache =
		RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
		RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
		RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);

	rq_ctxt->pref_owner = 1;

	rq_ctxt->pref_wq_pfn_hi_ci =
		RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
		RQ_CTXT_PREF_SET(ci_start, CI);

	rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;

	rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
	rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);

	rq_ctxt->wq_block_pfn_hi =
		RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);

	rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;

	hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
}
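
/*
 * Unlike the SQ context, the RQ context is programmed with the CEQ bit set
 * (although, per the comment above, no CEQ events are generated) and it
 * additionally carries pi_paddr_hi/lo: the DMA address rq->pi_dma_addr,
 * presumably the write-back area for the RQ producer index, again split into
 * 32-bit halves.
 */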

static int init_sq_ctxts(struct hinic_nic_io *nic_io)
{
	struct hinic_hwdev *hwdev = nic_io->hwdev;
	struct hinic_sq_ctxt_block *sq_ctxt_block;
	struct hinic_sq_ctxt *sq_ctxt;
	struct hinic_cmd_buf *cmd_buf;
	struct hinic_qp *qp;
	u64 out_param = EIO;
	u16 q_id, curr_id, global_qpn, max_ctxts, i;
	int err = 0;

	cmd_buf = hinic_alloc_cmd_buf(hwdev);
	if (!cmd_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
		return -ENOMEM;
	}

	q_id = 0;
	/* the number of SQs and RQs may differ */
	while (q_id < nic_io->num_sqs) {
		sq_ctxt_block = cmd_buf->buf;
		sq_ctxt = sq_ctxt_block->sq_ctxt;

		max_ctxts = (nic_io->num_sqs - q_id) > HINIC_Q_CTXT_MAX ?
				HINIC_Q_CTXT_MAX : (nic_io->num_sqs - q_id);

		hinic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
					     HINIC_QP_CTXT_TYPE_SQ, max_ctxts,
					     nic_io->max_qps, q_id);

		for (i = 0; i < max_ctxts; i++) {
			curr_id = q_id + i;
			qp = &nic_io->qps[curr_id];
			global_qpn = nic_io->global_qpn + curr_id;

			hinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]);
		}

		cmd_buf->size = SQ_CTXT_SIZE(max_ctxts);

		err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
					     HINIC_MOD_L2NIC,
					     HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,
					     cmd_buf, &out_param, 0);
		if (err || out_param != 0) {
			PMD_DRV_LOG(ERR, "Failed to set SQ ctxts, err:%d", err);
			err = -EFAULT;
			break;
		}

		q_id += max_ctxts;
	}

	hinic_free_cmd_buf(hwdev, cmd_buf);

	return err;
}

static int init_rq_ctxts(struct hinic_nic_io *nic_io)
{
	struct hinic_hwdev *hwdev = nic_io->hwdev;
	struct hinic_rq_ctxt_block *rq_ctxt_block;
	struct hinic_rq_ctxt *rq_ctxt;
	struct hinic_cmd_buf *cmd_buf;
	struct hinic_qp *qp;
	u64 out_param = 0;
	u16 q_id, curr_id, max_ctxts, i;
	int err = 0;

	cmd_buf = hinic_alloc_cmd_buf(hwdev);
	if (!cmd_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
		return -ENOMEM;
	}

	q_id = 0;
	/* the number of SQs and RQs may differ */
	while (q_id < nic_io->num_rqs) {
		rq_ctxt_block = cmd_buf->buf;
		rq_ctxt = rq_ctxt_block->rq_ctxt;

		max_ctxts = (nic_io->num_rqs - q_id) > HINIC_Q_CTXT_MAX ?
				HINIC_Q_CTXT_MAX : (nic_io->num_rqs - q_id);

		hinic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
					     HINIC_QP_CTXT_TYPE_RQ, max_ctxts,
					     nic_io->max_qps, q_id);

		for (i = 0; i < max_ctxts; i++) {
			curr_id = q_id + i;
			qp = &nic_io->qps[curr_id];

			hinic_rq_prepare_ctxt(&qp->rq, &rq_ctxt[i]);
		}

		cmd_buf->size = RQ_CTXT_SIZE(max_ctxts);

		err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
					     HINIC_MOD_L2NIC,
					     HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,
					     cmd_buf, &out_param, 0);

		if (err || out_param != 0) {
			PMD_DRV_LOG(ERR, "Failed to set RQ ctxts");
			err = -EFAULT;
			break;
		}

		q_id += max_ctxts;
	}

	hinic_free_cmd_buf(hwdev, cmd_buf);

	return err;
}

static int init_qp_ctxts(struct hinic_nic_io *nic_io)
{
	return (init_sq_ctxts(nic_io) || init_rq_ctxts(nic_io));
}
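
/*
 * Note that the logical OR above collapses the individual error codes, so
 * init_qp_ctxts() only reports success or failure, not which stage failed or
 * the original errno value.
 */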

static int clean_queue_offload_ctxt(struct hinic_nic_io *nic_io,
				    enum hinic_qp_ctxt_type ctxt_type)
{
	struct hinic_hwdev *hwdev = nic_io->hwdev;
	struct hinic_clean_queue_ctxt *ctxt_block;
	struct hinic_cmd_buf *cmd_buf;
	u64 out_param = 0;
	int err;

	cmd_buf = hinic_alloc_cmd_buf(hwdev);
	if (!cmd_buf) {
		PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
		return -ENOMEM;
	}

	ctxt_block = cmd_buf->buf;
	ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps;
	ctxt_block->cmdq_hdr.queue_type = ctxt_type;
	ctxt_block->cmdq_hdr.addr_offset = 0;

	/* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
	ctxt_block->ctxt_size = 0x3;

	hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));

	cmd_buf->size = sizeof(*ctxt_block);

	err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
				     HINIC_MOD_L2NIC,
				     HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
				     cmd_buf, &out_param, 0);

	if (err || out_param) {
		PMD_DRV_LOG(ERR, "Failed to clean queue offload ctxts");
		err = -EFAULT;
	}

	hinic_free_cmd_buf(hwdev, cmd_buf);

	return err;
}

static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io)
{
	/* clean LRO/TSO context space */
	return (clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_SQ) ||
		clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_RQ));
}

/**
 * get_hw_rx_buf_size - translate an rx buffer size into its HW size code
 * @rx_buf_sz: receive buffer size in bytes
 * @return
 *   HW rx buffer size code (index into hinic_hw_rx_buf_size)
 **/
static u16 get_hw_rx_buf_size(u32 rx_buf_sz)
{
	u16 num_hw_types = sizeof(hinic_hw_rx_buf_size)
			   / sizeof(hinic_hw_rx_buf_size[0]);
	u16 i;

	for (i = 0; i < num_hw_types; i++) {
		if (hinic_hw_rx_buf_size[i] == rx_buf_sz)
			return i;
	}

	PMD_DRV_LOG(ERR, "Hw can't support rx buf size of %u", rx_buf_sz);

	return DEFAULT_RX_BUF_SIZE;	/* default 2K */
}
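
/*
 * For example, get_hw_rx_buf_size(HINIC_RX_BUF_SIZE_2K) returns 0xB. An
 * unsupported size only logs an error and falls back to DEFAULT_RX_BUF_SIZE
 * (2K); hinic_convert_rx_buf_size() at the end of this file appears to be the
 * helper for rounding a requested size to a supported one beforehand.
 */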

/**
 * hinic_set_root_ctxt - init root context in NIC
 * @hwdev: the hardware interface of a nic device
 * @rq_depth: the depth of receive queue
 * @sq_depth: the depth of transmit queue
 * @rx_buf_sz: receive buffer size from app
 * Return: 0 on success, negative error value otherwise.
 **/
static int
hinic_set_root_ctxt(void *hwdev, u16 rq_depth, u16 sq_depth, int rx_buf_sz)
{
	struct hinic_root_ctxt root_ctxt;

	memset(&root_ctxt, 0, sizeof(root_ctxt));
	root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	root_ctxt.func_idx = hinic_global_func_id(hwdev);
	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
	root_ctxt.set_cmdq_depth = 0;
	root_ctxt.cmdq_depth = 0;
	root_ctxt.lro_en = 1;
	root_ctxt.rq_depth  = (u16)ilog2(rq_depth);
	root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz);
	root_ctxt.sq_depth  = (u16)ilog2(sq_depth);

	return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				      HINIC_MGMT_CMD_VAT_SET,
				      &root_ctxt, sizeof(root_ctxt),
				      NULL, NULL, 0);
}
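
/*
 * The queue depths are programmed as log2 of the ring size (e.g. a depth of
 * 4096 entries is written as 12), and rx_buf_sz is the table index produced
 * by get_hw_rx_buf_size(), not a byte count.
 */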

/**
 * hinic_clean_root_ctxt - clean root context table in NIC
 * @hwdev: the hardware interface of a nic device
 * @return
 *   0 on success,
 *   negative error value otherwise.
 **/
static int hinic_clean_root_ctxt(void *hwdev)
{
	struct hinic_root_ctxt root_ctxt;

	memset(&root_ctxt, 0, sizeof(root_ctxt));
	root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	root_ctxt.func_idx = hinic_global_func_id(hwdev);
	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
	root_ctxt.set_cmdq_depth = 0;
	root_ctxt.cmdq_depth = 0;
	root_ctxt.lro_en = 0;
	root_ctxt.rq_depth  = 0;
	root_ctxt.rx_buf_sz = 0;
	root_ctxt.sq_depth  = 0;

	return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				      HINIC_MGMT_CMD_VAT_SET,
				      &root_ctxt, sizeof(root_ctxt),
				      NULL, NULL, 0);
}

/* init QP ctxts, set SQ CI attributes for all SQs and set the VAT page size */
int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_sq_attr sq_attr;
	u16 q_id;
	int err, rx_buf_sz;

	/* set vat page size to max queue depth page_size */
	err = hinic_set_pagesize(hwdev, HINIC_PAGE_SIZE_DPDK);
	if (err != HINIC_OK) {
		PMD_DRV_LOG(ERR, "Set vat page size: %d failed, rc: %d",
			HINIC_PAGE_SIZE_DPDK, err);
		return err;
	}

	err = init_qp_ctxts(nic_io);
	if (err) {
		PMD_DRV_LOG(ERR, "Init QP ctxts failed, rc: %d", err);
		return err;
	}

	/* clean LRO/TSO context space */
	err = clean_qp_offload_ctxt(nic_io);
	if (err) {
		PMD_DRV_LOG(ERR, "Clean qp offload ctxts failed, rc: %d",
			err);
		return err;
	}

	rx_buf_sz = nic_io->rq_buf_size;

	/* update rx buf size to function table */
	err = hinic_set_rx_vhd_mode(hwdev, 0, rx_buf_sz);
	if (err) {
		PMD_DRV_LOG(ERR, "Set rx vhd mode failed, rc: %d",
			err);
		return err;
	}

	err = hinic_set_root_ctxt(hwdev, nic_io->rq_depth,
				  nic_io->sq_depth, rx_buf_sz);
	if (err) {
		PMD_DRV_LOG(ERR, "Set root context failed, rc: %d",
			err);
		return err;
	}

	for (q_id = 0; q_id < nic_io->num_sqs; q_id++) {
		sq_attr.ci_dma_base =
			HINIC_CI_PADDR(nic_io->ci_dma_base, q_id) >> 2;
		/* performance: SQ CI update pending limit and coalescing time */
		sq_attr.pending_limit = 1;
		sq_attr.coalescing_time = 1;
		sq_attr.intr_en = 0;
		sq_attr.l2nic_sqn = q_id;
		sq_attr.dma_attr_off = 0;
		err = hinic_set_ci_table(hwdev, q_id, &sq_attr);
		if (err) {
			PMD_DRV_LOG(ERR, "Set ci table failed, rc: %d",
				err);
			goto set_cons_idx_table_err;
		}
	}

	return 0;

set_cons_idx_table_err:
	(void)hinic_clean_root_ctxt(hwdev);
	return err;
}

void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev)
{
	int err;

	err = hinic_clean_root_ctxt(hwdev);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to clean root ctxt");
}

static int hinic_init_nic_hwdev(struct hinic_hwdev *hwdev)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	u16 global_qpn, rx_buf_sz;
	int err;

	err = hinic_get_base_qpn(hwdev, &global_qpn);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to get base qpn");
		goto err_init_nic_hwdev;
	}

	nic_io->global_qpn = global_qpn;
	rx_buf_sz = HINIC_IS_VF(hwdev) ? RX_BUF_LEN_1_5K : RX_BUF_LEN_16K;
	err = hinic_init_function_table(hwdev, rx_buf_sz);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init function table");
		goto err_init_nic_hwdev;
	}

	err = hinic_set_fast_recycle_mode(hwdev, RECYCLE_MODE_DPDK);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to set fast recycle mode");
		goto err_init_nic_hwdev;
	}

	return 0;

err_init_nic_hwdev:
	return err;
}

static void hinic_free_nic_hwdev(struct hinic_hwdev *hwdev)
{
	hwdev->nic_io = NULL;
}

int hinic_rx_tx_flush(struct hinic_hwdev *hwdev)
{
	return hinic_func_rx_tx_flush(hwdev);
}

int hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_wq *wq = &nic_io->sq_wq[q_id];

	return (wq->delta) - 1;
}
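
/*
 * The free WQEBB count is wq->delta - 1: one WQEBB appears to be held in
 * reserve, presumably so that a completely full ring can be distinguished
 * from an empty one.
 */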

int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_wq *wq = &nic_io->rq_wq[q_id];

	return (wq->delta) - 1;
}

u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_wq *wq = &nic_io->sq_wq[q_id];

	return (wq->cons_idx) & wq->mask;
}

void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id,
			 int num_wqebbs, u16 owner)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_sq *sq = &nic_io->qps[q_id].sq;

	if (owner != sq->owner)
		sq->owner = owner;

	sq->wq->delta += num_wqebbs;
	sq->wq->prod_idx -= num_wqebbs;
}
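
/*
 * hinic_return_sq_wqe() undoes a previous WQE allocation that was never
 * posted: the WQEBBs are handed back (delta grows), the producer index is
 * rewound and the owner bit is restored to the caller-supplied value.
 */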

void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev,
			      u16 q_id, int wqebb_cnt)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_sq *sq = &nic_io->qps[q_id].sq;

	hinic_put_wqe(sq->wq, wqebb_cnt);
}

void *hinic_get_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, u16 *pi)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_rq *rq = &nic_io->qps[q_id].rq;

	return hinic_get_wqe(rq->wq, 1, pi);
}

void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_rq *rq = &nic_io->qps[q_id].rq;

	rq->wq->delta += num_wqebbs;
	rq->wq->prod_idx -= num_wqebbs;
}

u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_wq *wq = &nic_io->rq_wq[q_id];

	return (wq->cons_idx) & wq->mask;
}

void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;
	struct hinic_rq *rq = &nic_io->qps[q_id].rq;

	hinic_put_wqe(rq->wq, wqe_cnt);
}

static int hinic_alloc_nicio(struct hinic_hwdev *hwdev)
{
	int err;
	u16 max_qps, num_qp;
	struct hinic_nic_io *nic_io = hwdev->nic_io;

	max_qps = hinic_func_max_qnum(hwdev);
	if ((max_qps & (max_qps - 1))) {
		PMD_DRV_LOG(ERR, "max_qps: %d is not a power of 2",
			max_qps);
		return -EINVAL;
	}

	nic_io->max_qps = max_qps;
	nic_io->num_qps = max_qps;
	num_qp = max_qps;

	nic_io->qps = kzalloc_aligned(num_qp * sizeof(*nic_io->qps),
				      GFP_KERNEL);
	if (!nic_io->qps) {
		PMD_DRV_LOG(ERR, "Failed to allocate qps");
		err = -ENOMEM;
		goto alloc_qps_err;
	}

	nic_io->ci_vaddr_base =
		dma_zalloc_coherent(hwdev,
				    CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE),
				    &nic_io->ci_dma_base, GFP_KERNEL);
	if (!nic_io->ci_vaddr_base) {
		PMD_DRV_LOG(ERR, "Failed to allocate ci area");
		err = -ENOMEM;
		goto ci_base_err;
	}

	nic_io->sq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->sq_wq),
					GFP_KERNEL);
	if (!nic_io->sq_wq) {
		PMD_DRV_LOG(ERR, "Failed to allocate sq wq array");
		err = -ENOMEM;
		goto sq_wq_err;
	}

	nic_io->rq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->rq_wq),
					GFP_KERNEL);
	if (!nic_io->rq_wq) {
		PMD_DRV_LOG(ERR, "Failed to allocate rq wq array");
		err = -ENOMEM;
		goto rq_wq_err;
	}

	return HINIC_OK;

rq_wq_err:
	kfree(nic_io->sq_wq);

sq_wq_err:
	dma_free_coherent(hwdev, CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE),
			  nic_io->ci_vaddr_base, nic_io->ci_dma_base);

ci_base_err:
	kfree(nic_io->qps);

alloc_qps_err:
	return err;
}

static void hinic_free_nicio(struct hinic_hwdev *hwdev)
{
	struct hinic_nic_io *nic_io = hwdev->nic_io;

	/* nic_io->rq_wq */
	kfree(nic_io->rq_wq);

	/* nic_io->sq_wq */
	kfree(nic_io->sq_wq);

	/* nic_io->ci_vaddr_base */
	dma_free_coherent(hwdev,
			  CI_TABLE_SIZE(nic_io->max_qps, HINIC_PAGE_SIZE),
			  nic_io->ci_vaddr_base, nic_io->ci_dma_base);

	/* nic_io->qps */
	kfree(nic_io->qps);
}

/* alloc nic hwdev and init function table */
int hinic_init_nicio(struct hinic_hwdev *hwdev)
{
	int rc;

	hwdev->nic_io = rte_zmalloc("hinic_nicio", sizeof(*hwdev->nic_io),
				      RTE_CACHE_LINE_SIZE);
	if (!hwdev->nic_io) {
		PMD_DRV_LOG(ERR, "Allocate nic_io failed, dev_name: %s",
			    hwdev->pcidev_hdl->name);
		return -ENOMEM;
	}
	hwdev->nic_io->hwdev = hwdev;

	/* alloc root working queue set */
	rc = hinic_alloc_nicio(hwdev);
	if (rc) {
825 		PMD_DRV_LOG(ERR, "Allocate nic_io failed, dev_name: %s",
826 			    hwdev->pcidev_hdl->name);
827 		goto allc_nicio_fail;
	}

	rc = hinic_init_nic_hwdev(hwdev);
	if (rc) {
		PMD_DRV_LOG(ERR, "Initialize hwdev failed, dev_name: %s",
			    hwdev->pcidev_hdl->name);
		goto init_nic_hwdev_fail;
	}

	return 0;

init_nic_hwdev_fail:
	hinic_free_nicio(hwdev);

alloc_nicio_fail:
	rte_free(hwdev->nic_io);
	return rc;
}

void hinic_deinit_nicio(struct hinic_hwdev *hwdev)
{
	hinic_free_nicio(hwdev);

	hinic_free_nic_hwdev(hwdev);

	rte_free(hwdev->nic_io);
	hwdev->nic_io = NULL;
}

/**
 * hinic_convert_rx_buf_size - convert an rx buffer size to a supported hw size
 * @rx_buf_sz: receive buffer size of mbuf
 * @match_sz: output, closest receive buffer size supported by hardware
 * @return
 *   0 on success,
 *   negative error value otherwise.
 **/
int hinic_convert_rx_buf_size(u32 rx_buf_sz, u32 *match_sz)
{
	u32 i, num_hw_types, best_match_sz;

	if (unlikely(!match_sz || rx_buf_sz < HINIC_RX_BUF_SIZE_32B))
		return -EINVAL;

	if (rx_buf_sz >= HINIC_RX_BUF_SIZE_16K) {
		best_match_sz = HINIC_RX_BUF_SIZE_16K;
		goto size_matched;
	}

	num_hw_types = sizeof(hinic_hw_rx_buf_size) /
		sizeof(hinic_hw_rx_buf_size[0]);
	best_match_sz = hinic_hw_rx_buf_size[0];
	for (i = 0; i < num_hw_types; i++) {
		if (rx_buf_sz == hinic_hw_rx_buf_size[i]) {
			best_match_sz = hinic_hw_rx_buf_size[i];
			break;
		} else if (rx_buf_sz < hinic_hw_rx_buf_size[i]) {
			break;
		}
		best_match_sz = hinic_hw_rx_buf_size[i];
	}

size_matched:
	*match_sz = best_match_sz;

	return 0;
}
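
/*
 * Illustrative use, e.g. for a 2000B mbuf data room:
 *
 *	u32 hw_sz;
 *
 *	hinic_convert_rx_buf_size(2000, &hw_sz);
 *	hw_sz is now HINIC_RX_BUF_SIZE_1_5K, i.e. the size is rounded down to
 *	the nearest supported entry.
 *
 * Anything of 16K or more is clamped to HINIC_RX_BUF_SIZE_16K, and sizes
 * below 32B are rejected with -EINVAL.
 */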
895