xref: /dpdk/drivers/net/hinic/base/hinic_pmd_cmdq.c (revision 089e5ed727a15da2729cfee9b63533dd120bd04c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Huawei Technologies Co., Ltd
3  */
4 
5 #include "hinic_compat.h"
6 #include "hinic_pmd_hwdev.h"
7 #include "hinic_pmd_hwif.h"
8 #include "hinic_pmd_wq.h"
9 #include "hinic_pmd_mgmt.h"
10 #include "hinic_pmd_cmdq.h"
11 
12 #define CMDQ_CMD_TIMEOUT				5000 /* millisecond */
13 
14 #define UPPER_8_BITS(data)				(((data) >> 8) & 0xFF)
15 #define LOWER_8_BITS(data)				((data) & 0xFF)
16 
17 #define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT			0
18 #define CMDQ_DB_INFO_QUEUE_TYPE_SHIFT			23
19 #define CMDQ_DB_INFO_CMDQ_TYPE_SHIFT			24
20 #define CMDQ_DB_INFO_SRC_TYPE_SHIFT			27
21 
22 #define CMDQ_DB_INFO_HI_PROD_IDX_MASK			0xFFU
23 #define CMDQ_DB_INFO_QUEUE_TYPE_MASK			0x1U
24 #define CMDQ_DB_INFO_CMDQ_TYPE_MASK			0x7U
25 #define CMDQ_DB_INFO_SRC_TYPE_MASK			0x1FU
26 
27 #define CMDQ_DB_INFO_SET(val, member)		\
28 	(((val) & CMDQ_DB_INFO_##member##_MASK) <<	\
29 		CMDQ_DB_INFO_##member##_SHIFT)
30 
31 #define CMDQ_CTRL_PI_SHIFT				0
32 #define CMDQ_CTRL_CMD_SHIFT				16
33 #define CMDQ_CTRL_MOD_SHIFT				24
34 #define CMDQ_CTRL_ACK_TYPE_SHIFT			29
35 #define CMDQ_CTRL_HW_BUSY_BIT_SHIFT			31
36 
37 #define CMDQ_CTRL_PI_MASK				0xFFFFU
38 #define CMDQ_CTRL_CMD_MASK				0xFFU
39 #define CMDQ_CTRL_MOD_MASK				0x1FU
40 #define CMDQ_CTRL_ACK_TYPE_MASK				0x3U
41 #define CMDQ_CTRL_HW_BUSY_BIT_MASK			0x1U
42 
43 #define CMDQ_CTRL_SET(val, member)		\
44 	(((val) & CMDQ_CTRL_##member##_MASK) << CMDQ_CTRL_##member##_SHIFT)
45 
46 #define CMDQ_CTRL_GET(val, member)		\
47 	(((val) >> CMDQ_CTRL_##member##_SHIFT) & CMDQ_CTRL_##member##_MASK)
48 
49 #define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT		0
50 #define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT		15
51 #define CMDQ_WQE_HEADER_DATA_FMT_SHIFT			22
52 #define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT		23
53 #define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT		27
54 #define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT			29
55 #define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT		31
56 
57 #define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK		0xFFU
58 #define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK		0x1U
59 #define CMDQ_WQE_HEADER_DATA_FMT_MASK			0x1U
60 #define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK		0x1U
61 #define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK		0x3U
62 #define CMDQ_WQE_HEADER_CTRL_LEN_MASK			0x3U
63 #define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK		0x1U
64 
65 #define CMDQ_WQE_HEADER_SET(val, member)	\
66 	(((val) & CMDQ_WQE_HEADER_##member##_MASK) <<	\
67 		CMDQ_WQE_HEADER_##member##_SHIFT)
68 
69 #define CMDQ_WQE_HEADER_GET(val, member)	\
70 	(((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) &	\
71 		CMDQ_WQE_HEADER_##member##_MASK)
72 
73 #define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT		0
74 #define CMDQ_CTXT_EQ_ID_SHIFT				56
75 #define CMDQ_CTXT_CEQ_ARM_SHIFT				61
76 #define CMDQ_CTXT_CEQ_EN_SHIFT				62
77 #define CMDQ_CTXT_HW_BUSY_BIT_SHIFT			63
78 
79 #define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK		0xFFFFFFFFFFFFF
80 #define CMDQ_CTXT_EQ_ID_MASK				0x1F
81 #define CMDQ_CTXT_CEQ_ARM_MASK				0x1
82 #define CMDQ_CTXT_CEQ_EN_MASK				0x1
83 #define CMDQ_CTXT_HW_BUSY_BIT_MASK			0x1
84 
85 #define CMDQ_CTXT_PAGE_INFO_SET(val, member)		\
86 	(((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)
87 
88 #define CMDQ_CTXT_PAGE_INFO_CLEAR(val, member)		\
89 	((val) & (~((u64)CMDQ_CTXT_##member##_MASK <<	\
90 		CMDQ_CTXT_##member##_SHIFT)))
91 
92 #define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT			0
93 #define CMDQ_CTXT_CI_SHIFT				52
94 
95 #define CMDQ_CTXT_WQ_BLOCK_PFN_MASK			0xFFFFFFFFFFFFF
96 #define CMDQ_CTXT_CI_MASK				0xFFF
97 
98 #define CMDQ_CTXT_BLOCK_INFO_SET(val, member)		\
99 	(((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)
100 
101 #define SAVED_DATA_ARM_SHIFT			31
102 
103 #define SAVED_DATA_ARM_MASK			0x1U
104 
105 #define SAVED_DATA_SET(val, member)		\
106 	(((val) & SAVED_DATA_##member##_MASK) << SAVED_DATA_##member##_SHIFT)
107 
108 #define SAVED_DATA_CLEAR(val, member)		\
109 	((val) & (~(SAVED_DATA_##member##_MASK << SAVED_DATA_##member##_SHIFT)))
110 
111 #define WQE_ERRCODE_VAL_SHIFT			20
112 
113 #define WQE_ERRCODE_VAL_MASK			0xF
114 
115 #define WQE_ERRCODE_GET(val, member)		\
116 	(((val) >> WQE_ERRCODE_##member##_SHIFT) & WQE_ERRCODE_##member##_MASK)
117 
118 #define WQE_COMPLETED(ctrl_info)	CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
119 
120 #define WQE_HEADER(wqe)		((struct hinic_cmdq_header *)(wqe))
121 
122 #define CMDQ_DB_PI_OFF(pi)		(((u16)LOWER_8_BITS(pi)) << 3)
123 
124 #define CMDQ_DB_ADDR(db_base, pi)	\
125 	(((u8 *)(db_base) + HINIC_DB_OFF) + CMDQ_DB_PI_OFF(pi))
126 
127 #define CMDQ_PFN(addr, page_size)	((addr) >> (ilog2(page_size)))
128 
129 #define FIRST_DATA_TO_WRITE_LAST	sizeof(u64)
130 
131 #define WQE_LCMD_SIZE		64
132 #define WQE_SCMD_SIZE		64
133 
134 #define COMPLETE_LEN		3
135 
136 #define CMDQ_WQEBB_SIZE		64
137 #define CMDQ_WQEBB_SHIFT	6
138 
139 #define CMDQ_WQE_SIZE		64
140 
141 #define HINIC_CMDQ_WQ_BUF_SIZE	4096
142 
143 #define WQE_NUM_WQEBBS(wqe_size, wq)	\
144 	((u16)(ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size))
145 
146 #define cmdq_to_cmdqs(cmdq)	container_of((cmdq) - (cmdq)->cmdq_type, \
147 				struct hinic_cmdqs, cmdq[0])
148 
149 #define WAIT_CMDQ_ENABLE_TIMEOUT	300
150 
151 
152 static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
153 				 struct hinic_cmdq_ctxt *cmdq_ctxt);
154 static void hinic_cmdqs_free(struct hinic_hwdev *hwdev);
155 
156 bool hinic_cmdq_idle(struct hinic_cmdq *cmdq)
157 {
158 	struct hinic_wq *wq = cmdq->wq;
159 
160 	return ((wq->delta) == wq->q_depth ? true : false);
161 }
162 
163 struct hinic_cmd_buf *hinic_alloc_cmd_buf(void *hwdev)
164 {
165 	struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
166 	struct hinic_cmd_buf *cmd_buf;
167 
168 	cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_KERNEL);
169 	if (!cmd_buf) {
170 		PMD_DRV_LOG(ERR, "Allocate cmd buffer failed");
171 		return NULL;
172 	}
173 
174 	cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, GFP_KERNEL,
175 				      &cmd_buf->dma_addr);
176 	if (!cmd_buf->buf) {
177 		PMD_DRV_LOG(ERR, "Allocate cmd from the pool failed");
178 		goto alloc_pci_buf_err;
179 	}
180 
181 	return cmd_buf;
182 
183 alloc_pci_buf_err:
184 	kfree(cmd_buf);
185 	return NULL;
186 }
187 
188 void hinic_free_cmd_buf(void *hwdev, struct hinic_cmd_buf *cmd_buf)
189 {
190 	struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
191 
192 	pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr);
193 	kfree(cmd_buf);
194 }
195 
196 static u32 cmdq_wqe_size(enum cmdq_wqe_type wqe_type)
197 {
198 	u32 wqe_size = 0;
199 
200 	switch (wqe_type) {
201 	case WQE_LCMD_TYPE:
202 		wqe_size = WQE_LCMD_SIZE;
203 		break;
204 	case WQE_SCMD_TYPE:
205 		wqe_size = WQE_SCMD_SIZE;
206 		break;
207 	}
208 
209 	return wqe_size;
210 }
211 
212 static int cmdq_get_wqe_size(enum bufdesc_len len)
213 {
214 	int wqe_size = 0;
215 
216 	switch (len) {
217 	case BUFDESC_LCMD_LEN:
218 		wqe_size = WQE_LCMD_SIZE;
219 		break;
220 	case BUFDESC_SCMD_LEN:
221 		wqe_size = WQE_SCMD_SIZE;
222 		break;
223 	}
224 
225 	return wqe_size;
226 }
227 
228 static void cmdq_set_completion(struct hinic_cmdq_completion *complete,
229 					struct hinic_cmd_buf *buf_out)
230 {
231 	struct hinic_sge_resp *sge_resp = &complete->sge_resp;
232 
233 	hinic_set_sge(&sge_resp->sge, buf_out->dma_addr,
234 		      HINIC_CMDQ_BUF_SIZE);
235 }
236 
/* Fill the long-command WQE's buffer descriptor with the DMA address and
 * length of the request payload so hardware can fetch the command data.
 */
static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe,
					struct hinic_cmd_buf *buf_in)
{
	hinic_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size);
}
242 
243 static void cmdq_fill_db(struct hinic_cmdq_db *db,
244 			enum hinic_cmdq_type cmdq_type, u16 prod_idx)
245 {
246 	db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
247 			CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, QUEUE_TYPE) |
248 			CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE)		|
249 			CMDQ_DB_INFO_SET(HINIC_DB_SRC_CMDQ_TYPE, SRC_TYPE);
250 }
251 
/* Ring the cmdq doorbell for @prod_idx: build the doorbell word, convert
 * it to big endian as hardware expects, and issue a write barrier so all
 * prior WQE stores are visible before the doorbell write makes hardware
 * fetch the new entry.
 */
static void cmdq_set_db(struct hinic_cmdq *cmdq,
			enum hinic_cmdq_type cmdq_type, u16 prod_idx)
{
	struct hinic_cmdq_db db;

	cmdq_fill_db(&db, cmdq_type, prod_idx);

	/* The data that is written to HW should be in Big Endian Format */
	db.db_info = cpu_to_be32(db.db_info);

	rte_wmb();	/* write all before the doorbell */

	writel(db.db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
}
266 
/* Copy a shadow WQE into the live work queue. The first 8 bytes contain
 * the header with the hardware busy bit, so the remainder of the WQE is
 * copied first; the write barrier then guarantees the header store that
 * hands ownership to hardware happens last.
 */
static void cmdq_wqe_fill(void *dst, void *src)
{
	memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST,
	       (u8 *)src + FIRST_DATA_TO_WRITE_LAST,
	       CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);

	rte_wmb();/* The first 8 bytes should be written last */

	*(u64 *)dst = *(u64 *)src;
}
277 
278 static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
279 				  enum hinic_ack_type ack_type,
280 				  enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
281 				  enum completion_format complete_format,
282 				  enum data_format local_data_format,
283 				  enum bufdesc_len buf_len)
284 {
285 	struct hinic_ctrl *ctrl;
286 	enum ctrl_sect_len ctrl_len;
287 	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
288 	struct hinic_cmdq_wqe_scmd *wqe_scmd;
289 	u32 saved_data = WQE_HEADER(wqe)->saved_data;
290 
291 	if (local_data_format == DATA_SGE) {
292 		wqe_lcmd = &wqe->wqe_lcmd;
293 
294 		wqe_lcmd->status.status_info = 0;
295 		ctrl = &wqe_lcmd->ctrl;
296 		ctrl_len = CTRL_SECT_LEN;
297 	} else {
298 		wqe_scmd = &wqe->inline_wqe.wqe_scmd;
299 
300 		wqe_scmd->status.status_info = 0;
301 		ctrl = &wqe_scmd->ctrl;
302 		ctrl_len = CTRL_DIRECT_SECT_LEN;
303 	}
304 
305 	ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI)		|
306 			CMDQ_CTRL_SET(cmd, CMD)			|
307 			CMDQ_CTRL_SET(mod, MOD)			|
308 			CMDQ_CTRL_SET(ack_type, ACK_TYPE);
309 
310 	WQE_HEADER(wqe)->header_info =
311 		CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
312 		CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
313 		CMDQ_WQE_HEADER_SET(local_data_format, DATA_FMT)	|
314 		CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ)	|
315 		CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
316 		CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN)		|
317 		CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT);
318 
319 	if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC_MOD_COMM) {
320 		saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
321 		WQE_HEADER(wqe)->saved_data = saved_data |
322 						SAVED_DATA_SET(1, ARM);
323 	} else {
324 		saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
325 		WQE_HEADER(wqe)->saved_data = saved_data;
326 	}
327 }
328 
/* Fill a long-command WQE in the caller's shadow buffer: choose the
 * completion format for the command type, prepare the ctrl/header words,
 * and attach the request buffer descriptor.
 */
static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
			      enum cmdq_cmd_type cmd_type,
			      struct hinic_cmd_buf *buf_in,
			      struct hinic_cmd_buf *buf_out, int wrapped,
			      enum hinic_ack_type ack_type,
			      enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
{
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
	enum completion_format complete_format = COMPLETE_DIRECT;

	switch (cmd_type) {
	case SYNC_CMD_SGE_RESP:
		/* response DMAed into buf_out via an SGE, when provided */
		if (buf_out) {
			complete_format = COMPLETE_SGE;
			cmdq_set_completion(&wqe_lcmd->completion, buf_out);
		}
		break;
	case SYNC_CMD_DIRECT_RESP:
		/* response returned inline in the completion field */
		complete_format = COMPLETE_DIRECT;
		wqe_lcmd->completion.direct_resp = 0;
		break;
	case ASYNC_CMD:
		complete_format = COMPLETE_DIRECT;
		wqe_lcmd->completion.direct_resp = 0;

		/* stash the request buffer pointer in the WQE — presumably
		 * so the async completion path can recover and free it;
		 * the handler is not in this file (TODO confirm).
		 */
		wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in);
		break;
	}

	cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
			      prod_idx, complete_format, DATA_SGE,
			      BUFDESC_LCMD_LEN);

	cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
}
364 
365 static int cmdq_params_valid(struct hinic_cmd_buf *buf_in)
366 {
367 	if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) {
368 		PMD_DRV_LOG(ERR, "Invalid CMDQ buffer size");
369 		return -EINVAL;
370 	}
371 
372 	return 0;
373 }
374 
375 static int wait_cmdqs_enable(struct hinic_cmdqs *cmdqs)
376 {
377 	unsigned long end;
378 
379 	end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT);
380 	do {
381 		if (cmdqs->status & HINIC_CMDQ_ENABLE)
382 			return 0;
383 
384 	} while (time_before(jiffies, end));
385 
386 	return -EBUSY;
387 }
388 
/* Record the completion error code for the command posted at @prod_idx
 * so the submitter can read it back after polling completes.
 */
static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
				int errcode)
{
	cmdq->errcode[prod_idx] = errcode;
}
394 
/* Reclaim a completed WQE: zero its ctrl word (which carries the HW busy
 * bit) to drop hardware ownership, then return its WQEBBs to the work
 * queue. The WQE layout (lcmd vs scmd) is recovered from the header.
 */
static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
				   struct hinic_cmdq_wqe *wqe)
{
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
	struct hinic_cmdq_inline_wqe *inline_wqe;
	struct hinic_cmdq_wqe_scmd *wqe_scmd;
	struct hinic_ctrl *ctrl;
	/* the header was written big-endian for HW; convert back */
	u32 header_info = be32_to_cpu(WQE_HEADER(wqe)->header_info);
	int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
	int wqe_size = cmdq_get_wqe_size(buf_len);
	u16 num_wqebbs;

	/* the ctrl section lives at different offsets per WQE layout */
	if (wqe_size == WQE_LCMD_SIZE) {
		wqe_lcmd = &wqe->wqe_lcmd;
		ctrl = &wqe_lcmd->ctrl;
	} else {
		inline_wqe = &wqe->inline_wqe;
		wqe_scmd = &inline_wqe->wqe_scmd;
		ctrl = &wqe_scmd->ctrl;
	}

	/* clear HW busy bit */
	ctrl->ctrl_info = 0;

	rte_wmb();	/* verify wqe is clear */

	num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq);
	hinic_put_wqe(cmdq->wq, num_wqebbs);
}
424 
425 static int hinic_set_cmdq_ctxts(struct hinic_hwdev *hwdev)
426 {
427 	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
428 	struct hinic_cmdq_ctxt *cmdq_ctxt;
429 	enum hinic_cmdq_type cmdq_type;
430 	u16 in_size;
431 	int err;
432 
433 	cmdq_type = HINIC_CMDQ_SYNC;
434 	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
435 		cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
436 		cmdq_ctxt->resp_aeq_num = HINIC_AEQ1;
437 		in_size = sizeof(*cmdq_ctxt);
438 		err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
439 					     HINIC_MGMT_CMD_CMDQ_CTXT_SET,
440 					     cmdq_ctxt, in_size, NULL,
441 					     NULL, 0);
442 		if (err) {
443 			PMD_DRV_LOG(ERR, "Set cmdq ctxt failed");
444 			return -EFAULT;
445 		}
446 	}
447 
448 	cmdqs->status |= HINIC_CMDQ_ENABLE;
449 
450 	return 0;
451 }
452 
/* Public wrapper used at device teardown to release all cmdq resources
 * owned by @hwdev.
 */
void hinic_comm_cmdqs_free(struct hinic_hwdev *hwdev)
{
	hinic_cmdqs_free(hwdev);
}
457 
458 int hinic_reinit_cmdq_ctxts(struct hinic_hwdev *hwdev)
459 {
460 	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
461 	enum hinic_cmdq_type cmdq_type;
462 
463 	cmdq_type = HINIC_CMDQ_SYNC;
464 	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
465 		cmdqs->cmdq[cmdq_type].wrapped = 1;
466 		hinic_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq);
467 	}
468 
469 	return hinic_set_cmdq_ctxts(hwdev);
470 }
471 
472 static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_hwdev *hwdev,
473 		     struct hinic_wq *wq, enum hinic_cmdq_type q_type)
474 {
475 	void __iomem *db_base;
476 	int err = 0;
477 	size_t errcode_size;
478 	size_t cmd_infos_size;
479 
480 	cmdq->wq = wq;
481 	cmdq->cmdq_type = q_type;
482 	cmdq->wrapped = 1;
483 
484 	spin_lock_init(&cmdq->cmdq_lock);
485 
486 	errcode_size = wq->q_depth * sizeof(*cmdq->errcode);
487 	cmdq->errcode = kzalloc(errcode_size, GFP_KERNEL);
488 	if (!cmdq->errcode) {
489 		PMD_DRV_LOG(ERR, "Allocate errcode for cmdq failed");
490 		spin_lock_deinit(&cmdq->cmdq_lock);
491 		return -ENOMEM;
492 	}
493 
494 	cmd_infos_size = wq->q_depth * sizeof(*cmdq->cmd_infos);
495 	cmdq->cmd_infos = kzalloc(cmd_infos_size, GFP_KERNEL);
496 	if (!cmdq->cmd_infos) {
497 		PMD_DRV_LOG(ERR, "Allocate errcode for cmdq failed");
498 		err = -ENOMEM;
499 		goto cmd_infos_err;
500 	}
501 
502 	err = hinic_alloc_db_addr(hwdev, &db_base);
503 	if (err)
504 		goto alloc_db_err;
505 
506 	cmdq->db_base = (u8 *)db_base;
507 	return 0;
508 
509 alloc_db_err:
510 	kfree(cmdq->cmd_infos);
511 
512 cmd_infos_err:
513 	kfree(cmdq->errcode);
514 	spin_lock_deinit(&cmdq->cmdq_lock);
515 
516 	return err;
517 }
518 
/* Release everything init_cmdq() acquired for one queue: the doorbell
 * mapping, both per-entry arrays and the queue lock.
 */
static void free_cmdq(struct hinic_hwdev *hwdev, struct hinic_cmdq *cmdq)
{
	hinic_free_db_addr(hwdev, cmdq->db_base);
	kfree(cmdq->cmd_infos);
	kfree(cmdq->errcode);
	spin_lock_deinit(&cmdq->cmdq_lock);
}
526 
/* Create the full cmdq subsystem for @hwdev: the cmdqs container, the
 * saved work-queue array, the DMA buffer pool, the per-type work queues,
 * each hinic_cmdq, and finally the firmware-side queue contexts.
 * Returns 0 on success or a negative errno; on failure the goto chain
 * unwinds exactly the resources acquired so far.
 */
static int hinic_cmdqs_init(struct hinic_hwdev *hwdev)
{
	struct hinic_cmdqs *cmdqs;
	struct hinic_cmdq_ctxt *cmdq_ctxt;
	enum hinic_cmdq_type type, cmdq_type;
	size_t saved_wqs_size;
	int err;

	cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL);
	if (!cmdqs)
		return -ENOMEM;

	hwdev->cmdqs = cmdqs;
	cmdqs->hwdev = hwdev;

	saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
	cmdqs->saved_wqs = kzalloc(saved_wqs_size, GFP_KERNEL);
	if (!cmdqs->saved_wqs) {
		PMD_DRV_LOG(ERR, "Allocate saved wqs failed");
		err = -ENOMEM;
		goto alloc_wqs_err;
	}

	/* pool of fixed-size DMA buffers used for command payloads */
	cmdqs->cmd_buf_pool = dma_pool_create("hinic_cmdq", hwdev,
					      HINIC_CMDQ_BUF_SIZE,
					      HINIC_CMDQ_BUF_SIZE, 0ULL);
	if (!cmdqs->cmd_buf_pool) {
		PMD_DRV_LOG(ERR, "Create cmdq buffer pool failed");
		err = -ENOMEM;
		goto pool_create_err;
	}

	err = hinic_cmdq_alloc(cmdqs->saved_wqs, hwdev,
			       HINIC_MAX_CMDQ_TYPES, HINIC_CMDQ_WQ_BUF_SIZE,
			       CMDQ_WQEBB_SHIFT, HINIC_CMDQ_DEPTH);
	if (err) {
		PMD_DRV_LOG(ERR, "Allocate cmdq failed");
		goto cmdq_alloc_err;
	}

	cmdq_type = HINIC_CMDQ_SYNC;
	for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
		err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev,
				&cmdqs->saved_wqs[cmdq_type], cmdq_type);
		if (err) {
			PMD_DRV_LOG(ERR, "Initialize cmdq failed");
			goto init_cmdq_err;
		}

		cmdq_ctxt = &cmdqs->cmdq[cmdq_type].cmdq_ctxt;
		cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type], cmdq_ctxt);
	}

	err = hinic_set_cmdq_ctxts(hwdev);
	if (err)
		goto init_cmdq_err;

	return 0;

init_cmdq_err:
	/* free only the cmdqs initialized before the failure; when
	 * hinic_set_cmdq_ctxts() fails, cmdq_type has reached
	 * HINIC_MAX_CMDQ_TYPES and all of them are freed
	 */
	type = HINIC_CMDQ_SYNC;
	for ( ; type < cmdq_type; type++)
		free_cmdq(hwdev, &cmdqs->cmdq[type]);

	hinic_cmdq_free(hwdev, cmdqs->saved_wqs, HINIC_MAX_CMDQ_TYPES);

cmdq_alloc_err:
	dma_pool_destroy(cmdqs->cmd_buf_pool);

pool_create_err:
	kfree(cmdqs->saved_wqs);

alloc_wqs_err:
	kfree(cmdqs);

	return err;
}
604 
605 static void hinic_cmdqs_free(struct hinic_hwdev *hwdev)
606 {
607 	struct hinic_cmdqs *cmdqs = hwdev->cmdqs;
608 	enum hinic_cmdq_type cmdq_type = HINIC_CMDQ_SYNC;
609 
610 	cmdqs->status &= ~HINIC_CMDQ_ENABLE;
611 
612 	for ( ; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
613 		free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]);
614 
615 	hinic_cmdq_free(hwdev, cmdqs->saved_wqs,
616 			HINIC_MAX_CMDQ_TYPES);
617 
618 	dma_pool_destroy(cmdqs->cmd_buf_pool);
619 
620 	kfree(cmdqs->saved_wqs);
621 
622 	kfree(cmdqs);
623 }
624 
/* Tell the management firmware how deep the command queues are.
 * The depth is transmitted as log2(cmdq_depth); the struct is memset to
 * zero first so padding bytes sent to firmware are deterministic.
 * Returns the hinic_msg_to_mgmt_sync() result.
 */
static int hinic_set_cmdq_depth(struct hinic_hwdev *hwdev, u16 cmdq_depth)
{
	struct hinic_root_ctxt root_ctxt;

	memset(&root_ctxt, 0, sizeof(root_ctxt));
	root_ctxt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	root_ctxt.func_idx = hinic_global_func_id(hwdev);
	root_ctxt.ppf_idx = hinic_ppf_idx(hwdev);
	root_ctxt.set_cmdq_depth = 1;
	root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth);
	return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				      HINIC_MGMT_CMD_VAT_SET,
				      &root_ctxt, sizeof(root_ctxt),
				      NULL, NULL, 0);
}
640 
641 int hinic_comm_cmdqs_init(struct hinic_hwdev *hwdev)
642 {
643 	int err;
644 
645 	err = hinic_cmdqs_init(hwdev);
646 	if (err) {
647 		PMD_DRV_LOG(ERR, "Init cmd queues failed");
648 		return err;
649 	}
650 
651 	err = hinic_set_cmdq_depth(hwdev, HINIC_CMDQ_DEPTH);
652 	if (err) {
653 		PMD_DRV_LOG(ERR, "Set cmdq depth failed");
654 		goto set_cmdq_depth_err;
655 	}
656 
657 	return 0;
658 
659 set_cmdq_depth_err:
660 	hinic_cmdqs_free(hwdev);
661 
662 	return err;
663 }
664 
/* Populate the firmware-side context for one command queue: the pfn of
 * the current WQE page, the CEQ binding and the starting consumer index.
 */
static void cmdq_init_queue_ctxt(struct hinic_cmdq *cmdq,
				 struct hinic_cmdq_ctxt *cmdq_ctxt)
{
	struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)cmdq_to_cmdqs(cmdq);
	struct hinic_hwdev *hwdev = cmdqs->hwdev;
	struct hinic_wq *wq = cmdq->wq;
	struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
	u64 wq_first_page_paddr, pfn;

	u16 start_ci = (u16)(wq->cons_idx);

	/* The data in the HW is in Big Endian Format */
	wq_first_page_paddr = wq->queue_buf_paddr;

	pfn = CMDQ_PFN(wq_first_page_paddr, HINIC_PAGE_SIZE);
	ctxt_info->curr_wqe_page_pfn =
		CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) |
		CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN)	|
		CMDQ_CTXT_PAGE_INFO_SET(0, CEQ_ARM)	|
		CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
		CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN);

	/* NOTE(review): wq_block_pfn reuses the first-page pfn; presumably
	 * valid because the cmdq WQ is a single contiguous buffer — confirm
	 * against hinic_wq allocation.
	 */
	ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) |
				CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN);
	cmdq_ctxt->func_idx = HINIC_HWIF_GLOBAL_IDX(hwdev->hwif);
	cmdq_ctxt->ppf_idx  = HINIC_HWIF_PPF_IDX(hwdev->hwif);
	cmdq_ctxt->cmdq_id  = cmdq->cmdq_type;
}
693 
/* Poll the oldest outstanding cmdq WQE for completion.
 *
 * Waits up to @timeout jiffies for hardware to flip the busy bit in the
 * ctrl word. On completion the error code is extracted from the status
 * word, recorded for the submitter, and the WQE is reclaimed. The cmd
 * info slot is always marked NONE on exit so the entry is not polled
 * twice. Returns 0, -EINVAL (nothing to poll / slot not filled) or
 * -ETIMEDOUT.
 */
static int hinic_cmdq_poll_msg(struct hinic_cmdq *cmdq, u32 timeout)
{
	struct hinic_cmdq_wqe *wqe;
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
	struct hinic_ctrl *ctrl;
	struct hinic_cmdq_cmd_info *cmd_info;
	u32 status_info, ctrl_info;
	u16 ci;
	int errcode;
	unsigned long end;
	int done = 0;
	int rc = 0;

	wqe = hinic_read_wqe(cmdq->wq, 1, &ci);
	if (wqe == NULL) {
		PMD_DRV_LOG(ERR, "No outstanding cmdq msg");
		return -EINVAL;
	}

	cmd_info = &cmdq->cmd_infos[ci];
	/* this cmd has not been filled and send to hw, or get TMO msg ack*/
	if (cmd_info->cmd_type == HINIC_CMD_TYPE_NONE) {
		PMD_DRV_LOG(ERR, "Cmdq msg has not been filled and send to hw, or get TMO msg ack. cmdq ci: %u",
			    ci);
		return -EINVAL;
	}

	/* only arm bit is using scmd wqe, the wqe is lcmd */
	wqe_lcmd = &wqe->wqe_lcmd;
	ctrl = &wqe_lcmd->ctrl;
	end = jiffies + msecs_to_jiffies(timeout);
	do {
		/* ctrl word is big-endian in the ring; HW clears the busy
		 * bit when the command has been consumed
		 */
		ctrl_info = be32_to_cpu((ctrl)->ctrl_info);
		if (WQE_COMPLETED(ctrl_info)) {
			done = 1;
			break;
		}

		rte_delay_ms(1);
	} while (time_before(jiffies, end));

	if (done) {
		status_info = be32_to_cpu(wqe_lcmd->status.status_info);
		errcode = WQE_ERRCODE_GET(status_info, VAL);
		cmdq_update_errcode(cmdq, ci, errcode);
		clear_wqe_complete_bit(cmdq, wqe);
		rc = 0;
	} else {
		PMD_DRV_LOG(ERR, "Poll cmdq msg time out, ci: %u", ci);
		rc = -ETIMEDOUT;
	}

	/* set this cmd invalid */
	cmd_info->cmd_type = HINIC_CMD_TYPE_NONE;

	return rc;
}
751 
/* Post one synchronous direct-response command and wait for completion.
 *
 * Builds the WQE in a local shadow, byte-swaps it to big endian, copies
 * it into the ring (header last), rings the doorbell, then polls for the
 * ack. The cmdq lock covers the whole submit+poll sequence, keeping the
 * wrap flag and doorbell index consistent. On success *out_param (when
 * non-NULL) receives the inline response. Returns 0, -EBUSY (ring
 * full), -ETIMEDOUT, or the hardware error code.
 */
static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
				     enum hinic_ack_type ack_type,
				     enum hinic_mod_type mod, u8 cmd,
				     struct hinic_cmd_buf *buf_in,
				     u64 *out_param, u32 timeout)
{
	struct hinic_wq *wq = cmdq->wq;
	struct hinic_cmdq_wqe *curr_wqe, wqe;
	struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
	u16 curr_prod_idx, next_prod_idx, num_wqebbs;
	int wrapped;
	u32 timeo, wqe_size;
	int err;

	wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
	num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);

	/* Keep wrapped and doorbell index correct. */
	spin_lock(&cmdq->cmdq_lock);

	curr_wqe = hinic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
	if (!curr_wqe) {
		err = -EBUSY;
		goto cmdq_unlock;
	}

	memset(&wqe, 0, sizeof(wqe));
	wrapped = cmdq->wrapped;

	/* advance the producer index, toggling the wrap flag when the
	 * ring rolls over
	 */
	next_prod_idx = curr_prod_idx + num_wqebbs;
	if (next_prod_idx >= wq->q_depth) {
		cmdq->wrapped = !cmdq->wrapped;
		next_prod_idx -= wq->q_depth;
	}

	cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL,
			  wrapped, ack_type, mod, cmd, curr_prod_idx);

	/* The data that is written to HW should be in Big Endian Format */
	hinic_cpu_to_be32(&wqe, wqe_size);

	/* CMDQ WQE is not shadow, therefore wqe will be written to wq */
	cmdq_wqe_fill(curr_wqe, &wqe);

	cmdq->cmd_infos[curr_prod_idx].cmd_type = HINIC_CMD_TYPE_NORMAL;

	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);

	timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
	err = hinic_cmdq_poll_msg(cmdq, timeo);
	if (err) {
		PMD_DRV_LOG(ERR, "Cmdq poll msg ack failed, prod idx: 0x%x",
			curr_prod_idx);
		err = -ETIMEDOUT;
		goto cmdq_unlock;
	}

	rte_smp_rmb();	/* read error code after completion */

	if (out_param) {
		wqe_lcmd = &curr_wqe->wqe_lcmd;
		/* NOTE(review): cpu_to_be64 on a value read from HW looks
		 * like it should be be64_to_cpu; the 64-bit byte swap is
		 * its own inverse so the result is identical — confirm
		 * intent against the kernel hinic driver.
		 */
		*out_param = cpu_to_be64(wqe_lcmd->completion.direct_resp);
	}

	/* NOTE(review): errcodes <= 1 are treated as success here —
	 * presumably 1 is a benign status from firmware; verify against
	 * the firmware error-code table.
	 */
	if (cmdq->errcode[curr_prod_idx] > 1) {
		err = cmdq->errcode[curr_prod_idx];
		goto cmdq_unlock;
	}

cmdq_unlock:
	spin_unlock(&cmdq->cmdq_lock);

	return err;
}
826 
827 int hinic_cmdq_direct_resp(void *hwdev, enum hinic_ack_type ack_type,
828 			   enum hinic_mod_type mod, u8 cmd,
829 			   struct hinic_cmd_buf *buf_in,
830 			   u64 *out_param, u32 timeout)
831 {
832 	struct hinic_cmdqs *cmdqs = ((struct hinic_hwdev *)hwdev)->cmdqs;
833 	int err = cmdq_params_valid(buf_in);
834 
835 	if (err) {
836 		PMD_DRV_LOG(ERR, "Invalid CMDQ parameters");
837 		return err;
838 	}
839 
840 	err = wait_cmdqs_enable(cmdqs);
841 	if (err) {
842 		PMD_DRV_LOG(ERR, "Cmdq is disable");
843 		return err;
844 	}
845 
846 	return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
847 					 ack_type, mod, cmd, buf_in,
848 					 out_param, timeout);
849 }
850