/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include "hinic_compat.h"
#include "hinic_csr.h"
#include "hinic_pmd_hwdev.h"
#include "hinic_pmd_hwif.h"
#include "hinic_pmd_mgmt.h"
#include "hinic_pmd_eqs.h"

#define AEQ_CTRL_0_INTR_IDX_SHIFT		0
#define AEQ_CTRL_0_DMA_ATTR_SHIFT		12
#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT		20
#define AEQ_CTRL_0_INTR_MODE_SHIFT		31

#define AEQ_CTRL_0_INTR_IDX_MASK		0x3FFU
#define AEQ_CTRL_0_DMA_ATTR_MASK		0x3FU
#define AEQ_CTRL_0_PCI_INTF_IDX_MASK		0x3U
#define AEQ_CTRL_0_INTR_MODE_MASK		0x1U

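/* The *_SET/*_CLEAR helpers below pack a field value into, or strip a field
 * out of, a control register image using the SHIFT/MASK pairs above.  For
 * example, AEQ_CTRL_0_SET(3, PCI_INTF_IDX) expands to ((3) & 0x3U) << 20,
 * i.e. the PCI interface index placed at bits [21:20] of ctrl0.
 */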
#define AEQ_CTRL_0_SET(val, member)		\
				(((val) & AEQ_CTRL_0_##member##_MASK) << \
				AEQ_CTRL_0_##member##_SHIFT)

#define AEQ_CTRL_0_CLEAR(val, member)		\
				((val) & (~(AEQ_CTRL_0_##member##_MASK \
					<< AEQ_CTRL_0_##member##_SHIFT)))

#define AEQ_CTRL_1_LEN_SHIFT			0
#define AEQ_CTRL_1_ELEM_SIZE_SHIFT		24
#define AEQ_CTRL_1_PAGE_SIZE_SHIFT		28

#define AEQ_CTRL_1_LEN_MASK			0x1FFFFFU
#define AEQ_CTRL_1_ELEM_SIZE_MASK		0x3U
#define AEQ_CTRL_1_PAGE_SIZE_MASK		0xFU

#define AEQ_CTRL_1_SET(val, member)		\
				(((val) & AEQ_CTRL_1_##member##_MASK) << \
				AEQ_CTRL_1_##member##_SHIFT)

#define AEQ_CTRL_1_CLEAR(val, member)		\
				((val) & (~(AEQ_CTRL_1_##member##_MASK \
					<< AEQ_CTRL_1_##member##_SHIFT)))

#define CEQ_CTRL_0_INTR_IDX_SHIFT		0
#define CEQ_CTRL_0_DMA_ATTR_SHIFT		12
#define CEQ_CTRL_0_LIMIT_KICK_SHIFT		20
#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT		24
#define CEQ_CTRL_0_INTR_MODE_SHIFT		31

#define CEQ_CTRL_0_INTR_IDX_MASK		0x3FFU
#define CEQ_CTRL_0_DMA_ATTR_MASK		0x3FU
#define CEQ_CTRL_0_LIMIT_KICK_MASK		0xFU
#define CEQ_CTRL_0_PCI_INTF_IDX_MASK		0x3U
#define CEQ_CTRL_0_INTR_MODE_MASK		0x1U

#define CEQ_CTRL_0_SET(val, member)		\
				(((val) & CEQ_CTRL_0_##member##_MASK) << \
					CEQ_CTRL_0_##member##_SHIFT)

#define CEQ_CTRL_1_LEN_SHIFT			0
#define CEQ_CTRL_1_PAGE_SIZE_SHIFT		28

#define CEQ_CTRL_1_LEN_MASK			0x1FFFFFU
#define CEQ_CTRL_1_PAGE_SIZE_MASK		0xFU

#define CEQ_CTRL_1_SET(val, member)		\
				(((val) & CEQ_CTRL_1_##member##_MASK) << \
					CEQ_CTRL_1_##member##_SHIFT)

#define EQ_CONS_IDX_CONS_IDX_SHIFT		0
#define EQ_CONS_IDX_XOR_CHKSUM_SHIFT		24
#define EQ_CONS_IDX_INT_ARMED_SHIFT		31

#define EQ_CONS_IDX_CONS_IDX_MASK		0x1FFFFFU
#define EQ_CONS_IDX_XOR_CHKSUM_MASK		0xFU
#define EQ_CONS_IDX_INT_ARMED_MASK		0x1U

#define EQ_CONS_IDX_SET(val, member)		\
				(((val) & EQ_CONS_IDX_##member##_MASK) << \
				EQ_CONS_IDX_##member##_SHIFT)

#define EQ_CONS_IDX_CLEAR(val, member)		\
				((val) & (~(EQ_CONS_IDX_##member##_MASK \
					<< EQ_CONS_IDX_##member##_SHIFT)))

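/* eq->wrapped is the queue's wrap flag.  EQ_WRAPPED() places it at bit 31
 * (EQ_VALID_SHIFT) and serves as the initial pattern written into every queue
 * element, while EQ_CONS_IDX() folds it in at bit 20 (EQ_WRAPPED_SHIFT) above
 * the consumer index that is written back to hardware.
 */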
#define EQ_WRAPPED(eq)			((u32)(eq)->wrapped << EQ_VALID_SHIFT)

#define EQ_CONS_IDX(eq)		((eq)->cons_idx | \
				((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT))

#define EQ_CONS_IDX_REG_ADDR(eq)	(((eq)->type == HINIC_AEQ) ? \
				HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) :\
				HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))

#define EQ_PROD_IDX_REG_ADDR(eq)	(((eq)->type == HINIC_AEQ) ? \
				HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) :\
				HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))

#define GET_EQ_NUM_PAGES(eq, size)		\
		((u16)(ALIGN((eq)->eq_len * (u32)(eq)->elem_size, (size)) \
		/ (size)))

#define GET_EQ_NUM_ELEMS(eq, pg_size)	((pg_size) / (u32)(eq)->elem_size)

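/* Hardware takes the page size and element size as log2-encoded values: the
 * page size in units of 4 KB (e.g. a 4 KB page encodes as 0, 64 KB as 4) and
 * the element size in units of 32 B (e.g. a 64 B element encodes as 1).
 */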
#define PAGE_IN_4K(page_size)		((page_size) >> 12)
#define EQ_SET_HW_PAGE_SIZE_VAL(eq) ((u32)ilog2(PAGE_IN_4K((eq)->page_size)))

#define ELEMENT_SIZE_IN_32B(eq)		(((eq)->elem_size) >> 5)
#define EQ_SET_HW_ELEM_SIZE_VAL(eq)	((u32)ilog2(ELEMENT_SIZE_IN_32B(eq)))

#define AEQ_DMA_ATTR_DEFAULT			0
#define CEQ_DMA_ATTR_DEFAULT			0

#define CEQ_LMT_KICK_DEFAULT			0

#define EQ_WRAPPED_SHIFT			20

#define EQ_VALID_SHIFT				31

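/* Recover the parent hinic_aeqs from one of its embedded aeq[] entries:
 * stepping the element pointer back by its own q_id yields &aeq[0], from
 * which container_of() gives the enclosing structure.
 */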
#define aeq_to_aeqs(eq) \
		container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])

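/**
 * eq_cons_idx_checksum_set - compute the 4-bit XOR checksum of a consumer
 * index register value
 * @val: the register value to checksum
 *
 * XORs the eight 4-bit nibbles of @val into a single nibble; the caller
 * places the result in the XOR_CHKSUM field of the consumer index register
 * before writing it back.
 **/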
static u8 eq_cons_idx_checksum_set(u32 val)
{
	u8 checksum = 0;
	u8 idx;

	for (idx = 0; idx < 32; idx += 4)
		checksum ^= ((val >> idx) & 0xF);

	return (checksum & 0xF);
}

/**
 * set_eq_cons_idx - write the cons idx to the hw
 * @eq: the event queue to update the cons idx for
 * @arm_state: whether to arm the interrupt (report it) when a new eq element
 *             is generated
 **/
static void set_eq_cons_idx(struct hinic_eq *eq, u32 arm_state)
{
	u32 eq_cons_idx, eq_wrap_ci, val;
	u32 addr = EQ_CONS_IDX_REG_ADDR(eq);

	eq_wrap_ci = EQ_CONS_IDX(eq);

	/* Read Modify Write */
	val = hinic_hwif_read_reg(eq->hwdev->hwif, addr);

	val = EQ_CONS_IDX_CLEAR(val, CONS_IDX) &
		EQ_CONS_IDX_CLEAR(val, INT_ARMED) &
		EQ_CONS_IDX_CLEAR(val, XOR_CHKSUM);

	/* Only aeq0 uses the int_arm mode, so that the PMD driver can
	 * receive asynchronous events and mailbox data via interrupts
	 */
	if (eq->q_id == 0)
		eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
			EQ_CONS_IDX_SET(arm_state, INT_ARMED);
	else
		eq_cons_idx = EQ_CONS_IDX_SET(eq_wrap_ci, CONS_IDX) |
			EQ_CONS_IDX_SET(HINIC_EQ_NOT_ARMED, INT_ARMED);

	val |= eq_cons_idx;

	val |= EQ_CONS_IDX_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);

	hinic_hwif_write_reg(eq->hwdev->hwif, addr, val);
}

/**
 * eq_update_ci - update the cons idx of the event queue
 * @eq: the event queue to update the cons idx for
 **/
void eq_update_ci(struct hinic_eq *eq)
{
	set_eq_cons_idx(eq, HINIC_EQ_ARMED);
}

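/* Request format for HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP: the CEQ control
 * registers are written on the driver's behalf by the management CPU (see
 * set_ceq_ctrl_reg() below).
 */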
struct hinic_ceq_ctrl_reg {
	struct hinic_mgmt_msg_head mgmt_msg_head;

	u16 func_id;
	u16 q_id;
	u32 ctrl0;
	u32 ctrl1;
};

static int set_ceq_ctrl_reg(struct hinic_hwdev *hwdev, u16 q_id,
			    u32 ctrl0, u32 ctrl1)
{
	struct hinic_ceq_ctrl_reg ceq_ctrl;
	u16 in_size = sizeof(ceq_ctrl);

	memset(&ceq_ctrl, 0, in_size);
	ceq_ctrl.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
	ceq_ctrl.func_id = hinic_global_func_id(hwdev);
	ceq_ctrl.q_id = q_id;
	ceq_ctrl.ctrl0 = ctrl0;
	ceq_ctrl.ctrl1 = ctrl1;

	return hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
				     HINIC_MGMT_CMD_CEQ_CTRL_REG_WR_BY_UP,
				     &ceq_ctrl, in_size, NULL, NULL, 0);
}

/**
 * set_eq_ctrls - set the eq's control registers
 * @eq: the event queue to configure
 **/
static int set_eq_ctrls(struct hinic_eq *eq)
{
	enum hinic_eq_type type = eq->type;
	struct hinic_hwif *hwif = eq->hwdev->hwif;
	struct irq_info *eq_irq = &eq->eq_irq;
	u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size;
	u32 pci_intf_idx = HINIC_PCI_INTF_IDX(hwif);
	int ret = 0;

	if (type == HINIC_AEQ) {
		/* set ctrl0 */
		addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);

		val = hinic_hwif_read_reg(hwif, addr);

		val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) &
			AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
			AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
			AEQ_CTRL_0_CLEAR(val, INTR_MODE);

		ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
			AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR)	|
			AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX)	|
			AEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);

		val |= ctrl0;

		hinic_hwif_write_reg(hwif, addr, val);

		/* set ctrl1 */
		addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);

		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
		elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);

		ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN)		|
			AEQ_CTRL_1_SET(elem_size, ELEM_SIZE)	|
			AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);

		hinic_hwif_write_reg(hwif, addr, ctrl1);
	} else {
		ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
			CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR)	|
			CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) |
			CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX)	|
			CEQ_CTRL_0_SET(HINIC_INTR_MODE_ARMED, INTR_MODE);

		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);

		ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN) |
			CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);

		/* set ceq ctrl reg through mgmt cpu */
		ret = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1);
	}

	return ret;
}

/**
 * ceq_elements_init - initialize all the elements in the ceq
 * @eq: the event queue
 * @init_val: value used to initialize the elements
 **/
static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
{
	u16 i;
	u32 *ceqe;

	for (i = 0; i < eq->eq_len; i++) {
		ceqe = GET_CEQ_ELEM(eq, i);
		*(ceqe) = cpu_to_be32(init_val);
	}

	rte_wmb();	/* Write the init values */
}

/**
 * aeq_elements_init - initialize all the elements in the aeq
 * @eq: the event queue
 * @init_val: value used to initialize the elements
 **/
static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
{
	struct hinic_aeq_elem *aeqe;
	u16 i;

	for (i = 0; i < eq->eq_len; i++) {
		aeqe = GET_AEQ_ELEM(eq, i);
		aeqe->desc = cpu_to_be32(init_val);
	}

	rte_wmb();	/* Write the init values */
}

/**
 * alloc_eq_pages - allocate the pages for the queue
 * @eq: the event queue
 **/
static int alloc_eq_pages(struct hinic_eq *eq)
{
	struct hinic_hwif *hwif = eq->hwdev->hwif;
	u32 init_val;
	u64 dma_addr_size, virt_addr_size;
	u16 pg_num, i;
	int err;

	dma_addr_size = eq->num_pages * sizeof(*eq->dma_addr);
	virt_addr_size = eq->num_pages * sizeof(*eq->virt_addr);

	eq->dma_addr = kzalloc(dma_addr_size, GFP_KERNEL);
	if (!eq->dma_addr) {
		PMD_DRV_LOG(ERR, "Allocate dma addr array failed");
		return -ENOMEM;
	}

	eq->virt_addr = kzalloc(virt_addr_size, GFP_KERNEL);
	if (!eq->virt_addr) {
		PMD_DRV_LOG(ERR, "Allocate virt addr array failed");
		err = -ENOMEM;
		goto virt_addr_alloc_err;
	}

	for (pg_num = 0; pg_num < eq->num_pages; pg_num++) {
		eq->virt_addr[pg_num] =
			(u8 *)dma_zalloc_coherent_aligned(eq->hwdev,
					eq->page_size, &eq->dma_addr[pg_num],
					GFP_KERNEL);
		if (!eq->virt_addr[pg_num]) {
			err = -ENOMEM;
			goto dma_alloc_err;
		}

		hinic_hwif_write_reg(hwif,
				     HINIC_EQ_HI_PHYS_ADDR_REG(eq->type,
				     eq->q_id, pg_num),
				     upper_32_bits(eq->dma_addr[pg_num]));

		hinic_hwif_write_reg(hwif,
				     HINIC_EQ_LO_PHYS_ADDR_REG(eq->type,
				     eq->q_id, pg_num),
				     lower_32_bits(eq->dma_addr[pg_num]));
	}

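	/* Seed every element with the current wrap (valid) bit so that stale,
	 * not yet produced entries are not treated as new events.
	 */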
	init_val = EQ_WRAPPED(eq);

	if (eq->type == HINIC_AEQ)
		aeq_elements_init(eq, init_val);
	else
		ceq_elements_init(eq, init_val);

	return 0;

dma_alloc_err:
	for (i = 0; i < pg_num; i++)
		dma_free_coherent(eq->hwdev, eq->page_size,
				  eq->virt_addr[i], eq->dma_addr[i]);

	/* also free the page pointer array allocated above */
	kfree(eq->virt_addr);

virt_addr_alloc_err:
	kfree(eq->dma_addr);
	return err;
}

/**
 * free_eq_pages - free the pages of the queue
 * @eq: the event queue
 **/
static void free_eq_pages(struct hinic_eq *eq)
{
	struct hinic_hwdev *hwdev = eq->hwdev;
	u16 pg_num;

	for (pg_num = 0; pg_num < eq->num_pages; pg_num++)
		dma_free_coherent(hwdev, eq->page_size,
				  eq->virt_addr[pg_num],
				  eq->dma_addr[pg_num]);

	kfree(eq->virt_addr);
	kfree(eq->dma_addr);
}

#define MSIX_ENTRY_IDX_0 (0)

/**
 * init_eq - initialize eq
 * @eq:	the event queue
 * @hwdev: the pointer to the private hardware device object
 * @q_id: queue id number
 * @q_len: the number of EQ elements
 * @type: the type of the event queue, ceq or aeq
 * @page_size: the page size of the event queue
 * @entry: msix entry associated with the event queue
 * Return: 0 - Success, Negative - failure
 **/
static int init_eq(struct hinic_eq *eq, struct hinic_hwdev *hwdev, u16 q_id,
		   u16 q_len, enum hinic_eq_type type, u32 page_size,
		   __rte_unused struct irq_info *entry)
{
	int err = 0;

	eq->hwdev = hwdev;
	eq->q_id = q_id;
	eq->type = type;
	eq->page_size = page_size;
	eq->eq_len = q_len;

	/* clear eq_len to force eqe drop in hardware */
	if (eq->type == HINIC_AEQ) {
		hinic_hwif_write_reg(eq->hwdev->hwif,
				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
	} else {
		err = set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
		if (err) {
			PMD_DRV_LOG(ERR, "Set ceq control registers ctrl0[0] ctrl1[0] failed");
			return err;
		}
	}

	eq->cons_idx = 0;
	eq->wrapped = 0;

	eq->elem_size = (type == HINIC_AEQ) ?
			HINIC_AEQE_SIZE : HINIC_CEQE_SIZE;
	eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
	eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, page_size);

	if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
		PMD_DRV_LOG(ERR, "Number of elements in eq page is not a power of 2");
		return -EINVAL;
	}

	if (eq->num_pages > HINIC_EQ_MAX_PAGES) {
		PMD_DRV_LOG(ERR, "Too many pages for eq, num_pages: %d",
			eq->num_pages);
		return -EINVAL;
	}

	err = alloc_eq_pages(eq);
	if (err) {
		PMD_DRV_LOG(ERR, "Allocate pages for eq failed");
		return err;
	}

	/* PMD uses MSIX_ENTRY_IDX_0 */
	eq->eq_irq.msix_entry_idx = MSIX_ENTRY_IDX_0;

	err = set_eq_ctrls(eq);
	if (err) {
		PMD_DRV_LOG(ERR, "Init eq control registers failed");
		goto init_eq_ctrls_err;
	}

	hinic_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
	set_eq_cons_idx(eq, HINIC_EQ_ARMED);

	if (eq->q_id == 0)
		hinic_set_msix_state(hwdev, 0, HINIC_MSIX_ENABLE);

	eq->poll_retry_nr = HINIC_RETRY_NUM;

	return 0;

init_eq_ctrls_err:
	free_eq_pages(eq);

	return err;
}

/**
 * remove_eq - remove eq
 * @eq:	the event queue
 **/
static void remove_eq(struct hinic_eq *eq)
{
	struct irq_info *entry = &eq->eq_irq;

	if (eq->type == HINIC_AEQ) {
		if (eq->q_id == 0)
			hinic_set_msix_state(eq->hwdev, entry->msix_entry_idx,
					     HINIC_MSIX_DISABLE);

		/* clear eq_len to prevent hw from accessing host memory */
		hinic_hwif_write_reg(eq->hwdev->hwif,
				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
	} else {
		(void)set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
	}

	/* update cons_idx to avoid an invalid interrupt */
	eq->cons_idx = (u16)hinic_hwif_read_reg(eq->hwdev->hwif,
						EQ_PROD_IDX_REG_ADDR(eq));
	set_eq_cons_idx(eq, HINIC_EQ_NOT_ARMED);

	free_eq_pages(eq);
}

/**
 * hinic_aeqs_init - init all the aeqs
 * @hwdev: the pointer to the private hardware device object
 * @num_aeqs: number of aeqs
 * @msix_entries: msix entries associated with the event queues
 * Return: 0 - Success, Negative - failure
 **/
static int
hinic_aeqs_init(struct hinic_hwdev *hwdev, u16 num_aeqs,
		struct irq_info *msix_entries)
{
	struct hinic_aeqs *aeqs;
	int err;
	u16 i, q_id;

	aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
	if (!aeqs)
		return -ENOMEM;

	hwdev->aeqs = aeqs;
	aeqs->hwdev = hwdev;
	aeqs->num_aeqs = num_aeqs;

	for (q_id = HINIC_AEQN_START; q_id < num_aeqs; q_id++) {
		err = init_eq(&aeqs->aeq[q_id], hwdev, q_id,
			      HINIC_DEFAULT_AEQ_LEN, HINIC_AEQ,
			      HINIC_EQ_PAGE_SIZE, &msix_entries[q_id]);
		if (err) {
			PMD_DRV_LOG(ERR, "Init aeq %d failed", q_id);
			goto init_aeq_err;
		}
	}

	return 0;

init_aeq_err:
	for (i = 0; i < q_id; i++)
		remove_eq(&aeqs->aeq[i]);

	kfree(aeqs);

	return err;
}

/**
 * hinic_aeqs_free - free all the aeqs
 * @hwdev: the pointer to the private hardware device object
 **/
static void hinic_aeqs_free(struct hinic_hwdev *hwdev)
{
	struct hinic_aeqs *aeqs = hwdev->aeqs;
	u16 q_id;

	/* hinic pmd uses aeq[1~3]; aeq[0] is used only by the kernel driver */
	for (q_id = HINIC_AEQN_START; q_id < aeqs->num_aeqs; q_id++)
		remove_eq(&aeqs->aeq[q_id]);

	kfree(aeqs);
}

void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
{
	struct hinic_eq *eq;
	u32 addr, ci, pi;
	int q_id;

	for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) {
		eq = &hwdev->aeqs->aeq[q_id];
		addr = EQ_CONS_IDX_REG_ADDR(eq);
		ci = hinic_hwif_read_reg(hwdev->hwif, addr);
		addr = EQ_PROD_IDX_REG_ADDR(eq);
		pi = hinic_hwif_read_reg(hwdev->hwif, addr);
		PMD_DRV_LOG(ERR, "aeq id: %d, ci: 0x%x, pi: 0x%x",
			q_id, ci, pi);
	}
}

int hinic_comm_aeqs_init(struct hinic_hwdev *hwdev)
{
	int rc;
	u16 num_aeqs;
	struct irq_info aeq_irqs[HINIC_MAX_AEQS];

	num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
	if (num_aeqs < HINIC_MAX_AEQS) {
		PMD_DRV_LOG(ERR, "Warning: PMD needs %d AEQs, but the chip has only %d",
			HINIC_MAX_AEQS, num_aeqs);
		return HINIC_ERROR;
	}

	memset(aeq_irqs, 0, sizeof(aeq_irqs));
	rc = hinic_aeqs_init(hwdev, num_aeqs, aeq_irqs);
	if (rc != HINIC_OK)
		PMD_DRV_LOG(ERR, "Initialize aeqs failed, rc: %d", rc);

	return rc;
}

void hinic_comm_aeqs_free(struct hinic_hwdev *hwdev)
{
	hinic_aeqs_free(hwdev);
}