/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2022 Intel Corporation
 */

#include "idpf_controlq.h"

/**
 * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 */
static int
idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
			  struct idpf_ctlq_info *cq)
{
	size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);

	cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
	if (!cq->desc_ring.va)
		return -ENOMEM;

	return 0;
}
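
/* The descriptor ring is a single contiguous DMA allocation of
 * ring_size * sizeof(struct idpf_ctlq_desc) bytes; idpf_alloc_dma_mem() is
 * expected to record the mapping (virtual and bus address) in cq->desc_ring
 * so the ring can later be programmed into hardware and released again with
 * idpf_free_dma_mem().
 */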

/**
 * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Allocate the buffer head for all control queues, and if it's a receive
 * queue, allocate DMA buffers
 */
static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
				struct idpf_ctlq_info *cq)
{
	int i = 0;

	/* Do not allocate DMA buffers for transmit queues */
	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
		return 0;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->bi.rx_buff = (struct idpf_dma_mem **)
		idpf_calloc(hw, cq->ring_size,
			    sizeof(struct idpf_dma_mem *));
	if (!cq->bi.rx_buff)
		return -ENOMEM;

	/* allocate the mapped buffers (except for the last one) */
	for (i = 0; i < cq->ring_size - 1; i++) {
		struct idpf_dma_mem *bi;
		int num = 1; /* number of idpf_dma_mem to be allocated */

		cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc(hw, num,
						sizeof(struct idpf_dma_mem));
		if (!cq->bi.rx_buff[i])
			goto unwind_alloc_cq_bufs;

		bi = cq->bi.rx_buff[i];

		bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
		if (!bi->va) {
			/* unwind will not free the failed entry */
			idpf_free(hw, cq->bi.rx_buff[i]);
			goto unwind_alloc_cq_bufs;
		}
	}

	return 0;

unwind_alloc_cq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
		idpf_free(hw, cq->bi.rx_buff[i]);
	}
	idpf_free(hw, cq->bi.rx_buff);

	return -ENOMEM;
}
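
/* Resulting layout for a receive control queue: cq->bi.rx_buff is an array
 * of ring_size pointers.  Entries 0 .. ring_size - 2 each point to an
 * idpf_dma_mem descriptor backed by a buf_size DMA buffer, while the last
 * entry is left NULL by the zeroing idpf_calloc() above and is therefore
 * skipped when idpf_ctlq_free_bufs() walks the array.
 */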

/**
 * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	idpf_free_dma_mem(hw, &cq->desc_ring);
}

/**
 * idpf_ctlq_free_bufs - Free CQ buffer info elements
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX
 * queues.  The upper layers are expected to manage freeing of TX DMA buffers
 */
static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	void *bi;

	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
		int i;

		/* free DMA buffers for rx queues */
		for (i = 0; i < cq->ring_size; i++) {
			if (cq->bi.rx_buff[i]) {
				idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
				idpf_free(hw, cq->bi.rx_buff[i]);
			}
		}

		bi = (void *)cq->bi.rx_buff;
	} else {
		bi = (void *)cq->bi.tx_msg;
	}

	/* free the buffer header */
	idpf_free(hw, bi);
}

/**
 * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the memory used by the ring, buffers and other related structures
 */
void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	/* free ring buffers and the ring itself */
	idpf_ctlq_free_bufs(hw, cq);
	idpf_ctlq_free_desc_ring(hw, cq);
}

/**
 * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
 * @hw: pointer to hw struct
 * @cq: pointer to control queue struct
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	int ret_code;

	/* verify input for valid configuration */
	if (!cq->ring_size || !cq->buf_size)
		return -EINVAL;

	/* allocate the ring memory */
	ret_code = idpf_ctlq_alloc_desc_ring(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate buffers in the rings */
	ret_code = idpf_ctlq_alloc_bufs(hw, cq);
	if (ret_code)
		goto idpf_init_cq_free_ring;

	/* success! */
	return 0;

idpf_init_cq_free_ring:
	idpf_free_dma_mem(hw, &cq->desc_ring);
	return ret_code;
}
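
/* Illustrative usage sketch (hypothetical, not an API provided by this file):
 * a caller fills in cq_type, ring_size and buf_size before requesting ring
 * resources, and pairs every successful idpf_ctlq_alloc_ring_res() with
 * idpf_ctlq_dealloc_ring_res() on teardown.  The helper name and the sizes
 * used below are assumptions for illustration only:
 *
 *	static int example_setup_rx_mailbox(struct idpf_hw *hw,
 *					    struct idpf_ctlq_info *cq)
 *	{
 *		cq->cq_type = IDPF_CTLQ_TYPE_MAILBOX_RX;
 *		cq->ring_size = 64;
 *		cq->buf_size = 4096;
 *
 *		return idpf_ctlq_alloc_ring_res(hw, cq);
 *	}
 *
 * and on teardown:
 *
 *	idpf_ctlq_dealloc_ring_res(hw, cq);
 *
 * On failure, idpf_ctlq_alloc_ring_res() releases everything it allocated
 * itself (buffers are unwound and the descriptor ring is freed), so the
 * caller needs no extra cleanup for the error path.
 */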