/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2024 Intel Corporation
 */

#include "idpf_controlq.h"

/**
 * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 */
static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);

	cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
	if (!cq->desc_ring.va)
		return -ENOMEM;

	return 0;
}

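/*
 * Sizing sketch (illustrative, not part of the driver): with a 64-entry
 * ring, and assuming the 32-byte descriptor layout common to Intel
 * control queues (struct idpf_ctlq_desc is defined elsewhere), the call
 * above makes a single contiguous DMA allocation of
 *
 *	64 * sizeof(struct idpf_ctlq_desc) = 64 * 32 = 2048 bytes
 */
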
/**
 * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Allocate the buffer head for all control queues, and if it's a receive
 * queue, allocate DMA buffers
 */
static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
				struct idpf_ctlq_info *cq)
{
	int i;

	/* Do not allocate DMA buffers for transmit queues */
	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
		return 0;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->bi.rx_buff = (struct idpf_dma_mem **)
		idpf_calloc(hw, cq->ring_size,
			    sizeof(struct idpf_dma_mem *));
	if (!cq->bi.rx_buff)
		return -ENOMEM;

	/* Allocate the mapped buffers, except for the last entry: the ring
	 * is never filled completely, so one descriptor is always left
	 * without a posted buffer.
	 */
	for (i = 0; i < cq->ring_size - 1; i++) {
		struct idpf_dma_mem *bi;
		int num = 1; /* number of idpf_dma_mem to be allocated */

		cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc(hw, num,
						sizeof(struct idpf_dma_mem));
		if (!cq->bi.rx_buff[i])
			goto unwind_alloc_cq_bufs;

		bi = cq->bi.rx_buff[i];

		bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
		if (!bi->va) {
			/* unwind will not free the failed entry */
			idpf_free(hw, cq->bi.rx_buff[i]);
			goto unwind_alloc_cq_bufs;
		}
	}

	return 0;

unwind_alloc_cq_bufs:
	/* don't try to free the one that failed... */
	for (i--; i >= 0; i--) {
		idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
		idpf_free(hw, cq->bi.rx_buff[i]);
	}
	idpf_free(hw, cq->bi.rx_buff);

	return -ENOMEM;
}

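/*
 * Resulting layout for an RX mailbox queue (illustrative):
 *
 *	cq->bi.rx_buff --> [0] --> idpf_dma_mem --> buf_size DMA buffer
 *	                   [1] --> idpf_dma_mem --> buf_size DMA buffer
 *	                   ...
 *	                   [ring_size - 1] --> NULL (left unposted)
 *
 * idpf_calloc() zero-fills the pointer array, so the last entry stays
 * NULL without being touched by the loop above.
 */
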
/**
 * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	idpf_free_dma_mem(hw, &cq->desc_ring);
}

/**
 * idpf_ctlq_free_bufs - Free CQ buffer info elements
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the DMA buffers for RX queues, and the buffer head for both RX and
 * TX queues.  The upper layers are expected to manage freeing of TX DMA
 * buffers.
 */
static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	void *bi;

	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
		int i;

		/* free DMA buffers for RX queues */
		for (i = 0; i < cq->ring_size; i++) {
			if (cq->bi.rx_buff[i]) {
				idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
				idpf_free(hw, cq->bi.rx_buff[i]);
			}
		}

		bi = (void *)cq->bi.rx_buff;
	} else {
		bi = (void *)cq->bi.tx_msg;
	}

	/* free the buffer head */
	idpf_free(hw, bi);
}

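/*
 * Note the asymmetry with idpf_ctlq_alloc_bufs(): nothing is allocated
 * there for TX queues, so the bi.tx_msg array released here must have
 * been set up by the control queue add path elsewhere in the driver.
 * DMA buffers attached to in-flight TX messages stay owned by the upper
 * layers, as the comment above says.
 */
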
/**
 * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the memory used by the ring, buffers and other related structures
 */
void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	/* free ring buffers and the ring itself */
	idpf_ctlq_free_bufs(hw, cq);
	idpf_ctlq_free_desc_ring(hw, cq);
}

/**
 * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
 * @hw: pointer to hw struct
 * @cq: pointer to control queue struct
 *
 * Do *NOT* hold the lock when calling this function, as the memory
 * allocation routines called here are not safe to use from atomic context.
 */
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	int err;

	/* verify input for valid configuration */
	if (!cq->ring_size || !cq->buf_size)
		return -EINVAL;

	/* allocate the ring memory */
	err = idpf_ctlq_alloc_desc_ring(hw, cq);
	if (err)
		return err;

	/* allocate buffers in the rings */
	err = idpf_ctlq_alloc_bufs(hw, cq);
	if (err)
		goto idpf_init_cq_free_ring;

	/* success! */
	return 0;

idpf_init_cq_free_ring:
	idpf_ctlq_free_desc_ring(hw, cq);
	return err;
}
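
/*
 * Usage sketch (illustrative only; in the driver the caller is the
 * control queue add/remove path, e.g. idpf_ctlq_add() in
 * idpf_controlq.c):
 *
 *	cq->cq_type = IDPF_CTLQ_TYPE_MAILBOX_RX;
 *	cq->ring_size = 64;	// typical mailbox length; an assumption
 *	cq->buf_size = 4096;	// typical mailbox buffer size; an assumption
 *
 *	err = idpf_ctlq_alloc_ring_res(hw, cq);
 *	if (err)
 *		return err;
 *	...
 *	idpf_ctlq_dealloc_ring_res(hw, cq);
 */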
179