xref: /dpdk/drivers/common/idpf/base/idpf_controlq_setup.c (revision 4b53e9802b6b6040ad5622b1414aaa93d9581d0c)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2022 Intel Corporation
 */


#include "idpf_controlq.h"


/**
 * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 */
static int
idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
			  struct idpf_ctlq_info *cq)
{
	size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);

	cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
	if (!cq->desc_ring.va)
		return -ENOMEM;

	return 0;
}

/**
 * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Allocate the buffer head for all control queues, and if it's a receive
 * queue, allocate DMA buffers
 */
static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
				struct idpf_ctlq_info *cq)
{
	int i = 0;

	/* Do not allocate DMA buffers for transmit queues */
	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
		return 0;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->bi.rx_buff = (struct idpf_dma_mem **)
		idpf_calloc(hw, cq->ring_size,
			    sizeof(struct idpf_dma_mem *));
	if (!cq->bi.rx_buff)
		return -ENOMEM;

	/* allocate the mapped buffers (except for the last one) */
	for (i = 0; i < cq->ring_size - 1; i++) {
		struct idpf_dma_mem *bi;
		int num = 1; /* number of idpf_dma_mem to be allocated */

		cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc(hw, num,
						sizeof(struct idpf_dma_mem));
		if (!cq->bi.rx_buff[i])
			goto unwind_alloc_cq_bufs;

		bi = cq->bi.rx_buff[i];

		bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
		if (!bi->va) {
			/* unwind will not free the failed entry */
			idpf_free(hw, cq->bi.rx_buff[i]);
			goto unwind_alloc_cq_bufs;
		}
	}

	return 0;

unwind_alloc_cq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
		idpf_free(hw, cq->bi.rx_buff[i]);
	}
	idpf_free(hw, cq->bi.rx_buff);

	return -ENOMEM;
}
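
/*
 * Illustrative sketch only (not part of the driver): with the scheme above,
 * a receive mailbox of ring_size entries owns one descriptor ring of
 * ring_size * sizeof(struct idpf_ctlq_desc) bytes plus (ring_size - 1)
 * DMA buffers of buf_size bytes each; the last ring entry is deliberately
 * left without a buffer. The hypothetical helper below just restates that
 * arithmetic.
 */
static inline size_t
idpf_ctlq_example_dma_footprint(const struct idpf_ctlq_info *cq)
{
	size_t bytes = cq->ring_size * sizeof(struct idpf_ctlq_desc);

	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
		bytes += (size_t)(cq->ring_size - 1) * cq->buf_size;

	return bytes;
}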

/**
 * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	idpf_free_dma_mem(hw, &cq->desc_ring);
}

/**
 * idpf_ctlq_free_bufs - Free CQ buffer info elements
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX
 * queues.  The upper layers are expected to manage freeing of TX DMA buffers
 */
static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	void *bi;

	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
		int i;

		/* free DMA buffers for rx queues */
		for (i = 0; i < cq->ring_size; i++) {
			if (cq->bi.rx_buff[i]) {
				idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
				idpf_free(hw, cq->bi.rx_buff[i]);
			}
		}

		bi = (void *)cq->bi.rx_buff;
	} else {
		bi = (void *)cq->bi.tx_msg;
	}

	/* free the buffer header */
	idpf_free(hw, bi);
}
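
/*
 * Illustrative sketch only (not part of the driver): as noted above, TX
 * payload buffers are owned by the layer that posted them. A hypothetical
 * owner that obtained its buffer with idpf_alloc_dma_mem() would release
 * it itself once the message completes, roughly like this:
 */
static inline void
idpf_ctlq_example_release_tx_buf(struct idpf_hw *hw, struct idpf_dma_mem *buf)
{
	/* free the DMA memory the caller allocated for its own TX message */
	idpf_free_dma_mem(hw, buf);
}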

/**
 * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the memory used by the ring, buffers and other related structures
 */
void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	/* free ring buffers and the ring itself */
	idpf_ctlq_free_bufs(hw, cq);
	idpf_ctlq_free_desc_ring(hw, cq);
}

/**
 * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
 * @hw: pointer to hw struct
 * @cq: pointer to control queue struct
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	int ret_code;

	/* verify input for valid configuration */
	if (!cq->ring_size || !cq->buf_size)
		return -EINVAL;

	/* allocate the ring memory */
	ret_code = idpf_ctlq_alloc_desc_ring(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate buffers in the rings */
	ret_code = idpf_ctlq_alloc_bufs(hw, cq);
	if (ret_code)
		goto idpf_init_cq_free_ring;

	/* success! */
	return 0;

idpf_init_cq_free_ring:
	idpf_free_dma_mem(hw, &cq->desc_ring);
	return ret_code;
}
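
/*
 * Illustrative usage sketch only (not part of the driver): a hypothetical
 * caller bringing up a receive mailbox would fill in the sizing fields and
 * then call idpf_ctlq_alloc_ring_res() without holding the control queue
 * lock, releasing everything with idpf_ctlq_dealloc_ring_res() on teardown.
 * The field and constant names are the ones used above; the wrapper and the
 * example sizes are made up.
 */
static inline int
idpf_ctlq_example_setup_rx_mailbox(struct idpf_hw *hw,
				   struct idpf_ctlq_info *cq)
{
	int err;

	cq->cq_type = IDPF_CTLQ_TYPE_MAILBOX_RX;
	cq->ring_size = 64;	/* example descriptor count */
	cq->buf_size = 4096;	/* example bytes per RX DMA buffer */

	/* allocates the descriptor ring and (ring_size - 1) RX buffers */
	err = idpf_ctlq_alloc_ring_res(hw, cq);
	if (err)
		return err;

	/*
	 * ... post the RX buffers and register the queue here; on shutdown,
	 * idpf_ctlq_dealloc_ring_res(hw, cq) frees the buffers and the ring.
	 */
	return 0;
}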