/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2024 Intel Corporation
 */


#include "idpf_controlq.h"


/**
 * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 */
static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);

	cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
	if (!cq->desc_ring.va)
		return -ENOMEM;

	return 0;
}

/**
 * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Allocate the buffer head for all control queues, and if it's a receive
 * queue, allocate DMA buffers
 */
static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
				struct idpf_ctlq_info *cq)
{
	int i;

	/* Do not allocate DMA buffers for transmit queues */
	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
		return 0;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->bi.rx_buff = (struct idpf_dma_mem **)
		idpf_calloc(hw, cq->ring_size,
			    sizeof(struct idpf_dma_mem *));
	if (!cq->bi.rx_buff)
		return -ENOMEM;

	/* allocate the mapped buffers (except for the last one) */
	for (i = 0; i < cq->ring_size - 1; i++) {
		struct idpf_dma_mem *bi;
		int num = 1; /* number of idpf_dma_mem to be allocated */

		cq->bi.rx_buff[i] = (struct idpf_dma_mem *)idpf_calloc(hw, num,
						sizeof(struct idpf_dma_mem));
		if (!cq->bi.rx_buff[i])
			goto unwind_alloc_cq_bufs;

		bi = cq->bi.rx_buff[i];

		bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
		if (!bi->va) {
			/* unwind will not free the failed entry */
			idpf_free(hw, cq->bi.rx_buff[i]);
			goto unwind_alloc_cq_bufs;
		}
	}

	return 0;

unwind_alloc_cq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
		idpf_free(hw, cq->bi.rx_buff[i]);
	}
	idpf_free(hw, cq->bi.rx_buff);

	return -ENOMEM;
}

/**
 * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	idpf_free_dma_mem(hw, &cq->desc_ring);
}

/**
 * idpf_ctlq_free_bufs - Free CQ buffer info elements
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX
 * queues.  The upper layers are expected to manage freeing of TX DMA buffers
 */
static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	void *bi;

	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
		int i;

		/* free DMA buffers for rx queues */
		for (i = 0; i < cq->ring_size; i++) {
			if (cq->bi.rx_buff[i]) {
				idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
				idpf_free(hw, cq->bi.rx_buff[i]);
			}
		}

		bi = (void *)cq->bi.rx_buff;
	} else {
		bi = (void *)cq->bi.tx_msg;
	}

	/* free the buffer header */
	idpf_free(hw, bi);
}

/**
 * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the memory used by the ring, buffers and other related structures
 */
void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	/* free ring buffers and the ring itself */
	idpf_ctlq_free_bufs(hw, cq);
	idpf_ctlq_free_desc_ring(hw, cq);
}

/**
 * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
 * @hw: pointer to hw struct
 * @cq: pointer to control queue struct
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	int err;

	/* verify input for valid configuration */
	if (!cq->ring_size || !cq->buf_size)
		return -EINVAL;

	/* allocate the ring memory */
	err = idpf_ctlq_alloc_desc_ring(hw, cq);
	if (err)
		return err;

	/* allocate buffers in the rings */
	err = idpf_ctlq_alloc_bufs(hw, cq);
	if (err)
		goto idpf_init_cq_free_ring;

	/* success! */
	return 0;

idpf_init_cq_free_ring:
	idpf_free_dma_mem(hw, &cq->desc_ring);
	return err;
}
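
/*
 * Illustrative usage sketch (not part of the upstream driver): a minimal
 * example of how a caller might pair idpf_ctlq_alloc_ring_res() with
 * idpf_ctlq_dealloc_ring_res(). The example_* function name and the
 * ring_size/buf_size values are assumptions for illustration only; in the
 * driver this setup is normally driven from idpf_controlq.c when a control
 * queue is added. Kept under "#if 0" so it is never compiled.
 */
#if 0
static int example_setup_mailbox_rx(struct idpf_hw *hw,
				    struct idpf_ctlq_info *cq)
{
	int err;

	/* The caller fills in the queue type and sizes first;
	 * idpf_ctlq_alloc_ring_res() rejects a zero ring_size or buf_size.
	 */
	cq->cq_type = IDPF_CTLQ_TYPE_MAILBOX_RX;
	cq->ring_size = 64;	/* number of descriptors (example value) */
	cq->buf_size = 4096;	/* per-buffer DMA size (example value) */

	/* Do not hold the queue lock here; the allocation routines are not
	 * atomic context safe (see idpf_ctlq_alloc_ring_res() above).
	 */
	err = idpf_ctlq_alloc_ring_res(hw, cq);
	if (err)
		return err;

	/* ... register the queue and exchange mailbox messages ... */

	/* Tear-down frees the RX DMA buffers, the buffer pointer array and
	 * the descriptor ring.
	 */
	idpf_ctlq_dealloc_ring_res(hw, cq);

	return 0;
}
#endif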