/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2024 Marvell.
 */

#include <stdint.h>

#include <bus_pci_driver.h>

#include <rte_io.h>
#include <rte_malloc.h>

#include "odm.h"
#include "odm_priv.h"

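/* Free a vchan's instruction ring, completion ring and per-instruction size array. */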
static void
odm_vchan_resc_free(struct odm_dev *odm, int qno)
{
	struct odm_queue *vq = &odm->vq[qno];

	rte_memzone_free(vq->iring_mz);
	rte_memzone_free(vq->cring_mz);
	rte_free(vq->extra_ins_sz);

	vq->iring_mz = NULL;
	vq->cring_mz = NULL;
	vq->extra_ins_sz = NULL;
}

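/*
 * Send a two-word request to the PF through the shared mailbox data
 * registers, then poll word 0 until the PF sets the rsp bit and read
 * back the full reply.
 */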
static int
send_mbox_to_pf(struct odm_dev *odm, union odm_mbox_msg *msg, union odm_mbox_msg *rsp)
{
	int retry_cnt = ODM_MBOX_RETRY_CNT;
	union odm_mbox_msg pf_msg;

	msg->d.err = ODM_MBOX_ERR_CODE_MAX;
	odm_write64(msg->u[0], odm->rbase + ODM_MBOX_VF_PF_DATA(0));
	odm_write64(msg->u[1], odm->rbase + ODM_MBOX_VF_PF_DATA(1));

	pf_msg.u[0] = 0;
	pf_msg.u[1] = 0;
	pf_msg.u[0] = odm_read64(odm->rbase + ODM_MBOX_VF_PF_DATA(0));

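	/* Busy-wait for the rsp bit; give up after ODM_MBOX_RETRY_CNT reads. */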
	while (pf_msg.d.rsp == 0 && retry_cnt > 0) {
		pf_msg.u[0] = odm_read64(odm->rbase + ODM_MBOX_VF_PF_DATA(0));
		--retry_cnt;
	}

	if (retry_cnt <= 0)
		return -EBADE;

	pf_msg.u[1] = odm_read64(odm->rbase + ODM_MBOX_VF_PF_DATA(1));

	if (rsp) {
		rsp->u[0] = pf_msg.u[0];
		rsp->u[1] = pf_msg.u[1];
	}

	if (pf_msg.d.err != 0)
		return -EBADE;

	return 0;
}

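/* Program a vchan's instruction/completion ring sizes and base addresses. */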
static int
odm_queue_ring_config(struct odm_dev *odm, int vchan, int isize, int csize)
{
	union odm_vdma_ring_cfg_s ring_cfg = {0};
	struct odm_queue *vq = &odm->vq[vchan];

	if (vq->iring_mz == NULL || vq->cring_mz == NULL)
		return -EINVAL;

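	/* Ring sizes are programmed in 1 KB units, encoded as (size / 1 KB) - 1. */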
	ring_cfg.s.isize = (isize / 1024) - 1;
	ring_cfg.s.csize = (csize / 1024) - 1;

	odm_write64(ring_cfg.u, odm->rbase + ODM_VDMA_RING_CFG(vchan));
	odm_write64(vq->iring_mz->iova, odm->rbase + ODM_VDMA_IRING_BADDR(vchan));
	odm_write64(vq->cring_mz->iova, odm->rbase + ODM_VDMA_CRING_BADDR(vchan));

	return 0;
}

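/* Reset software ring state, reprogram and enable every configured vchan. */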
int
odm_enable(struct odm_dev *odm)
{
	struct odm_queue *vq;
	int qno, rc = 0;

	for (qno = 0; qno < odm->num_qs; qno++) {
		vq = &odm->vq[qno];

		vq->desc_idx = vq->stats.completed_offset;
		vq->pending_submit_len = 0;
		vq->pending_submit_cnt = 0;
		vq->iring_head = 0;
		vq->cring_head = 0;
		vq->ins_ring_head = 0;
		vq->iring_sz_available = vq->iring_max_words;

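		/*
		 * The iring is sized in 8-byte words and the cring in 4-byte
		 * completion entries; pass both sizes to hardware in bytes.
		 */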
		rc = odm_queue_ring_config(odm, qno, vq->iring_max_words * 8,
					   vq->cring_max_entry * 4);
		if (rc < 0)
			break;

		odm_write64(0x1, odm->rbase + ODM_VDMA_EN(qno));
	}

	return rc;
}

int
odm_disable(struct odm_dev *odm)
{
	int qno, wait_cnt;
	uint64_t val;

	/* Disable each queue and wait for it to become idle. */
	for (qno = 0; qno < odm->num_qs; qno++) {
		odm_write64(0x0, odm->rbase + ODM_VDMA_EN(qno));
		wait_cnt = ODM_IRING_IDLE_WAIT_CNT;
		do {
			val = odm_read64(odm->rbase + ODM_VDMA_IRING_BADDR(qno));
		} while (!(val & (1ULL << 63)) && (--wait_cnt > 0));
	}

	return 0;
}

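/*
 * Open a vchan on the PF and allocate its instruction ring, completion
 * ring and the array used to track per-instruction sizes.
 */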
int
odm_vchan_setup(struct odm_dev *odm, int vchan, int nb_desc)
{
	struct odm_queue *vq = &odm->vq[vchan];
	int isize, csize, max_nb_desc, rc = 0;
	union odm_mbox_msg mbox_msg;
	const struct rte_memzone *mz;
	char name[32];

	if (vq->iring_mz != NULL)
		odm_vchan_resc_free(odm, vchan);

	mbox_msg.u[0] = 0;
	mbox_msg.u[1] = 0;

	/* The ODM PF driver expects vfid to start at index 0. */
	mbox_msg.q.vfid = odm->vfid;
	mbox_msg.q.cmd = ODM_QUEUE_OPEN;
	mbox_msg.q.qidx = vchan;
	rc = send_mbox_to_pf(odm, &mbox_msg, &mbox_msg);
	if (rc < 0)
		return rc;

	/* Determine instruction & completion ring sizes. */

	/* Create an iring that can hold nb_desc entries; round the size up to a multiple of 1024 bytes. */
	isize = RTE_ALIGN_CEIL(nb_desc * ODM_IRING_ENTRY_SIZE_MAX * 8, 1024);
	isize = RTE_MIN(isize, ODM_IRING_MAX_SIZE);
	snprintf(name, sizeof(name), "vq%d_iring%d", odm->vfid, vchan);
	mz = rte_memzone_reserve_aligned(name, isize, SOCKET_ID_ANY, 0, 1024);
	if (mz == NULL)
		return -ENOMEM;
	vq->iring_mz = mz;
	vq->iring_max_words = isize / 8;

	/* Size the cring for the maximum number of instructions that can be in flight in hardware. */
	max_nb_desc = (isize / (ODM_IRING_ENTRY_SIZE_MIN * 8));
	csize = RTE_ALIGN_CEIL(max_nb_desc * sizeof(union odm_cmpl_ent_s), 1024);
	snprintf(name, sizeof(name), "vq%d_cring%d", odm->vfid, vchan);
	mz = rte_memzone_reserve_aligned(name, csize, SOCKET_ID_ANY, 0, 1024);
	if (mz == NULL) {
		rc = -ENOMEM;
		goto iring_free;
	}
	vq->cring_mz = mz;
	vq->cring_max_entry = csize / 4;

	/* Allocate memory to track the size of each instruction. */
	snprintf(name, sizeof(name), "vq%d_extra%d", odm->vfid, vchan);
	vq->extra_ins_sz = rte_zmalloc(name, vq->cring_max_entry, 0);
	if (vq->extra_ins_sz == NULL) {
		rc = -ENOMEM;
		goto cring_free;
	}

	vq->stats = (struct vq_stats){0};
	return rc;

cring_free:
	rte_memzone_free(odm->vq[vchan].cring_mz);
	vq->cring_mz = NULL;
iring_free:
	rte_memzone_free(odm->vq[vchan].iring_mz);
	vq->iring_mz = NULL;

	return rc;
}

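/*
 * First-time device setup: map BAR0, derive the zero-based VF id from the
 * PCI address and query the PF for the number of usable queues.
 */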
int
odm_dev_init(struct odm_dev *odm)
{
	struct rte_pci_device *pci_dev = odm->pci_dev;
	union odm_mbox_msg mbox_msg;
	uint16_t vfid;
	int rc;

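	/* vfid is derived from the PCI (devid, function) pair and made zero-based, as the PF expects. */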
	odm->rbase = pci_dev->mem_resource[0].addr;
	vfid = ((pci_dev->addr.devid & 0x1F) << 3) | (pci_dev->addr.function & 0x7);
	vfid -= 1;
	odm->vfid = vfid;
	odm->num_qs = 0;

	mbox_msg.u[0] = 0;
	mbox_msg.u[1] = 0;
	mbox_msg.q.vfid = odm->vfid;
	mbox_msg.q.cmd = ODM_DEV_INIT;
	rc = send_mbox_to_pf(odm, &mbox_msg, &mbox_msg);
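	/* A larger PF-reported VF count (nvfs) leaves fewer queues per VF. */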
	if (!rc)
		odm->max_qs = 1 << (4 - mbox_msg.d.nvfs);

	return rc;
}

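/* Notify the PF that the device is closing and release all vchan resources. */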
int
odm_dev_fini(struct odm_dev *odm)
{
	union odm_mbox_msg mbox_msg;
	int qno, rc = 0;

	mbox_msg.u[0] = 0;
	mbox_msg.u[1] = 0;
	mbox_msg.q.vfid = odm->vfid;
	mbox_msg.q.cmd = ODM_DEV_CLOSE;
	rc = send_mbox_to_pf(odm, &mbox_msg, &mbox_msg);

	for (qno = 0; qno < odm->num_qs; qno++)
		odm_vchan_resc_free(odm, qno);

	return rc;
}