/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2024 Marvell.
 */

#include <string.h>

#include <bus_pci_driver.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_dmadev.h>
#include <rte_dmadev_pmd.h>
#include <rte_memcpy.h>
#include <rte_pci.h>

#include "odm.h"

#define PCI_VENDOR_ID_CAVIUM	 0x177D
#define PCI_DEVID_ODYSSEY_ODM_VF 0xA08C
#define PCI_DRIVER_NAME		 dma_odm

static int
odm_dmadev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info, uint32_t size)
{
	struct odm_dev *odm = NULL;

	RTE_SET_USED(size);

	odm = dev->fp_obj->dev_private;

	dev_info->max_vchans = odm->max_qs;
	dev_info->nb_vchans = odm->num_qs;
	dev_info->dev_capa =
		(RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_COPY_SG);
	dev_info->max_desc = ODM_IRING_MAX_ENTRY;
	dev_info->min_desc = 1;
	dev_info->max_sges = ODM_MAX_POINTER;

	return 0;
}

static int
odm_dmadev_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf, uint32_t conf_sz)
{
	struct odm_dev *odm = NULL;

	RTE_SET_USED(conf_sz);

	odm = dev->fp_obj->dev_private;
	odm->num_qs = conf->nb_vchans;

	return 0;
}

static int
odm_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
		       const struct rte_dma_vchan_conf *conf, uint32_t conf_sz)
{
	struct odm_dev *odm = dev->fp_obj->dev_private;

	RTE_SET_USED(conf_sz);
	return odm_vchan_setup(odm, vchan, conf->nb_desc);
}

static int
odm_dmadev_start(struct rte_dma_dev *dev)
{
	struct odm_dev *odm = dev->fp_obj->dev_private;

	return odm_enable(odm);
}

static int
odm_dmadev_stop(struct rte_dma_dev *dev)
{
	struct odm_dev *odm = dev->fp_obj->dev_private;

	return odm_disable(odm);
}

static int
odm_dmadev_close(struct rte_dma_dev *dev)
{
	struct odm_dev *odm = dev->fp_obj->dev_private;

	odm_disable(odm);
	odm_dev_fini(odm);

	return 0;
}

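/*
 * Enqueue a single copy. Each copy uses ODM_IRING_ENTRY_SIZE_MIN (four)
 * 64-bit instruction ring words: the instruction header, one word holding
 * the first/last pointer lengths, the source IOVA and the destination IOVA.
 * The doorbell is written only when RTE_DMA_OP_FLAG_SUBMIT is set; otherwise
 * the words are accumulated in pending_submit_len/pending_submit_cnt until
 * odm_dmadev_submit() is called.
 */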
static int
odm_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t dst, uint32_t length,
		uint64_t flags)
{
	uint16_t pending_submit_len, pending_submit_cnt, iring_sz_available, iring_head;
	const int num_words = ODM_IRING_ENTRY_SIZE_MIN;
	struct odm_dev *odm = dev_private;
	uint64_t *iring_head_ptr;
	struct odm_queue *vq;
	uint64_t h;

	const union odm_instr_hdr_s hdr = {
		.s.ct = ODM_HDR_CT_CW_NC,
		.s.xtype = ODM_XTYPE_INTERNAL,
		.s.nfst = 1,
		.s.nlst = 1,
	};

	vq = &odm->vq[vchan];

	h = length;
	h |= ((uint64_t)length << 32);

	const uint16_t max_iring_words = vq->iring_max_words;

	iring_sz_available = vq->iring_sz_available;
	pending_submit_len = vq->pending_submit_len;
	pending_submit_cnt = vq->pending_submit_cnt;
	iring_head_ptr = vq->iring_mz->addr;
	iring_head = vq->iring_head;

	if (iring_sz_available < num_words)
		return -ENOSPC;

	if ((iring_head + num_words) >= max_iring_words) {
		iring_head_ptr[iring_head] = hdr.u;
		iring_head = (iring_head + 1) % max_iring_words;

		iring_head_ptr[iring_head] = h;
		iring_head = (iring_head + 1) % max_iring_words;

		iring_head_ptr[iring_head] = src;
		iring_head = (iring_head + 1) % max_iring_words;

		iring_head_ptr[iring_head] = dst;
		iring_head = (iring_head + 1) % max_iring_words;
	} else {
		iring_head_ptr[iring_head++] = hdr.u;
		iring_head_ptr[iring_head++] = h;
		iring_head_ptr[iring_head++] = src;
		iring_head_ptr[iring_head++] = dst;
	}

	pending_submit_len += num_words;

	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
		rte_wmb();
		odm_write64(pending_submit_len, odm->rbase + ODM_VDMA_DBELL(vchan));
		vq->stats.submitted += pending_submit_cnt + 1;
		vq->pending_submit_len = 0;
		vq->pending_submit_cnt = 0;
	} else {
		vq->pending_submit_len = pending_submit_len;
		vq->pending_submit_cnt++;
	}

	vq->iring_head = iring_head;

	vq->iring_sz_available = iring_sz_available - num_words;

	/* No extra space to save. Skip entry in extra space ring. */
	vq->ins_ring_head = (vq->ins_ring_head + 1) % vq->cring_max_entry;

	return vq->desc_idx++;
}

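/*
 * Pack a scatter-gather command into 'cmd'. After the header word, pointers
 * go in groups of three words: one word with two lengths (upper/lower 32
 * bits) followed by the two addresses. When nb_src is odd, the last source
 * pointer shares a group with the first destination pointer, and a leftover
 * destination pointer is padded with a zero word.
 */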
static inline void
odm_dmadev_fill_sg(uint64_t *cmd, const struct rte_dma_sge *src, const struct rte_dma_sge *dst,
		   uint16_t nb_src, uint16_t nb_dst, union odm_instr_hdr_s *hdr)
{
	int i = 0, j = 0;
	uint64_t h = 0;

	cmd[j++] = hdr->u;
	/* When nb_src is even */
	if (!(nb_src & 0x1)) {
		/* Fill the iring with src pointers */
		for (i = 1; i < nb_src; i += 2) {
			h = ((uint64_t)src[i].length << 32) | src[i - 1].length;
			cmd[j++] = h;
			cmd[j++] = src[i - 1].addr;
			cmd[j++] = src[i].addr;
		}

		/* Fill the iring with dst pointers */
		for (i = 1; i < nb_dst; i += 2) {
			h = ((uint64_t)dst[i].length << 32) | dst[i - 1].length;
			cmd[j++] = h;
			cmd[j++] = dst[i - 1].addr;
			cmd[j++] = dst[i].addr;
		}

		/* Handle the last dst pointer when nb_dst is odd */
		if (nb_dst & 0x1) {
			h = dst[nb_dst - 1].length;
			cmd[j++] = h;
			cmd[j++] = dst[nb_dst - 1].addr;
			cmd[j++] = 0;
		}
	} else {
		/* When nb_src is odd */

		/* Fill the iring with src pointers */
		for (i = 1; i < nb_src; i += 2) {
			h = ((uint64_t)src[i].length << 32) | src[i - 1].length;
			cmd[j++] = h;
			cmd[j++] = src[i - 1].addr;
			cmd[j++] = src[i].addr;
		}

		/* Pair the last src pointer with the first dst pointer */
		h = ((uint64_t)dst[0].length << 32) | src[nb_src - 1].length;
		cmd[j++] = h;
		cmd[j++] = src[nb_src - 1].addr;
		cmd[j++] = dst[0].addr;

		/* Fill the iring with the remaining dst pointers */
		for (i = 2; i < nb_dst; i += 2) {
			h = ((uint64_t)dst[i].length << 32) | dst[i - 1].length;
			cmd[j++] = h;
			cmd[j++] = dst[i - 1].addr;
			cmd[j++] = dst[i].addr;
		}

		/* Handle the last dst pointer when nb_dst is even */
		if (!(nb_dst & 0x1)) {
			h = dst[nb_dst - 1].length;
			cmd[j++] = h;
			cmd[j++] = dst[nb_dst - 1].addr;
			cmd[j++] = 0;
		}
	}
}

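/*
 * Enqueue a scatter-gather copy. At most four source and four destination
 * pointers are accepted and the total source and destination lengths must
 * match. If the command would wrap the instruction ring it is staged in a
 * local buffer and copied out in two pieces. Words used beyond the minimum
 * entry size are recorded in extra_ins_sz[] so they can be released on
 * completion.
 */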
static int
odm_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge *src,
		   const struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst, uint64_t flags)
{
	uint16_t pending_submit_len, pending_submit_cnt, iring_head, ins_ring_head;
	uint16_t iring_sz_available, i, nb, num_words;
	uint64_t cmd[ODM_IRING_ENTRY_SIZE_MAX];
	struct odm_dev *odm = dev_private;
	uint32_t s_sz = 0, d_sz = 0;
	uint64_t *iring_head_ptr;
	struct odm_queue *vq;
	union odm_instr_hdr_s hdr = {
		.s.ct = ODM_HDR_CT_CW_NC,
		.s.xtype = ODM_XTYPE_INTERNAL,
	};

	vq = &odm->vq[vchan];
	const uint16_t max_iring_words = vq->iring_max_words;

	iring_head_ptr = vq->iring_mz->addr;
	iring_head = vq->iring_head;
	iring_sz_available = vq->iring_sz_available;
	ins_ring_head = vq->ins_ring_head;
	pending_submit_len = vq->pending_submit_len;
	pending_submit_cnt = vq->pending_submit_cnt;

	if (unlikely(nb_src > 4 || nb_dst > 4))
		return -EINVAL;

	for (i = 0; i < nb_src; i++)
		s_sz += src[i].length;

	for (i = 0; i < nb_dst; i++)
		d_sz += dst[i].length;

	if (s_sz != d_sz)
		return -EINVAL;

	nb = nb_src + nb_dst;
	hdr.s.nfst = nb_src;
	hdr.s.nlst = nb_dst;
	num_words = 1 + 3 * (nb / 2 + (nb & 0x1));

	if (iring_sz_available < num_words)
		return -ENOSPC;

	if ((iring_head + num_words) >= max_iring_words) {
		uint16_t words_avail = max_iring_words - iring_head;
		uint16_t words_pend = num_words - words_avail;

		if (unlikely(words_avail + words_pend > ODM_IRING_ENTRY_SIZE_MAX))
			return -ENOSPC;

		odm_dmadev_fill_sg(cmd, src, dst, nb_src, nb_dst, &hdr);
		rte_memcpy((void *)&iring_head_ptr[iring_head], (void *)cmd, words_avail * 8);
		rte_memcpy((void *)iring_head_ptr, (void *)&cmd[words_avail], words_pend * 8);
		iring_head = words_pend;
	} else {
		odm_dmadev_fill_sg(&iring_head_ptr[iring_head], src, dst, nb_src, nb_dst, &hdr);
		iring_head += num_words;
	}

	pending_submit_len += num_words;

	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
		rte_wmb();
		odm_write64(pending_submit_len, odm->rbase + ODM_VDMA_DBELL(vchan));
		vq->stats.submitted += pending_submit_cnt + 1;
		vq->pending_submit_len = 0;
		vq->pending_submit_cnt = 0;
	} else {
		vq->pending_submit_len = pending_submit_len;
		vq->pending_submit_cnt++;
	}

	vq->iring_head = iring_head;

	vq->iring_sz_available = iring_sz_available - num_words;

	/* Save extra space used for the instruction. */
	vq->extra_ins_sz[ins_ring_head] = num_words - 4;

	vq->ins_ring_head = (ins_ring_head + 1) % vq->cring_max_entry;

	return vq->desc_idx++;
}

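/*
 * Enqueue a fill. Only the all-zeroes and all-ones patterns are supported,
 * mapped to the FILL0/FILL1 transfer types; any other pattern returns
 * -ENOTSUP.
 */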
static int
odm_dmadev_fill(void *dev_private, uint16_t vchan, uint64_t pattern, rte_iova_t dst,
		uint32_t length, uint64_t flags)
{
	uint16_t pending_submit_len, pending_submit_cnt, iring_sz_available, iring_head;
	const int num_words = ODM_IRING_ENTRY_SIZE_MIN;
	struct odm_dev *odm = dev_private;
	uint64_t *iring_head_ptr;
	struct odm_queue *vq;
	uint64_t h;

	vq = &odm->vq[vchan];

	union odm_instr_hdr_s hdr = {
		.s.ct = ODM_HDR_CT_CW_NC,
		.s.nfst = 0,
		.s.nlst = 1,
	};

	h = (uint64_t)length;

	switch (pattern) {
	case 0:
		hdr.s.xtype = ODM_XTYPE_FILL0;
		break;
	case 0xffffffffffffffff:
		hdr.s.xtype = ODM_XTYPE_FILL1;
		break;
	default:
		return -ENOTSUP;
	}

	const uint16_t max_iring_words = vq->iring_max_words;

	iring_sz_available = vq->iring_sz_available;
	pending_submit_len = vq->pending_submit_len;
	pending_submit_cnt = vq->pending_submit_cnt;
	iring_head_ptr = vq->iring_mz->addr;
	iring_head = vq->iring_head;

	if (iring_sz_available < num_words)
		return -ENOSPC;

	if ((iring_head + num_words) >= max_iring_words) {
		iring_head_ptr[iring_head] = hdr.u;
		iring_head = (iring_head + 1) % max_iring_words;

		iring_head_ptr[iring_head] = h;
		iring_head = (iring_head + 1) % max_iring_words;

		iring_head_ptr[iring_head] = dst;
		iring_head = (iring_head + 1) % max_iring_words;

		iring_head_ptr[iring_head] = 0;
		iring_head = (iring_head + 1) % max_iring_words;
	} else {
		iring_head_ptr[iring_head] = hdr.u;
		iring_head_ptr[iring_head + 1] = h;
		iring_head_ptr[iring_head + 2] = dst;
		iring_head_ptr[iring_head + 3] = 0;
		iring_head += num_words;
	}

	pending_submit_len += num_words;

	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
		rte_wmb();
		odm_write64(pending_submit_len, odm->rbase + ODM_VDMA_DBELL(vchan));
		vq->stats.submitted += pending_submit_cnt + 1;
		vq->pending_submit_len = 0;
		vq->pending_submit_cnt = 0;
	} else {
		vq->pending_submit_len = pending_submit_len;
		vq->pending_submit_cnt++;
	}

	vq->iring_head = iring_head;
	vq->iring_sz_available = iring_sz_available - num_words;

	/* No extra space to save. Skip entry in extra space ring. */
	vq->ins_ring_head = (vq->ins_ring_head + 1) % vq->cring_max_entry;

	return vq->desc_idx++;
}

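/*
 * Return completed operations. Each valid completion ring entry releases the
 * instruction ring words consumed by its command (four words plus any extra
 * recorded in extra_ins_sz[]) and is cleared so the slot can be reused.
 */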
static uint16_t
odm_dmadev_completed(void *dev_private, uint16_t vchan, const uint16_t nb_cpls, uint16_t *last_idx,
		     bool *has_error)
{
	const union odm_cmpl_ent_s cmpl_zero = {0};
	uint16_t cring_head, iring_sz_available;
	struct odm_dev *odm = dev_private;
	union odm_cmpl_ent_s cmpl;
	struct odm_queue *vq;
	uint64_t nb_err = 0;
	uint32_t *cmpl_ptr;
	int cnt;

	vq = &odm->vq[vchan];
	const uint32_t *base_addr = vq->cring_mz->addr;
	const uint16_t cring_max_entry = vq->cring_max_entry;

	cring_head = vq->cring_head;
	iring_sz_available = vq->iring_sz_available;

	if (unlikely(vq->stats.submitted == vq->stats.completed)) {
		*last_idx = (vq->stats.completed_offset + vq->stats.completed - 1) & 0xFFFF;
		return 0;
	}

	for (cnt = 0; cnt < nb_cpls; cnt++) {
		cmpl_ptr = RTE_PTR_ADD(base_addr, cring_head * sizeof(cmpl));
		cmpl.u = rte_atomic_load_explicit((RTE_ATOMIC(uint32_t) *)cmpl_ptr,
						  rte_memory_order_relaxed);
		if (!cmpl.s.valid)
			break;

		if (cmpl.s.cmp_code)
			nb_err++;

		/* Free space for enqueue */
		iring_sz_available += 4 + vq->extra_ins_sz[cring_head];

		/* Clear instruction extra space */
		vq->extra_ins_sz[cring_head] = 0;

		rte_atomic_store_explicit((RTE_ATOMIC(uint32_t) *)cmpl_ptr, cmpl_zero.u,
					  rte_memory_order_relaxed);
		cring_head = (cring_head + 1) % cring_max_entry;
	}

	vq->stats.errors += nb_err;

	if (unlikely(has_error != NULL && nb_err))
		*has_error = true;

	vq->cring_head = cring_head;
	vq->iring_sz_available = iring_sz_available;

	vq->stats.completed += cnt;

	*last_idx = (vq->stats.completed_offset + vq->stats.completed - 1) & 0xFFFF;

	return cnt;
}

static uint16_t
odm_dmadev_completed_status(void *dev_private, uint16_t vchan, const uint16_t nb_cpls,
			    uint16_t *last_idx, enum rte_dma_status_code *status)
{
	const union odm_cmpl_ent_s cmpl_zero = {0};
	uint16_t cring_head, iring_sz_available;
	struct odm_dev *odm = dev_private;
	union odm_cmpl_ent_s cmpl;
	struct odm_queue *vq;
	uint32_t *cmpl_ptr;
	int cnt;

	vq = &odm->vq[vchan];
	const uint32_t *base_addr = vq->cring_mz->addr;
	const uint16_t cring_max_entry = vq->cring_max_entry;

	cring_head = vq->cring_head;
	iring_sz_available = vq->iring_sz_available;

	if (vq->stats.submitted == vq->stats.completed) {
		*last_idx = (vq->stats.completed_offset + vq->stats.completed - 1) & 0xFFFF;
		return 0;
	}

#ifdef ODM_DEBUG
	ODM_LOG(DEBUG, "cring_head: 0x%" PRIx16, cring_head);
	ODM_LOG(DEBUG, "Submitted: 0x%" PRIx64, vq->stats.submitted);
	ODM_LOG(DEBUG, "Completed: 0x%" PRIx64, vq->stats.completed);
	ODM_LOG(DEBUG, "Hardware count: 0x%" PRIx64, odm_read64(odm->rbase + ODM_VDMA_CNT(vchan)));
#endif

	for (cnt = 0; cnt < nb_cpls; cnt++) {
		cmpl_ptr = RTE_PTR_ADD(base_addr, cring_head * sizeof(cmpl));
		cmpl.u = rte_atomic_load_explicit((RTE_ATOMIC(uint32_t) *)cmpl_ptr,
						  rte_memory_order_relaxed);
		if (!cmpl.s.valid)
			break;

		status[cnt] = cmpl.s.cmp_code;

		if (cmpl.s.cmp_code)
			vq->stats.errors++;

		/* Free space for enqueue */
		iring_sz_available += 4 + vq->extra_ins_sz[cring_head];

		/* Clear instruction extra space */
		vq->extra_ins_sz[cring_head] = 0;

		rte_atomic_store_explicit((RTE_ATOMIC(uint32_t) *)cmpl_ptr, cmpl_zero.u,
					  rte_memory_order_relaxed);
		cring_head = (cring_head + 1) % cring_max_entry;
	}

	vq->cring_head = cring_head;
	vq->iring_sz_available = iring_sz_available;

	vq->stats.completed += cnt;

	*last_idx = (vq->stats.completed_offset + vq->stats.completed - 1) & 0xFFFF;

	return cnt;
}

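/*
 * Ring the doorbell for instructions that were enqueued without
 * RTE_DMA_OP_FLAG_SUBMIT.
 */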
static int
odm_dmadev_submit(void *dev_private, uint16_t vchan)
{
	struct odm_dev *odm = dev_private;
	uint16_t pending_submit_len;
	struct odm_queue *vq;

	vq = &odm->vq[vchan];
	pending_submit_len = vq->pending_submit_len;

	if (pending_submit_len == 0)
		return 0;

	rte_wmb();
	odm_write64(pending_submit_len, odm->rbase + ODM_VDMA_DBELL(vchan));
	vq->pending_submit_len = 0;
	vq->stats.submitted += vq->pending_submit_cnt;
	vq->pending_submit_cnt = 0;

	return 0;
}

static uint16_t
odm_dmadev_burst_capacity(const void *dev_private, uint16_t vchan)
{
	const struct odm_dev *odm = dev_private;
	const struct odm_queue *vq;

	vq = &odm->vq[vchan];
	return (vq->iring_sz_available / ODM_IRING_ENTRY_SIZE_MIN);
}

static int
odm_stats_get(const struct rte_dma_dev *dev, uint16_t vchan, struct rte_dma_stats *rte_stats,
	      uint32_t size)
{
	struct odm_dev *odm = dev->fp_obj->dev_private;

	if (size < sizeof(struct rte_dma_stats))
		return -EINVAL;
	if (rte_stats == NULL)
		return -EINVAL;

	if (vchan != RTE_DMA_ALL_VCHAN) {
		struct rte_dma_stats *stats = (struct rte_dma_stats *)&odm->vq[vchan].stats;

		*rte_stats = *stats;
	} else {
		int i;

		for (i = 0; i < odm->num_qs; i++) {
			struct rte_dma_stats *stats = (struct rte_dma_stats *)&odm->vq[i].stats;

			rte_stats->submitted += stats->submitted;
			rte_stats->completed += stats->completed;
			rte_stats->errors += stats->errors;
		}
	}

	return 0;
}

static void
odm_vq_stats_reset(struct vq_stats *vq_stats)
{
	vq_stats->completed_offset += vq_stats->completed;
	vq_stats->completed = 0;
	vq_stats->errors = 0;
	vq_stats->submitted = 0;
}

static int
odm_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
{
	struct odm_dev *odm = dev->fp_obj->dev_private;
	struct vq_stats *vq_stats;
	int i;

	if (vchan != RTE_DMA_ALL_VCHAN) {
		vq_stats = &odm->vq[vchan].stats;
		odm_vq_stats_reset(vq_stats);
	} else {
		for (i = 0; i < odm->num_qs; i++) {
			vq_stats = &odm->vq[i].stats;
			odm_vq_stats_reset(vq_stats);
		}
	}

	return 0;
}

static const struct rte_dma_dev_ops odm_dmadev_ops = {
	.dev_close = odm_dmadev_close,
	.dev_configure = odm_dmadev_configure,
	.dev_info_get = odm_dmadev_info_get,
	.dev_start = odm_dmadev_start,
	.dev_stop = odm_dmadev_stop,
	.stats_get = odm_stats_get,
	.stats_reset = odm_stats_reset,
	.vchan_setup = odm_dmadev_vchan_setup,
};

static int
odm_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev)
{
	char name[RTE_DEV_NAME_MAX_LEN];
	struct odm_dev *odm = NULL;
	struct rte_dma_dev *dmadev;
	int rc;

	if (!pci_dev->mem_resource[0].addr)
		return -ENODEV;

	memset(name, 0, sizeof(name));
	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dmadev = rte_dma_pmd_allocate(name, pci_dev->device.numa_node, sizeof(*odm));
	if (dmadev == NULL) {
		ODM_LOG(ERR, "DMA device allocation failed for %s", name);
		return -ENOMEM;
	}

	ODM_LOG(INFO, "DMA device %s probed", name);
	odm = dmadev->data->dev_private;

	dmadev->device = &pci_dev->device;
	dmadev->fp_obj->dev_private = odm;
	dmadev->dev_ops = &odm_dmadev_ops;

	dmadev->fp_obj->copy = odm_dmadev_copy;
	dmadev->fp_obj->copy_sg = odm_dmadev_copy_sg;
	dmadev->fp_obj->fill = odm_dmadev_fill;
	dmadev->fp_obj->submit = odm_dmadev_submit;
	dmadev->fp_obj->completed = odm_dmadev_completed;
	dmadev->fp_obj->completed_status = odm_dmadev_completed_status;
	dmadev->fp_obj->burst_capacity = odm_dmadev_burst_capacity;

	odm->pci_dev = pci_dev;

	rc = odm_dev_init(odm);
	if (rc < 0)
		goto dma_pmd_release;

	return 0;

dma_pmd_release:
	rte_dma_pmd_release(name);

	return rc;
}

static int
odm_dmadev_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_DEV_NAME_MAX_LEN];

	memset(name, 0, sizeof(name));
	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	return rte_dma_pmd_release(name);
}

static const struct rte_pci_id odm_dma_pci_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_ODYSSEY_ODM_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver odm_dmadev = {
	.id_table = odm_dma_pci_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = odm_dmadev_probe,
	.remove = odm_dmadev_remove,
};

RTE_PMD_REGISTER_PCI(PCI_DRIVER_NAME, odm_dmadev);
RTE_PMD_REGISTER_PCI_TABLE(PCI_DRIVER_NAME, odm_dma_pci_map);
RTE_PMD_REGISTER_KMOD_DEP(PCI_DRIVER_NAME, "vfio-pci");
RTE_LOG_REGISTER_DEFAULT(odm_logtype, NOTICE);