/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2024 Marvell.
 */

#include <string.h>

#include <bus_pci_driver.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_dmadev.h>
#include <rte_dmadev_pmd.h>
#include <rte_memcpy.h>
#include <rte_pci.h>

#include "odm.h"

#define PCI_VENDOR_ID_CAVIUM	 0x177D
#define PCI_DEVID_ODYSSEY_ODM_VF 0xA08C
#define PCI_DRIVER_NAME		 dma_odm

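/*
 * DMA driver for the Marvell Odyssey (ODM) DMA VF device. Each vchan maps to
 * one hardware queue with an instruction ring (iring) that software fills
 * with 64-bit command words and a completion ring (cring) that hardware
 * writes 32-bit completion entries into.
 *
 * Illustrative usage sketch from the application side (not part of the
 * driver); dev_id is the id of the probed ODM device, src_iova/dst_iova/len
 * are placeholders for application buffers, and error handling is omitted:
 *
 *	uint16_t last_idx;
 *	bool has_error = false;
 *	struct rte_dma_conf dconf = { .nb_vchans = 1 };
 *	struct rte_dma_vchan_conf vconf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = 1024,
 *	};
 *
 *	rte_dma_configure(dev_id, &dconf);
 *	rte_dma_vchan_setup(dev_id, 0, &vconf);
 *	rte_dma_start(dev_id);
 *	rte_dma_copy(dev_id, 0, src_iova, dst_iova, len, RTE_DMA_OP_FLAG_SUBMIT);
 *	rte_dma_completed(dev_id, 0, 1, &last_idx, &has_error);
 */
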
static int
odm_dmadev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info, uint32_t size)
{
	struct odm_dev *odm = NULL;

	RTE_SET_USED(size);

	odm = dev->fp_obj->dev_private;

	dev_info->max_vchans = odm->max_qs;
	dev_info->nb_vchans = odm->num_qs;
	dev_info->dev_capa =
		(RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_COPY_SG);
	dev_info->max_desc = ODM_IRING_MAX_ENTRY;
	dev_info->min_desc = 1;
	dev_info->max_sges = ODM_MAX_POINTER;

	return 0;
}

static int
odm_dmadev_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf, uint32_t conf_sz)
{
	struct odm_dev *odm = NULL;

	RTE_SET_USED(conf_sz);

	odm = dev->fp_obj->dev_private;
	odm->num_qs = conf->nb_vchans;

	return 0;
}

static int
odm_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
		       const struct rte_dma_vchan_conf *conf, uint32_t conf_sz)
{
	struct odm_dev *odm = dev->fp_obj->dev_private;

	RTE_SET_USED(conf_sz);
	return odm_vchan_setup(odm, vchan, conf->nb_desc);
}

static int
odm_dmadev_start(struct rte_dma_dev *dev)
{
	struct odm_dev *odm = dev->fp_obj->dev_private;

	return odm_enable(odm);
}

static int
odm_dmadev_stop(struct rte_dma_dev *dev)
{
	struct odm_dev *odm = dev->fp_obj->dev_private;

	return odm_disable(odm);
}

static int
odm_dmadev_close(struct rte_dma_dev *dev)
{
	struct odm_dev *odm = dev->fp_obj->dev_private;

	odm_disable(odm);
	odm_dev_fini(odm);

	return 0;
}

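/*
 * Enqueue a single copy. Each copy consumes ODM_IRING_ENTRY_SIZE_MIN (four)
 * 64-bit instruction words: the header, the transfer length packed into both
 * 32-bit halves of one word, the source IOVA and the destination IOVA. The
 * slow path below handles entries that wrap past the end of the instruction
 * ring. The doorbell is rung only when RTE_DMA_OP_FLAG_SUBMIT is set;
 * otherwise the words are accounted as pending for a later submit.
 */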
static int
odm_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t dst, uint32_t length,
		uint64_t flags)
{
	uint16_t pending_submit_len, pending_submit_cnt, iring_sz_available, iring_head;
	const int num_words = ODM_IRING_ENTRY_SIZE_MIN;
	struct odm_dev *odm = dev_private;
	uint64_t *iring_head_ptr;
	struct odm_queue *vq;
	uint64_t h;

	const union odm_instr_hdr_s hdr = {
		.s.ct = ODM_HDR_CT_CW_NC,
		.s.xtype = ODM_XTYPE_INTERNAL,
		.s.nfst = 1,
		.s.nlst = 1,
	};

	vq = &odm->vq[vchan];

	h = length;
	h |= ((uint64_t)length << 32);

	const uint16_t max_iring_words = vq->iring_max_words;

	iring_sz_available = vq->iring_sz_available;
	pending_submit_len = vq->pending_submit_len;
	pending_submit_cnt = vq->pending_submit_cnt;
	iring_head_ptr = vq->iring_mz->addr;
	iring_head = vq->iring_head;

	if (iring_sz_available < num_words)
		return -ENOSPC;

	if ((iring_head + num_words) >= max_iring_words) {

		iring_head_ptr[iring_head] = hdr.u;
		iring_head = (iring_head + 1) % max_iring_words;

		iring_head_ptr[iring_head] = h;
		iring_head = (iring_head + 1) % max_iring_words;

		iring_head_ptr[iring_head] = src;
		iring_head = (iring_head + 1) % max_iring_words;

		iring_head_ptr[iring_head] = dst;
		iring_head = (iring_head + 1) % max_iring_words;
	} else {
		iring_head_ptr[iring_head++] = hdr.u;
		iring_head_ptr[iring_head++] = h;
		iring_head_ptr[iring_head++] = src;
		iring_head_ptr[iring_head++] = dst;
	}

	pending_submit_len += num_words;

	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
		rte_wmb();
		odm_write64(pending_submit_len, odm->rbase + ODM_VDMA_DBELL(vchan));
		vq->stats.submitted += pending_submit_cnt + 1;
		vq->pending_submit_len = 0;
		vq->pending_submit_cnt = 0;
	} else {
		vq->pending_submit_len = pending_submit_len;
		vq->pending_submit_cnt++;
	}

	vq->iring_head = iring_head;

	vq->iring_sz_available = iring_sz_available - num_words;

	/* No extra space to save. Skip entry in extra space ring. */
	vq->ins_ring_head = (vq->ins_ring_head + 1) % vq->cring_max_entry;

	return vq->desc_idx++;
}

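/*
 * Pack a scatter-gather instruction into cmd[]. The header word comes first,
 * followed by groups of three words per pointer pair: one word carrying two
 * 32-bit lengths and two words carrying the corresponding IOVAs. Source
 * pointers are packed first, then destination pointers. When nb_src is odd,
 * the leftover source pointer is paired with the first destination pointer;
 * a leftover destination pointer is padded with a zero word.
 */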
static inline void
odm_dmadev_fill_sg(uint64_t *cmd, const struct rte_dma_sge *src, const struct rte_dma_sge *dst,
		   uint16_t nb_src, uint16_t nb_dst, union odm_instr_hdr_s *hdr)
{
	int i = 0, j = 0;
	uint64_t h = 0;

	cmd[j++] = hdr->u;
	/* When nb_src is even */
	if (!(nb_src & 0x1)) {
		/* Fill the iring with src pointers */
		for (i = 1; i < nb_src; i += 2) {
			h = ((uint64_t)src[i].length << 32) | src[i - 1].length;
			cmd[j++] = h;
			cmd[j++] = src[i - 1].addr;
			cmd[j++] = src[i].addr;
		}

		/* Fill the iring with dst pointers */
		for (i = 1; i < nb_dst; i += 2) {
			h = ((uint64_t)dst[i].length << 32) | dst[i - 1].length;
			cmd[j++] = h;
			cmd[j++] = dst[i - 1].addr;
			cmd[j++] = dst[i].addr;
		}

		/* Handle the last dst pointer when nb_dst is odd */
		if (nb_dst & 0x1) {
			h = dst[nb_dst - 1].length;
			cmd[j++] = h;
			cmd[j++] = dst[nb_dst - 1].addr;
			cmd[j++] = 0;
		}
	} else {
		/* When nb_src is odd */

		/* Fill the iring with src pointers */
		for (i = 1; i < nb_src; i += 2) {
			h = ((uint64_t)src[i].length << 32) | src[i - 1].length;
			cmd[j++] = h;
			cmd[j++] = src[i - 1].addr;
			cmd[j++] = src[i].addr;
		}

		/* Handle the last src pointer */
		h = ((uint64_t)dst[0].length << 32) | src[nb_src - 1].length;
		cmd[j++] = h;
		cmd[j++] = src[nb_src - 1].addr;
		cmd[j++] = dst[0].addr;

		/* Fill the iring with dst pointers */
		for (i = 2; i < nb_dst; i += 2) {
			h = ((uint64_t)dst[i].length << 32) | dst[i - 1].length;
			cmd[j++] = h;
			cmd[j++] = dst[i - 1].addr;
			cmd[j++] = dst[i].addr;
		}

		/* Handle the last dst pointer when nb_dst is even */
		if (!(nb_dst & 0x1)) {
			h = dst[nb_dst - 1].length;
			cmd[j++] = h;
			cmd[j++] = dst[nb_dst - 1].addr;
			cmd[j++] = 0;
		}
	}
}

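/*
 * Enqueue a scatter-gather copy. At most four pointers are accepted per side
 * and the total source and destination byte counts must match. When the
 * instruction would wrap past the end of the instruction ring it is built in
 * a stack buffer first and copied in two pieces; otherwise it is written
 * directly into the ring. Any words beyond the minimum four are recorded in
 * the extra-space ring so completion processing can release them.
 */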
static int
odm_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge *src,
		   const struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst, uint64_t flags)
{
	uint16_t pending_submit_len, pending_submit_cnt, iring_head, ins_ring_head;
	uint16_t iring_sz_available, i, nb, num_words;
	uint64_t cmd[ODM_IRING_ENTRY_SIZE_MAX];
	struct odm_dev *odm = dev_private;
	uint32_t s_sz = 0, d_sz = 0;
	uint64_t *iring_head_ptr;
	struct odm_queue *vq;
	union odm_instr_hdr_s hdr = {
		.s.ct = ODM_HDR_CT_CW_NC,
		.s.xtype = ODM_XTYPE_INTERNAL,
	};

	vq = &odm->vq[vchan];
	const uint16_t max_iring_words = vq->iring_max_words;

	iring_head_ptr = vq->iring_mz->addr;
	iring_head = vq->iring_head;
	iring_sz_available = vq->iring_sz_available;
	ins_ring_head = vq->ins_ring_head;
	pending_submit_len = vq->pending_submit_len;
	pending_submit_cnt = vq->pending_submit_cnt;

	if (unlikely(nb_src > 4 || nb_dst > 4))
		return -EINVAL;

	for (i = 0; i < nb_src; i++)
		s_sz += src[i].length;

	for (i = 0; i < nb_dst; i++)
		d_sz += dst[i].length;

	if (s_sz != d_sz)
		return -EINVAL;

	nb = nb_src + nb_dst;
	hdr.s.nfst = nb_src;
	hdr.s.nlst = nb_dst;
	num_words = 1 + 3 * (nb / 2 + (nb & 0x1));

	if (iring_sz_available < num_words)
		return -ENOSPC;

	if ((iring_head + num_words) >= max_iring_words) {
		uint16_t words_avail = max_iring_words - iring_head;
		uint16_t words_pend = num_words - words_avail;

		if (unlikely(words_avail + words_pend > ODM_IRING_ENTRY_SIZE_MAX))
			return -ENOSPC;

		odm_dmadev_fill_sg(cmd, src, dst, nb_src, nb_dst, &hdr);
		rte_memcpy((void *)&iring_head_ptr[iring_head], (void *)cmd, words_avail * 8);
		rte_memcpy((void *)iring_head_ptr, (void *)&cmd[words_avail], words_pend * 8);
		iring_head = words_pend;
	} else {
		odm_dmadev_fill_sg(&iring_head_ptr[iring_head], src, dst, nb_src, nb_dst, &hdr);
		iring_head += num_words;
	}

	pending_submit_len += num_words;

	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
		rte_wmb();
		odm_write64(pending_submit_len, odm->rbase + ODM_VDMA_DBELL(vchan));
		vq->stats.submitted += pending_submit_cnt + 1;
		vq->pending_submit_len = 0;
		vq->pending_submit_cnt = 0;
	} else {
		vq->pending_submit_len = pending_submit_len;
		vq->pending_submit_cnt++;
	}

	vq->iring_head = iring_head;

	vq->iring_sz_available = iring_sz_available - num_words;

	/* Save extra space used for the instruction. */
	vq->extra_ins_sz[ins_ring_head] = num_words - 4;

	vq->ins_ring_head = (ins_ring_head + 1) % vq->cring_max_entry;

	return vq->desc_idx++;
}

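/*
 * Enqueue a fill. Only all-zeros and all-ones patterns are supported
 * (ODM_XTYPE_FILL0/ODM_XTYPE_FILL1); any other pattern is rejected with
 * -ENOTSUP. Entry layout and doorbell handling mirror odm_dmadev_copy().
 */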
static int
odm_dmadev_fill(void *dev_private, uint16_t vchan, uint64_t pattern, rte_iova_t dst,
		uint32_t length, uint64_t flags)
{
	uint16_t pending_submit_len, pending_submit_cnt, iring_sz_available, iring_head;
	const int num_words = ODM_IRING_ENTRY_SIZE_MIN;
	struct odm_dev *odm = dev_private;
	uint64_t *iring_head_ptr;
	struct odm_queue *vq;
	uint64_t h;

	vq = &odm->vq[vchan];

	union odm_instr_hdr_s hdr = {
		.s.ct = ODM_HDR_CT_CW_NC,
		.s.nfst = 0,
		.s.nlst = 1,
	};

	h = (uint64_t)length;

	switch (pattern) {
	case 0:
		hdr.s.xtype = ODM_XTYPE_FILL0;
		break;
	case 0xffffffffffffffff:
		hdr.s.xtype = ODM_XTYPE_FILL1;
		break;
	default:
		return -ENOTSUP;
	}

	const uint16_t max_iring_words = vq->iring_max_words;

	iring_sz_available = vq->iring_sz_available;
	pending_submit_len = vq->pending_submit_len;
	pending_submit_cnt = vq->pending_submit_cnt;
	iring_head_ptr = vq->iring_mz->addr;
	iring_head = vq->iring_head;

	if (iring_sz_available < num_words)
		return -ENOSPC;

	if ((iring_head + num_words) >= max_iring_words) {

		iring_head_ptr[iring_head] = hdr.u;
		iring_head = (iring_head + 1) % max_iring_words;

		iring_head_ptr[iring_head] = h;
		iring_head = (iring_head + 1) % max_iring_words;

		iring_head_ptr[iring_head] = dst;
		iring_head = (iring_head + 1) % max_iring_words;

		iring_head_ptr[iring_head] = 0;
		iring_head = (iring_head + 1) % max_iring_words;
	} else {
		iring_head_ptr[iring_head] = hdr.u;
		iring_head_ptr[iring_head + 1] = h;
		iring_head_ptr[iring_head + 2] = dst;
		iring_head_ptr[iring_head + 3] = 0;
		iring_head += num_words;
	}

	pending_submit_len += num_words;

	if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
		rte_wmb();
		odm_write64(pending_submit_len, odm->rbase + ODM_VDMA_DBELL(vchan));
		vq->stats.submitted += pending_submit_cnt + 1;
		vq->pending_submit_len = 0;
		vq->pending_submit_cnt = 0;
	} else {
		vq->pending_submit_len = pending_submit_len;
		vq->pending_submit_cnt++;
	}

	vq->iring_head = iring_head;
	vq->iring_sz_available = iring_sz_available - num_words;

	/* No extra space to save. Skip entry in extra space ring. */
	vq->ins_ring_head = (vq->ins_ring_head + 1) % vq->cring_max_entry;

	return vq->desc_idx++;
}

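/*
 * Fetch up to nb_cpls completions. Completion entries are polled for the
 * valid bit, counted as errors when cmp_code is non-zero, cleared, and the
 * instruction-ring space they pinned (four words plus any extra SG words) is
 * released. last_idx is reported even when nothing new has completed so the
 * caller can track ring progress.
 */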
static uint16_t
odm_dmadev_completed(void *dev_private, uint16_t vchan, const uint16_t nb_cpls, uint16_t *last_idx,
		     bool *has_error)
{
	const union odm_cmpl_ent_s cmpl_zero = {0};
	uint16_t cring_head, iring_sz_available;
	struct odm_dev *odm = dev_private;
	union odm_cmpl_ent_s cmpl;
	struct odm_queue *vq;
	uint64_t nb_err = 0;
	uint32_t *cmpl_ptr;
	int cnt;

	vq = &odm->vq[vchan];
	const uint32_t *base_addr = vq->cring_mz->addr;
	const uint16_t cring_max_entry = vq->cring_max_entry;

	cring_head = vq->cring_head;
	iring_sz_available = vq->iring_sz_available;

	if (unlikely(vq->stats.submitted == vq->stats.completed)) {
		*last_idx = (vq->stats.completed_offset + vq->stats.completed - 1) & 0xFFFF;
		return 0;
	}

	for (cnt = 0; cnt < nb_cpls; cnt++) {
		cmpl_ptr = RTE_PTR_ADD(base_addr, cring_head * sizeof(cmpl));
		cmpl.u = rte_atomic_load_explicit((RTE_ATOMIC(uint32_t) *)cmpl_ptr,
						  rte_memory_order_relaxed);
		if (!cmpl.s.valid)
			break;

		if (cmpl.s.cmp_code)
			nb_err++;

		/* Free space for enqueue */
		iring_sz_available += 4 + vq->extra_ins_sz[cring_head];

		/* Clear instruction extra space */
		vq->extra_ins_sz[cring_head] = 0;

		rte_atomic_store_explicit((RTE_ATOMIC(uint32_t) *)cmpl_ptr, cmpl_zero.u,
					  rte_memory_order_relaxed);
		cring_head = (cring_head + 1) % cring_max_entry;
	}

	vq->stats.errors += nb_err;

	if (unlikely(has_error != NULL && nb_err))
		*has_error = true;

	vq->cring_head = cring_head;
	vq->iring_sz_available = iring_sz_available;

	vq->stats.completed += cnt;

	*last_idx = (vq->stats.completed_offset + vq->stats.completed - 1) & 0xFFFF;

	return cnt;
}

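/*
 * Same as odm_dmadev_completed(), but reports a per-operation status code for
 * each completion instead of a single has_error flag.
 */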
static uint16_t
odm_dmadev_completed_status(void *dev_private, uint16_t vchan, const uint16_t nb_cpls,
			    uint16_t *last_idx, enum rte_dma_status_code *status)
{
	const union odm_cmpl_ent_s cmpl_zero = {0};
	uint16_t cring_head, iring_sz_available;
	struct odm_dev *odm = dev_private;
	union odm_cmpl_ent_s cmpl;
	struct odm_queue *vq;
	uint32_t *cmpl_ptr;
	int cnt;

	vq = &odm->vq[vchan];
	const uint32_t *base_addr = vq->cring_mz->addr;
	const uint16_t cring_max_entry = vq->cring_max_entry;

	cring_head = vq->cring_head;
	iring_sz_available = vq->iring_sz_available;

	if (vq->stats.submitted == vq->stats.completed) {
		*last_idx = (vq->stats.completed_offset + vq->stats.completed - 1) & 0xFFFF;
		return 0;
	}

#ifdef ODM_DEBUG
	ODM_LOG(DEBUG, "cring_head: 0x%" PRIx16, cring_head);
	ODM_LOG(DEBUG, "Submitted: 0x%" PRIx64, vq->stats.submitted);
	ODM_LOG(DEBUG, "Completed: 0x%" PRIx64, vq->stats.completed);
	ODM_LOG(DEBUG, "Hardware count: 0x%" PRIx64, odm_read64(odm->rbase + ODM_VDMA_CNT(vchan)));
#endif

	for (cnt = 0; cnt < nb_cpls; cnt++) {
		cmpl_ptr = RTE_PTR_ADD(base_addr, cring_head * sizeof(cmpl));
		cmpl.u = rte_atomic_load_explicit((RTE_ATOMIC(uint32_t) *)cmpl_ptr,
						  rte_memory_order_relaxed);
		if (!cmpl.s.valid)
			break;

		status[cnt] = cmpl.s.cmp_code;

		if (cmpl.s.cmp_code)
			vq->stats.errors++;

		/* Free space for enqueue */
		iring_sz_available += 4 + vq->extra_ins_sz[cring_head];

		/* Clear instruction extra space */
		vq->extra_ins_sz[cring_head] = 0;

		rte_atomic_store_explicit((RTE_ATOMIC(uint32_t) *)cmpl_ptr, cmpl_zero.u,
					  rte_memory_order_relaxed);
		cring_head = (cring_head + 1) % cring_max_entry;
	}

	vq->cring_head = cring_head;
	vq->iring_sz_available = iring_sz_available;

	vq->stats.completed += cnt;

	*last_idx = (vq->stats.completed_offset + vq->stats.completed - 1) & 0xFFFF;

	return cnt;
}

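/*
 * Flush pending instruction words to hardware by writing their count to the
 * queue doorbell; the write barrier orders the ring updates before the
 * doorbell write.
 */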
static int
odm_dmadev_submit(void *dev_private, uint16_t vchan)
{
	struct odm_dev *odm = dev_private;
	uint16_t pending_submit_len;
	struct odm_queue *vq;

	vq = &odm->vq[vchan];
	pending_submit_len = vq->pending_submit_len;

	if (pending_submit_len == 0)
		return 0;

	rte_wmb();
	odm_write64(pending_submit_len, odm->rbase + ODM_VDMA_DBELL(vchan));
	vq->pending_submit_len = 0;
	vq->stats.submitted += vq->pending_submit_cnt;
	vq->pending_submit_cnt = 0;

	return 0;
}

static uint16_t
odm_dmadev_burst_capacity(const void *dev_private, uint16_t vchan)
{
	const struct odm_dev *odm = dev_private;
	const struct odm_queue *vq;

	vq = &odm->vq[vchan];
	return (vq->iring_sz_available / ODM_IRING_ENTRY_SIZE_MIN);
}

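/*
 * Per-vchan or aggregate statistics. The direct cast of the queue's vq_stats
 * to struct rte_dma_stats relies on vq_stats (defined in odm.h) keeping the
 * submitted, completed and errors counters in the same leading layout as
 * struct rte_dma_stats.
 */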
static int
odm_stats_get(const struct rte_dma_dev *dev, uint16_t vchan, struct rte_dma_stats *rte_stats,
	      uint32_t size)
{
	struct odm_dev *odm = dev->fp_obj->dev_private;

	if (size < sizeof(*rte_stats))
		return -EINVAL;
	if (rte_stats == NULL)
		return -EINVAL;

	if (vchan != RTE_DMA_ALL_VCHAN) {
		struct rte_dma_stats *stats = (struct rte_dma_stats *)&odm->vq[vchan].stats;

		*rte_stats = *stats;
	} else {
		int i;

		for (i = 0; i < odm->num_qs; i++) {
			struct rte_dma_stats *stats = (struct rte_dma_stats *)&odm->vq[i].stats;

			rte_stats->submitted += stats->submitted;
			rte_stats->completed += stats->completed;
			rte_stats->errors += stats->errors;
		}
	}

	return 0;
}

static void
odm_vq_stats_reset(struct vq_stats *vq_stats)
{
	vq_stats->completed_offset += vq_stats->completed;
	vq_stats->completed = 0;
	vq_stats->errors = 0;
	vq_stats->submitted = 0;
}

static int
odm_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
{
	struct odm_dev *odm = dev->fp_obj->dev_private;
	struct vq_stats *vq_stats;
	int i;

	if (vchan != RTE_DMA_ALL_VCHAN) {
		vq_stats = &odm->vq[vchan].stats;
		odm_vq_stats_reset(vq_stats);
	} else {
		for (i = 0; i < odm->num_qs; i++) {
			vq_stats = &odm->vq[i].stats;
			odm_vq_stats_reset(vq_stats);
		}
	}

	return 0;
}

static const struct rte_dma_dev_ops odm_dmadev_ops = {
	.dev_close = odm_dmadev_close,
	.dev_configure = odm_dmadev_configure,
	.dev_info_get = odm_dmadev_info_get,
	.dev_start = odm_dmadev_start,
	.dev_stop = odm_dmadev_stop,
	.stats_get = odm_stats_get,
	.stats_reset = odm_stats_reset,
	.vchan_setup = odm_dmadev_vchan_setup,
};

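/*
 * PCI probe: allocate an rte_dma device for the VF, wire up the control-plane
 * ops and fast-path handlers, then initialize the hardware via odm_dev_init().
 * The device is released again if hardware initialization fails.
 */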
static int
odm_dmadev_probe(struct rte_pci_driver *pci_drv __rte_unused, struct rte_pci_device *pci_dev)
{
	char name[RTE_DEV_NAME_MAX_LEN];
	struct odm_dev *odm = NULL;
	struct rte_dma_dev *dmadev;
	int rc;

	if (!pci_dev->mem_resource[0].addr)
		return -ENODEV;

	memset(name, 0, sizeof(name));
	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dmadev = rte_dma_pmd_allocate(name, pci_dev->device.numa_node, sizeof(*odm));
	if (dmadev == NULL) {
		ODM_LOG(ERR, "DMA device allocation failed for %s", name);
		return -ENOMEM;
	}

	ODM_LOG(INFO, "DMA device %s probed", name);
	odm = dmadev->data->dev_private;

	dmadev->device = &pci_dev->device;
	dmadev->fp_obj->dev_private = odm;
	dmadev->dev_ops = &odm_dmadev_ops;

	dmadev->fp_obj->copy = odm_dmadev_copy;
	dmadev->fp_obj->copy_sg = odm_dmadev_copy_sg;
	dmadev->fp_obj->fill = odm_dmadev_fill;
	dmadev->fp_obj->submit = odm_dmadev_submit;
	dmadev->fp_obj->completed = odm_dmadev_completed;
	dmadev->fp_obj->completed_status = odm_dmadev_completed_status;
	dmadev->fp_obj->burst_capacity = odm_dmadev_burst_capacity;

	odm->pci_dev = pci_dev;

	rc = odm_dev_init(odm);
	if (rc < 0)
		goto dma_pmd_release;

	return 0;

dma_pmd_release:
	rte_dma_pmd_release(name);

	return rc;
}

static int
odm_dmadev_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_DEV_NAME_MAX_LEN];

	memset(name, 0, sizeof(name));
	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	return rte_dma_pmd_release(name);
}

static const struct rte_pci_id odm_dma_pci_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_ODYSSEY_ODM_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver odm_dmadev = {
	.id_table = odm_dma_pci_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = odm_dmadev_probe,
	.remove = odm_dmadev_remove,
};

RTE_PMD_REGISTER_PCI(PCI_DRIVER_NAME, odm_dmadev);
RTE_PMD_REGISTER_PCI_TABLE(PCI_DRIVER_NAME, odm_dma_pci_map);
RTE_PMD_REGISTER_KMOD_DEP(PCI_DRIVER_NAME, "vfio-pci");
RTE_LOG_REGISTER_DEFAULT(odm_logtype, NOTICE);