/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021-2024 HiSilicon Limited
 */

#include <inttypes.h>
#include <stdlib.h>

#include <pthread.h>

#include <bus_vdev_driver.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

#include <rte_dmadev_pmd.h>

#include "skeleton_dmadev.h"

RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
#define RTE_LOGTYPE_SKELDMA skeldma_logtype
#define SKELDMA_LOG(level, ...) \
	RTE_LOG_LINE_PREFIX(level, SKELDMA, "%s(): ", __func__, __VA_ARGS__)

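/*
 * Design note: this skeleton PMD emulates a DMA engine purely in software.
 * Descriptors flow through four rte_rings:
 *	empty -> (enqueue ops) -> pending -> (submit doorbell) -> running
 *	      -> (cpuwork thread executes the op) -> completed -> empty
 * A dedicated control thread (cpuwork_thread) performs the actual
 * memcpy/fill work, optionally pinned to the lcore given by the
 * "lcore" vdev argument.
 */
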
static int
skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
		 uint32_t info_sz)
{
#define SKELDMA_MAX_DESC	8192
#define SKELDMA_MIN_DESC	32

	RTE_SET_USED(dev);
	RTE_SET_USED(info_sz);

	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
			     RTE_DMA_CAPA_SVA |
			     RTE_DMA_CAPA_OPS_COPY |
			     RTE_DMA_CAPA_OPS_COPY_SG |
			     RTE_DMA_CAPA_OPS_FILL;
	dev_info->max_vchans = 1;
	dev_info->max_desc = SKELDMA_MAX_DESC;
	dev_info->min_desc = SKELDMA_MIN_DESC;
	dev_info->max_sges = SKELDMA_MAX_SGES;

	return 0;
}

static int
skeldma_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
		  uint32_t conf_sz)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(conf);
	RTE_SET_USED(conf_sz);
	return 0;
}

static inline void
do_copy_sg_one(struct rte_dma_sge *src, struct rte_dma_sge *dst, uint16_t nb_dst, uint64_t offset)
{
	uint32_t src_off = 0, dst_off = 0;
	uint32_t copy_len = 0;
	uint64_t tmp = 0;
	uint16_t i;

	/* Locate the destination segment at which the copy starts. */
	for (i = 0; i < nb_dst; i++) {
		tmp += dst[i].length;
		if (offset < tmp) {
			copy_len = tmp - offset;
			dst_off = dst[i].length - copy_len;
			break;
		}
	}

	for (/* Use the above index */; i < nb_dst; i++) {
		copy_len = RTE_MIN(copy_len, src->length - src_off);
		rte_memcpy((uint8_t *)(uintptr_t)dst[i].addr + dst_off,
			   (uint8_t *)(uintptr_t)src->addr + src_off,
			   copy_len);
		src_off += copy_len;
		if (src_off >= src->length)
			break;
		dst_off = 0;
		/* Refresh copy_len here rather than in the for clause so the
		 * final increment cannot read dst[nb_dst] out of bounds.
		 */
		if (i + 1 < nb_dst)
			copy_len = dst[i + 1].length;
	}
}

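/* Copy each source segment, in order, onto the destination segment list;
 * the running byte offset maps the source position onto the destination.
 * Worked example (hypothetical sizes): two 8-byte source segments into
 * destination segments of 4 and 12 bytes. The first call writes dst[0]
 * bytes 0-3 and dst[1] bytes 0-3; the second call (offset = 8) writes
 * dst[1] bytes 4-11.
 */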
static inline void
do_copy_sg(struct skeldma_desc *desc)
{
	uint64_t offset = 0;
	uint16_t i;

	for (i = 0; i < desc->copy_sg.nb_src; i++) {
		do_copy_sg_one(&desc->copy_sg.src[i], desc->copy_sg.dst,
			       desc->copy_sg.nb_dst, offset);
		offset += desc->copy_sg.src[i].length;
	}
}

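/* Replicate the 64-bit pattern byte by byte across the destination, in
 * the host byte order of the uint64_t.
 */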
static inline void
do_fill(struct skeldma_desc *desc)
{
	uint8_t *fills = (uint8_t *)&desc->fill.pattern;
	uint8_t *dst = (uint8_t *)desc->fill.dst;
	uint32_t i;

	for (i = 0; i < desc->fill.len; i++)
		dst[i] = fills[i % sizeof(desc->fill.pattern)];
}

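/*
 * Worker loop run by the control thread: drain the running ring, execute
 * each descriptor's operation on the CPU, then move it to the completed
 * ring. An empty poll bumps zero_req_count; once it reaches SLEEP_THRESHOLD
 * the thread sleeps SLEEP_US_VAL microseconds per idle poll to avoid
 * burning a core (the == 0 check below re-arms the counter if it ever
 * wraps around). The release store to completed_count pairs with the
 * acquire load in skeldma_vchan_status().
 */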
static uint32_t
cpuwork_thread(void *param)
{
#define SLEEP_THRESHOLD		10000
#define SLEEP_US_VAL		10

	struct rte_dma_dev *dev = param;
	struct skeldma_hw *hw = dev->data->dev_private;
	struct skeldma_desc *desc = NULL;
	int ret;

	while (!hw->exit_flag) {
		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
		if (ret) {
			hw->zero_req_count++;
			if (hw->zero_req_count == 0)
				hw->zero_req_count = SLEEP_THRESHOLD;
			if (hw->zero_req_count >= SLEEP_THRESHOLD)
				rte_delay_us_sleep(SLEEP_US_VAL);
			continue;
		}
		hw->zero_req_count = 0;

		if (desc->op == SKELDMA_OP_COPY)
			rte_memcpy(desc->copy.dst, desc->copy.src, desc->copy.len);
		else if (desc->op == SKELDMA_OP_COPY_SG)
			do_copy_sg(desc);
		else if (desc->op == SKELDMA_OP_FILL)
			do_fill(desc);

		rte_atomic_fetch_add_explicit(&hw->completed_count, 1, rte_memory_order_release);
		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
	}

	return 0;
}

static void
flush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
{
	struct skeldma_desc *desc = NULL;

	while (rte_ring_count(ring) > 0) {
		(void)rte_ring_dequeue(ring, (void **)&desc);
		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
	}
}

static int
skeldma_start(struct rte_dma_dev *dev)
{
	struct skeldma_hw *hw = dev->data->dev_private;
	char name[RTE_THREAD_INTERNAL_NAME_SIZE];
	rte_cpuset_t cpuset;
	int ret;

	if (hw->desc_mem == NULL) {
		SKELDMA_LOG(ERR, "Vchan was not set up, start failed!");
		return -EINVAL;
	}

	/* Reset the dmadev to a known state, including:
	 * 1) flush the pending/running/completed rings back to the empty ring.
	 * 2) init ring idx to zero.
	 * 3) init running statistics.
	 * 4) clear the cpuwork thread's exit_flag.
	 */
	flush_ring(hw, hw->desc_pending);
	flush_ring(hw, hw->desc_running);
	flush_ring(hw, hw->desc_completed);
	hw->ridx = 0;
	hw->last_ridx = hw->ridx - 1;
	hw->submitted_count = 0;
	hw->zero_req_count = 0;
	hw->completed_count = 0;
	hw->exit_flag = false;

	rte_mb();

	snprintf(name, sizeof(name), "dma-skel%d", dev->data->dev_id);
	ret = rte_thread_create_internal_control(&hw->thread, name,
			cpuwork_thread, dev);
	if (ret) {
		SKELDMA_LOG(ERR, "Failed to start cpuwork thread!");
		return -EINVAL;
	}

	if (hw->lcore_id != -1) {
		cpuset = rte_lcore_cpuset(hw->lcore_id);
		ret = rte_thread_set_affinity_by_id(hw->thread, &cpuset);
		if (ret)
			SKELDMA_LOG(WARNING,
				"Failed to set thread affinity to lcore %d!",
				hw->lcore_id);
	}

	return 0;
}

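/*
 * Stop path: raise exit_flag so the worker loop can finish its current
 * iteration, give it a brief window to notice, then cancel and join the
 * control thread.
 */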
static int
skeldma_stop(struct rte_dma_dev *dev)
{
	struct skeldma_hw *hw = dev->data->dev_private;

	hw->exit_flag = true;
	rte_delay_ms(1);

	(void)pthread_cancel((pthread_t)hw->thread.opaque_id);
	rte_thread_join(hw->thread, NULL);

	return 0;
}

static int
vchan_setup(struct skeldma_hw *hw, int16_t dev_id, uint16_t nb_desc)
{
	char name[RTE_RING_NAMESIZE];
	struct skeldma_desc *desc;
	struct rte_ring *empty;
	struct rte_ring *pending;
	struct rte_ring *running;
	struct rte_ring *completed;
	uint16_t i;

	desc = rte_zmalloc_socket(NULL, nb_desc * sizeof(struct skeldma_desc),
				  RTE_CACHE_LINE_SIZE, hw->socket_id);
	if (desc == NULL) {
		SKELDMA_LOG(ERR, "Failed to allocate dma skeleton descriptors!");
		return -ENOMEM;
	}

	snprintf(name, RTE_RING_NAMESIZE, "dma_skel_desc_empty_%d", dev_id);
	empty = rte_ring_create(name, nb_desc, hw->socket_id,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
	snprintf(name, RTE_RING_NAMESIZE, "dma_skel_desc_pend_%d", dev_id);
	pending = rte_ring_create(name, nb_desc, hw->socket_id,
				  RING_F_SP_ENQ | RING_F_SC_DEQ);
	snprintf(name, RTE_RING_NAMESIZE, "dma_skel_desc_run_%d", dev_id);
	running = rte_ring_create(name, nb_desc, hw->socket_id,
				  RING_F_SP_ENQ | RING_F_SC_DEQ);
	snprintf(name, RTE_RING_NAMESIZE, "dma_skel_desc_comp_%d", dev_id);
	completed = rte_ring_create(name, nb_desc, hw->socket_id,
				    RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (empty == NULL || pending == NULL || running == NULL ||
	    completed == NULL) {
		SKELDMA_LOG(ERR, "Failed to create dma skeleton desc rings!");
		rte_ring_free(empty);
		rte_ring_free(pending);
		rte_ring_free(running);
		rte_ring_free(completed);
		rte_free(desc);
		return -ENOMEM;
	}

	/* The real usable ring size is *count-1* instead of *count* to
	 * differentiate a full ring from an empty ring.
	 * @see rte_ring_create
	 */
	for (i = 0; i < nb_desc - 1; i++)
		(void)rte_ring_enqueue(empty, (void *)(desc + i));

	hw->desc_mem = desc;
	hw->desc_empty = empty;
	hw->desc_pending = pending;
	hw->desc_running = running;
	hw->desc_completed = completed;

	return 0;
}

static void
vchan_release(struct skeldma_hw *hw)
{
	if (hw->desc_mem == NULL)
		return;

	rte_free(hw->desc_mem);
	hw->desc_mem = NULL;
	rte_ring_free(hw->desc_empty);
	hw->desc_empty = NULL;
	rte_ring_free(hw->desc_pending);
	hw->desc_pending = NULL;
	rte_ring_free(hw->desc_running);
	hw->desc_running = NULL;
	rte_ring_free(hw->desc_completed);
	hw->desc_completed = NULL;
}

static int
skeldma_close(struct rte_dma_dev *dev)
{
	/* The device has already been stopped at this point. */
	vchan_release(dev->data->dev_private);
	return 0;
}

static int
skeldma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
		    const struct rte_dma_vchan_conf *conf,
		    uint32_t conf_sz)
{
	struct skeldma_hw *hw = dev->data->dev_private;

	RTE_SET_USED(vchan);
	RTE_SET_USED(conf_sz);

	if (!rte_is_power_of_2(conf->nb_desc)) {
		SKELDMA_LOG(ERR, "Number of desc must be a power of 2!");
		return -EINVAL;
	}

	vchan_release(hw);
	return vchan_setup(hw, dev->data->dev_id, conf->nb_desc);
}

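/*
 * The vchan is reported active when descriptors have been submitted but
 * not yet completed, or when the worker's last poll dequeued work
 * (zero_req_count == 0).
 */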
static int
skeldma_vchan_status(const struct rte_dma_dev *dev,
		uint16_t vchan, enum rte_dma_vchan_status *status)
{
	struct skeldma_hw *hw = dev->data->dev_private;

	RTE_SET_USED(vchan);

	*status = RTE_DMA_VCHAN_IDLE;
	if (hw->submitted_count != rte_atomic_load_explicit(&hw->completed_count,
			rte_memory_order_acquire)
			|| hw->zero_req_count == 0)
		*status = RTE_DMA_VCHAN_ACTIVE;
	return 0;
}

static int
skeldma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
		  struct rte_dma_stats *stats, uint32_t stats_sz)
{
	struct skeldma_hw *hw = dev->data->dev_private;

	RTE_SET_USED(vchan);
	RTE_SET_USED(stats_sz);

	stats->submitted = hw->submitted_count;
	stats->completed = hw->completed_count;
	stats->errors = 0;

	return 0;
}

static int
skeldma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
{
	struct skeldma_hw *hw = dev->data->dev_private;

	RTE_SET_USED(vchan);

	hw->submitted_count = 0;
	hw->completed_count = 0;

	return 0;
}

static int
skeldma_dump(const struct rte_dma_dev *dev, FILE *f)
{
#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)

	struct skeldma_hw *hw = dev->data->dev_private;

	(void)fprintf(f,
		"    lcore_id: %d\n"
		"    socket_id: %d\n"
		"    desc_empty_ring_count: %u\n"
		"    desc_pending_ring_count: %u\n"
		"    desc_running_ring_count: %u\n"
		"    desc_completed_ring_count: %u\n",
		hw->lcore_id, hw->socket_id,
		GET_RING_COUNT(hw->desc_empty),
		GET_RING_COUNT(hw->desc_pending),
		GET_RING_COUNT(hw->desc_running),
		GET_RING_COUNT(hw->desc_completed));
	(void)fprintf(f,
		"    next_ring_idx: %u\n"
		"    last_ring_idx: %u\n"
		"    submitted_count: %" PRIu64 "\n"
		"    completed_count: %" PRIu64 "\n",
		hw->ridx, hw->last_ridx,
		hw->submitted_count, hw->completed_count);

	return 0;
}

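/*
 * "Doorbell" helper: move everything queued on the pending ring to the
 * running ring where the worker thread can see it. A non-NULL desc is
 * enqueued directly to the running ring as well, which lets the enqueue
 * ops honor RTE_DMA_OP_FLAG_SUBMIT without a pending-ring round-trip.
 */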
static inline void
submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
{
	uint16_t count = rte_ring_count(hw->desc_pending);
	struct skeldma_desc *pend_desc = NULL;

	while (count > 0) {
		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
		count--;
	}

	if (desc)
		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
}

static int
skeldma_copy(void *dev_private, uint16_t vchan,
	     rte_iova_t src, rte_iova_t dst,
	     uint32_t length, uint64_t flags)
{
	struct skeldma_hw *hw = dev_private;
	struct skeldma_desc *desc;
	int ret;

	RTE_SET_USED(vchan);

	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
	if (ret)
		return -ENOSPC;
	desc->op = SKELDMA_OP_COPY;
	desc->ridx = hw->ridx;
	desc->copy.src = (void *)(uintptr_t)src;
	desc->copy.dst = (void *)(uintptr_t)dst;
	desc->copy.len = length;
	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
		submit(hw, desc);
	else
		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
	hw->submitted_count++;

	return hw->ridx++;
}

static int
skeldma_copy_sg(void *dev_private, uint16_t vchan,
		const struct rte_dma_sge *src,
		const struct rte_dma_sge *dst,
		uint16_t nb_src, uint16_t nb_dst,
		uint64_t flags)
{
	struct skeldma_hw *hw = dev_private;
	struct skeldma_desc *desc;
	int ret;

	RTE_SET_USED(vchan);

	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
	if (ret)
		return -ENOSPC;
	desc->op = SKELDMA_OP_COPY_SG;
	desc->ridx = hw->ridx;
	memcpy(desc->copy_sg.src, src, sizeof(*src) * nb_src);
	memcpy(desc->copy_sg.dst, dst, sizeof(*dst) * nb_dst);
	desc->copy_sg.nb_src = nb_src;
	desc->copy_sg.nb_dst = nb_dst;
	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
		submit(hw, desc);
	else
		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
	hw->submitted_count++;

	return hw->ridx++;
}

static int
skeldma_fill(void *dev_private, uint16_t vchan,
	     uint64_t pattern, rte_iova_t dst,
	     uint32_t length, uint64_t flags)
{
	struct skeldma_hw *hw = dev_private;
	struct skeldma_desc *desc;
	int ret;

	RTE_SET_USED(vchan);

	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
	if (ret)
		return -ENOSPC;
	desc->op = SKELDMA_OP_FILL;
	desc->ridx = hw->ridx;
	desc->fill.dst = (void *)(uintptr_t)dst;
	desc->fill.len = length;
	desc->fill.pattern = pattern;
	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
		submit(hw, desc);
	else
		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
	hw->submitted_count++;

	return hw->ridx++;
}

static int
skeldma_submit(void *dev_private, uint16_t vchan)
{
	struct skeldma_hw *hw = dev_private;
	RTE_SET_USED(vchan);
	submit(hw, NULL);
	return 0;
}

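/*
 * Completion helpers: drain up to nb_cpls descriptors from the completed
 * ring back to the empty ring and report the ring index of the last one
 * drained. When nothing has completed, report the previously returned
 * index (last_ridx) so callers still see a valid last_idx.
 */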
static uint16_t
skeldma_completed(void *dev_private,
		  uint16_t vchan, const uint16_t nb_cpls,
		  uint16_t *last_idx, bool *has_error)
{
	struct skeldma_hw *hw = dev_private;
	struct skeldma_desc *desc = NULL;
	uint16_t index = 0;
	uint16_t count;

	RTE_SET_USED(vchan);
	RTE_SET_USED(has_error);

	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
	while (index < count) {
		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
		if (index == count - 1) {
			hw->last_ridx = desc->ridx;
			*last_idx = desc->ridx;
		}
		index++;
		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
	}
	if (unlikely(count == 0))
		*last_idx = hw->last_ridx;

	return count;
}

static uint16_t
skeldma_completed_status(void *dev_private,
			 uint16_t vchan, const uint16_t nb_cpls,
			 uint16_t *last_idx, enum rte_dma_status_code *status)
{
	struct skeldma_hw *hw = dev_private;
	struct skeldma_desc *desc = NULL;
	uint16_t index = 0;
	uint16_t count;

	RTE_SET_USED(vchan);

	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
	while (index < count) {
		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
		if (index == count - 1) {
			hw->last_ridx = desc->ridx;
			*last_idx = desc->ridx;
		}
		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
	}
	if (unlikely(count == 0))
		*last_idx = hw->last_ridx;

	return count;
}

static uint16_t
skeldma_burst_capacity(const void *dev_private, uint16_t vchan)
{
	const struct skeldma_hw *hw = dev_private;

	RTE_SET_USED(vchan);
	return rte_ring_count(hw->desc_empty);
}

static const struct rte_dma_dev_ops skeldma_ops = {
	.dev_info_get     = skeldma_info_get,
	.dev_configure    = skeldma_configure,
	.dev_start        = skeldma_start,
	.dev_stop         = skeldma_stop,
	.dev_close        = skeldma_close,

	.vchan_setup      = skeldma_vchan_setup,
	.vchan_status     = skeldma_vchan_status,

	.stats_get        = skeldma_stats_get,
	.stats_reset      = skeldma_stats_reset,

	.dev_dump         = skeldma_dump,
};

static int
skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
{
	struct rte_dma_dev *dev;
	struct skeldma_hw *hw;
	int socket_id;

	socket_id = (lcore_id < 0) ? rte_socket_id() :
				     rte_lcore_to_socket_id(lcore_id);
	dev = rte_dma_pmd_allocate(name, socket_id, sizeof(struct skeldma_hw));
	if (dev == NULL) {
		SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
		return -EINVAL;
	}

	dev->device = &vdev->device;
	dev->dev_ops = &skeldma_ops;
	dev->fp_obj->dev_private = dev->data->dev_private;
	dev->fp_obj->copy = skeldma_copy;
	dev->fp_obj->copy_sg = skeldma_copy_sg;
	dev->fp_obj->fill = skeldma_fill;
	dev->fp_obj->submit = skeldma_submit;
	dev->fp_obj->completed = skeldma_completed;
	dev->fp_obj->completed_status = skeldma_completed_status;
	dev->fp_obj->burst_capacity = skeldma_burst_capacity;

	hw = dev->data->dev_private;
	hw->lcore_id = lcore_id;
	hw->socket_id = socket_id;

	dev->state = RTE_DMA_DEV_READY;

	return dev->data->dev_id;
}

static int
skeldma_destroy(const char *name)
{
	return rte_dma_pmd_release(name);
}

static int
skeldma_parse_lcore(const char *key __rte_unused,
		    const char *value,
		    void *opaque)
{
	int lcore_id;

	if (value == NULL || opaque == NULL)
		return -EINVAL;

	lcore_id = atoi(value);
	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
		*(int *)opaque = lcore_id;

	return 0;
}

static void
skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
{
	static const char *const args[] = {
		SKELDMA_ARG_LCORE,
		NULL
	};

	struct rte_kvargs *kvlist;
	const char *params;

	params = rte_vdev_device_args(vdev);
	if (params == NULL || params[0] == '\0')
		return;

	kvlist = rte_kvargs_parse(params, args);
	if (kvlist == NULL)
		return;

	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
				 skeldma_parse_lcore, lcore_id);
	SKELDMA_LOG(INFO, "Parse lcore_id = %d", *lcore_id);

	rte_kvargs_free(kvlist);
}

static int
skeldma_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	int lcore_id = -1;
	int ret;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		SKELDMA_LOG(ERR, "Multi-process not supported for %s", name);
		return -EINVAL;
	}

	skeldma_parse_vdev_args(vdev, &lcore_id);

	ret = skeldma_create(name, vdev, lcore_id);
	if (ret >= 0)
		SKELDMA_LOG(INFO, "Created %s dmadev with lcore-id %d",
			name, lcore_id);

	return ret < 0 ? ret : 0;
}

static int
skeldma_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	int ret;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -1;

	ret = skeldma_destroy(name);
	if (!ret)
		SKELDMA_LOG(INFO, "Removed %s dmadev", name);

	return ret;
}

static struct rte_vdev_driver skeldma_pmd_drv = {
	.probe = skeldma_probe,
	.remove = skeldma_remove,
	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
};

RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
		SKELDMA_ARG_LCORE "=<uint16> ");
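
/*
 * Usage sketch (illustrative, not part of the driver): an application
 * would typically instantiate this PMD with --vdev=dma_skeleton,lcore=<id>
 * and then drive it through the generic dmadev API, e.g.:
 *
 *	int16_t dev_id = rte_dma_get_dev_id_by_name("dma_skeleton");
 *	struct rte_dma_conf conf = { .nb_vchans = 1 };
 *	struct rte_dma_vchan_conf qconf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = 1024,	(a power of 2 within min/max_desc)
 *	};
 *
 *	rte_dma_configure(dev_id, &conf);
 *	rte_dma_vchan_setup(dev_id, 0, &qconf);
 *	rte_dma_start(dev_id);
 *	rte_dma_copy(dev_id, 0, src_iova, dst_iova, len,
 *		     RTE_DMA_OP_FLAG_SUBMIT);
 *	then poll: rte_dma_completed(dev_id, 0, 1, &last_idx, &has_error);
 *
 * The descriptor count and buffer variables above are hypothetical.
 */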
735