/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 Marvell.
 */

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mldev.h>

#include "ml_common.h"
#include "test_inference_common.h"

/* Enqueue inference requests with burst size equal to 1 */
static int
ml_enqueue_single(void *arg)
{
	struct test_inference *t = ml_test_priv((struct ml_test *)arg);
	struct ml_request *req = NULL;
	struct rte_ml_op *op = NULL;
	struct ml_core_args *args;
	uint64_t model_enq = 0;
	uint32_t burst_enq;
	uint32_t lcore_id;
	uint16_t fid;
	int ret;

	lcore_id = rte_lcore_id();
	args = &t->args[lcore_id];
	model_enq = 0;

	if (args->nb_reqs == 0)
		return 0;

next_rep:
	fid = args->start_fid;

next_model:
	ret = rte_mempool_get(t->op_pool, (void **)&op);
	if (ret != 0)
		goto next_model;

retry:
	ret = rte_mempool_get(t->model[fid].io_pool, (void **)&req);
	if (ret != 0)
		goto retry;

	op->model_id = t->model[fid].id;
	op->nb_batches = t->model[fid].nb_batches;
	op->mempool = t->op_pool;

	op->input.addr = req->input;
	op->input.length = t->model[fid].inp_qsize;
	op->input.next = NULL;

	op->output.addr = req->output;
	op->output.length = t->model[fid].out_qsize;
	op->output.next = NULL;

	op->user_ptr = req;
	req->niters++;
	req->fid = fid;

enqueue_req:
	burst_enq = rte_ml_enqueue_burst(t->cmn.opt->dev_id, args->qp_id, &op, 1);
	if (burst_enq == 0)
		goto enqueue_req;

	fid++;
	if (likely(fid <= args->end_fid))
		goto next_model;

	model_enq++;
	if (likely(model_enq < args->nb_reqs))
		goto next_rep;

	return 0;
}

/* Dequeue inference requests with burst size equal to 1 */
static int
ml_dequeue_single(void *arg)
{
	struct test_inference *t = ml_test_priv((struct ml_test *)arg);
	struct rte_ml_op_error error;
	struct rte_ml_op *op = NULL;
	struct ml_core_args *args;
	struct ml_request *req;
	uint64_t total_deq = 0;
	uint8_t nb_filelist;
	uint32_t burst_deq;
	uint32_t lcore_id;

	lcore_id = rte_lcore_id();
	args = &t->args[lcore_id];
	nb_filelist = args->end_fid - args->start_fid + 1;

	if (args->nb_reqs == 0)
		return 0;

dequeue_req:
	burst_deq = rte_ml_dequeue_burst(t->cmn.opt->dev_id, args->qp_id, &op, 1);

	if (likely(burst_deq == 1)) {
		total_deq += burst_deq;
		if (unlikely(op->status == RTE_ML_OP_STATUS_ERROR)) {
			rte_ml_op_error_get(t->cmn.opt->dev_id, op, &error);
			ml_err("error_code = 0x%" PRIx64 ", error_message = %s\n", error.errcode,
			       error.message);
			t->error_count[lcore_id]++;
		}
		req = (struct ml_request *)op->user_ptr;
		rte_mempool_put(t->model[req->fid].io_pool, req);
		rte_mempool_put(t->op_pool, op);
	}

	if (likely(total_deq < args->nb_reqs * nb_filelist))
		goto dequeue_req;

	return 0;
}

/* Enqueue inference requests with burst size greater than 1 */
static int
ml_enqueue_burst(void *arg)
{
	struct test_inference *t = ml_test_priv((struct ml_test *)arg);
	struct ml_core_args *args;
	uint16_t ops_count;
	uint64_t model_enq;
	uint16_t burst_enq;
	uint32_t lcore_id;
	uint16_t pending;
	uint16_t idx;
	uint16_t fid;
	uint16_t i;
	int ret;

	lcore_id = rte_lcore_id();
	args = &t->args[lcore_id];
	model_enq = 0;

	if (args->nb_reqs == 0)
		return 0;

next_rep:
	fid = args->start_fid;

next_model:
	ops_count = RTE_MIN(t->cmn.opt->burst_size, args->nb_reqs - model_enq);
	ret = rte_mempool_get_bulk(t->op_pool, (void **)args->enq_ops, ops_count);
	if (ret != 0)
		goto next_model;

retry:
	ret = rte_mempool_get_bulk(t->model[fid].io_pool, (void **)args->reqs, ops_count);
	if (ret != 0)
		goto retry;

	for (i = 0; i < ops_count; i++) {
		args->enq_ops[i]->model_id = t->model[fid].id;
		args->enq_ops[i]->nb_batches = t->model[fid].nb_batches;
		args->enq_ops[i]->mempool = t->op_pool;

		args->enq_ops[i]->input.addr = args->reqs[i]->input;
		args->enq_ops[i]->input.length = t->model[fid].inp_qsize;
		args->enq_ops[i]->input.next = NULL;

		args->enq_ops[i]->output.addr = args->reqs[i]->output;
		args->enq_ops[i]->output.length = t->model[fid].out_qsize;
		args->enq_ops[i]->output.next = NULL;

		args->enq_ops[i]->user_ptr = args->reqs[i];
		args->reqs[i]->niters++;
		args->reqs[i]->fid = fid;
	}

	idx = 0;
	pending = ops_count;

enqueue_reqs:
	burst_enq =
		rte_ml_enqueue_burst(t->cmn.opt->dev_id, args->qp_id, &args->enq_ops[idx], pending);
	pending = pending - burst_enq;

	if (pending > 0) {
		idx = idx + burst_enq;
		goto enqueue_reqs;
	}

	fid++;
	if (fid <= args->end_fid)
		goto next_model;

	model_enq = model_enq + ops_count;
	if (model_enq < args->nb_reqs)
		goto next_rep;

	return 0;
}

/* Dequeue inference requests with burst size greater than 1 */
static int
ml_dequeue_burst(void *arg)
{
	struct test_inference *t = ml_test_priv((struct ml_test *)arg);
	struct rte_ml_op_error error;
	struct ml_core_args *args;
	struct ml_request *req;
	uint64_t total_deq = 0;
	uint16_t burst_deq = 0;
	uint8_t nb_filelist;
	uint32_t lcore_id;
	uint32_t i;

	lcore_id = rte_lcore_id();
	args = &t->args[lcore_id];
	nb_filelist = args->end_fid - args->start_fid + 1;

	if (args->nb_reqs == 0)
		return 0;

dequeue_burst:
	burst_deq = rte_ml_dequeue_burst(t->cmn.opt->dev_id, args->qp_id, args->deq_ops,
					 t->cmn.opt->burst_size);

	if (likely(burst_deq > 0)) {
		total_deq += burst_deq;

		for (i = 0; i < burst_deq; i++) {
			if (unlikely(args->deq_ops[i]->status == RTE_ML_OP_STATUS_ERROR)) {
				rte_ml_op_error_get(t->cmn.opt->dev_id, args->deq_ops[i], &error);
				ml_err("error_code = 0x%" PRIx64 ", error_message = %s\n",
				       error.errcode, error.message);
				t->error_count[lcore_id]++;
			}
			req = (struct ml_request *)args->deq_ops[i]->user_ptr;
			if (req != NULL)
				rte_mempool_put(t->model[req->fid].io_pool, req);
		}
		rte_mempool_put_bulk(t->op_pool, (void *)args->deq_ops, burst_deq);
	}

	if (total_deq < args->nb_reqs * nb_filelist)
		goto dequeue_burst;

	return 0;
}

bool
test_inference_cap_check(struct ml_options *opt)
{
	struct rte_ml_dev_info dev_info;

	if (!ml_test_cap_check(opt))
		return false;

	rte_ml_dev_info_get(opt->dev_id, &dev_info);

	if (opt->queue_pairs > dev_info.max_queue_pairs) {
		ml_err("Insufficient capabilities: queue_pairs = %u, max_queue_pairs = %u",
		       opt->queue_pairs, dev_info.max_queue_pairs);
		return false;
	}

	if (opt->queue_size > dev_info.max_desc) {
		ml_err("Insufficient capabilities: queue_size = %u, max_desc = %u", opt->queue_size,
		       dev_info.max_desc);
		return false;
	}

	if (opt->nb_filelist > dev_info.max_models) {
		ml_err("Insufficient capabilities: filelist count exceeds device limit, count = %u (max limit = %u)",
		       opt->nb_filelist, dev_info.max_models);
		return false;
	}

	return true;
}

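/* Validate test options: file availability, repetitions, burst size, queue-pairs, queue size and lcore count */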
int
test_inference_opt_check(struct ml_options *opt)
{
	uint32_t i;
	int ret;

	/* check common opts */
	ret = ml_test_opt_check(opt);
	if (ret != 0)
		return ret;

	/* check file availability */
	for (i = 0; i < opt->nb_filelist; i++) {
		if (access(opt->filelist[i].model, F_OK) == -1) {
			ml_err("Model file not accessible: id = %u, file = %s", i,
			       opt->filelist[i].model);
			return -ENOENT;
		}

		if (access(opt->filelist[i].input, F_OK) == -1) {
			ml_err("Input file not accessible: id = %u, file = %s", i,
			       opt->filelist[i].input);
			return -ENOENT;
		}
	}

	if (opt->repetitions == 0) {
		ml_err("Invalid option, repetitions = %" PRIu64 "\n", opt->repetitions);
		return -EINVAL;
	}

	if (opt->burst_size == 0) {
		ml_err("Invalid option, burst_size = %u\n", opt->burst_size);
		return -EINVAL;
	}

	if (opt->burst_size > ML_TEST_MAX_POOL_SIZE) {
		ml_err("Invalid option, burst_size = %u (> max supported = %d)\n", opt->burst_size,
		       ML_TEST_MAX_POOL_SIZE);
		return -EINVAL;
	}

	if (opt->queue_pairs == 0) {
		ml_err("Invalid option, queue_pairs = %u\n", opt->queue_pairs);
		return -EINVAL;
	}

	if (opt->queue_size == 0) {
		ml_err("Invalid option, queue_size = %u\n", opt->queue_size);
		return -EINVAL;
	}

	/* check number of available lcores. */
	if (rte_lcore_count() < (uint32_t)(opt->queue_pairs * 2 + 1)) {
		ml_err("Insufficient lcores = %u\n", rte_lcore_count());
		ml_err("Minimum lcores required to create %u queue-pairs = %u\n", opt->queue_pairs,
		       (opt->queue_pairs * 2 + 1));
		return -EINVAL;
	}

	return 0;
}

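/* Dump test options */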
void
test_inference_opt_dump(struct ml_options *opt)
{
	uint32_t i;

	/* dump common opts */
	ml_test_opt_dump(opt);

	/* dump test opts */
	ml_dump("repetitions", "%" PRIu64, opt->repetitions);
	ml_dump("burst_size", "%u", opt->burst_size);
	ml_dump("queue_pairs", "%u", opt->queue_pairs);
	ml_dump("queue_size", "%u", opt->queue_size);

	if (opt->batches == 0)
		ml_dump("batches", "%u (default)", opt->batches);
	else
		ml_dump("batches", "%u", opt->batches);

	ml_dump_begin("filelist");
	for (i = 0; i < opt->nb_filelist; i++) {
		ml_dump_list("model", i, opt->filelist[i].model);
		ml_dump_list("input", i, opt->filelist[i].input);
		ml_dump_list("output", i, opt->filelist[i].output);
	}
	ml_dump_end;
}

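/* Allocate test private data, select enqueue/dequeue handlers and allocate per-lcore op arrays */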
int
test_inference_setup(struct ml_test *test, struct ml_options *opt)
{
	struct test_inference *t;
	void *test_inference;
	uint32_t lcore_id;
	int ret = 0;
	uint32_t i;

	test_inference = rte_zmalloc_socket(test->name, sizeof(struct test_inference),
					    RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_inference == NULL) {
		ml_err("failed to allocate memory for test_inference");
		ret = -ENOMEM;
		goto error;
	}
	test->test_priv = test_inference;
	t = ml_test_priv(test);

	t->nb_used = 0;
	t->cmn.result = ML_TEST_FAILED;
	t->cmn.opt = opt;
	memset(t->error_count, 0, RTE_MAX_LCORE * sizeof(uint64_t));

	/* get device info */
	ret = rte_ml_dev_info_get(opt->dev_id, &t->cmn.dev_info);
	if (ret < 0) {
		ml_err("failed to get device info");
		goto error;
	}

	if (opt->burst_size == 1) {
		t->enqueue = ml_enqueue_single;
		t->dequeue = ml_dequeue_single;
	} else {
		t->enqueue = ml_enqueue_burst;
		t->dequeue = ml_dequeue_burst;
	}

	/* set model initial state */
	for (i = 0; i < opt->nb_filelist; i++)
		t->model[i].state = MODEL_INITIAL;

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		t->args[lcore_id].enq_ops = rte_zmalloc_socket(
			"ml_test_enq_ops", opt->burst_size * sizeof(struct rte_ml_op *),
			RTE_CACHE_LINE_SIZE, opt->socket_id);
		t->args[lcore_id].deq_ops = rte_zmalloc_socket(
			"ml_test_deq_ops", opt->burst_size * sizeof(struct rte_ml_op *),
			RTE_CACHE_LINE_SIZE, opt->socket_id);
		t->args[lcore_id].reqs = rte_zmalloc_socket(
			"ml_test_requests", opt->burst_size * sizeof(struct ml_request *),
			RTE_CACHE_LINE_SIZE, opt->socket_id);
	}

	return 0;

error:
	if (test_inference != NULL)
		rte_free(test_inference);

	return ret;
}

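/* Release test private data */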
void
test_inference_destroy(struct ml_test *test, struct ml_options *opt)
{
	struct test_inference *t;

	RTE_SET_USED(opt);

	t = ml_test_priv(test);
	if (t != NULL)
		rte_free(t);
}

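/* Configure the ML device, set up queue pairs and start the device */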
int
ml_inference_mldev_setup(struct ml_test *test, struct ml_options *opt)
{
	struct rte_ml_dev_qp_conf qp_conf;
	struct test_inference *t;
	uint16_t qp_id;
	int ret;

	t = ml_test_priv(test);

	RTE_SET_USED(t);

	ret = ml_test_device_configure(test, opt);
	if (ret != 0)
		return ret;

	/* setup queue pairs */
	qp_conf.nb_desc = opt->queue_size;
	qp_conf.cb = NULL;

	for (qp_id = 0; qp_id < opt->queue_pairs; qp_id++) {
		ret = rte_ml_dev_queue_pair_setup(opt->dev_id, qp_id, &qp_conf, opt->socket_id);
		if (ret != 0) {
			ml_err("Failed to setup ml device queue-pair, dev_id = %d, qp_id = %u\n",
			       opt->dev_id, qp_id);
			return ret;
		}
	}

	ret = ml_test_device_start(test, opt);
	if (ret != 0)
		goto error;

	return 0;

error:
	ml_test_device_close(test, opt);

	return ret;
}

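/* Stop and close the ML device */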
int
ml_inference_mldev_destroy(struct ml_test *test, struct ml_options *opt)
{
	int ret;

	ret = ml_test_device_stop(test, opt);
	if (ret != 0)
		goto error;

	ret = ml_test_device_close(test, opt);
	if (ret != 0)
		return ret;

	return 0;

error:
	ml_test_device_close(test, opt);

	return ret;
}

/* Callback for IO pool create. This function computes the fields of the ml_request
 * structure and prepares the quantized input data.
 */
static void
ml_request_initialize(struct rte_mempool *mp, void *opaque, void *obj, unsigned int obj_idx)
{
	struct test_inference *t = ml_test_priv((struct ml_test *)opaque);
	struct ml_request *req = (struct ml_request *)obj;

	RTE_SET_USED(mp);
	RTE_SET_USED(obj_idx);

	req->input = (uint8_t *)obj +
		     RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.min_align_size);
	req->output = req->input +
		      RTE_ALIGN_CEIL(t->model[t->fid].inp_qsize, t->cmn.dev_info.min_align_size);
	req->niters = 0;

	/* quantize data */
	rte_ml_io_quantize(t->cmn.opt->dev_id, t->model[t->fid].id, t->model[t->fid].nb_batches,
			   t->model[t->fid].input, req->input);
}

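/* Compute I/O buffer sizes, load the input file and create the quantized I/O pool for a model */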
int
ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t fid)
{
	struct test_inference *t = ml_test_priv(test);
	char mz_name[RTE_MEMZONE_NAMESIZE];
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	const struct rte_memzone *mz;
	uint64_t nb_buffers;
	uint32_t buff_size;
	uint32_t mz_size;
	uint32_t fsize;
	FILE *fp;
	int ret;

	/* get input buffer size */
	ret = rte_ml_io_input_size_get(opt->dev_id, t->model[fid].id, t->model[fid].nb_batches,
				       &t->model[fid].inp_qsize, &t->model[fid].inp_dsize);
	if (ret != 0) {
		ml_err("Failed to get input size, model : %s\n", opt->filelist[fid].model);
		return ret;
	}

	/* get output buffer size */
	ret = rte_ml_io_output_size_get(opt->dev_id, t->model[fid].id, t->model[fid].nb_batches,
					&t->model[fid].out_qsize, &t->model[fid].out_dsize);
	if (ret != 0) {
		ml_err("Failed to get output size, model : %s\n", opt->filelist[fid].model);
		return ret;
	}

	/* allocate buffer for user data */
	mz_size = t->model[fid].inp_dsize + t->model[fid].out_dsize;
	sprintf(mz_name, "ml_user_data_%d", fid);
	mz = rte_memzone_reserve(mz_name, mz_size, opt->socket_id, 0);
	if (mz == NULL) {
		ml_err("Memzone allocation failed for ml_user_data\n");
		ret = -ENOMEM;
		goto error;
	}

	t->model[fid].input = mz->addr;
	t->model[fid].output = t->model[fid].input + t->model[fid].inp_dsize;

	/* load input file */
	fp = fopen(opt->filelist[fid].input, "r");
	if (fp == NULL) {
		ml_err("Failed to open input file : %s\n", opt->filelist[fid].input);
		ret = -errno;
		goto error;
	}

	fseek(fp, 0, SEEK_END);
	fsize = ftell(fp);
	fseek(fp, 0, SEEK_SET);
	if (fsize != t->model[fid].inp_dsize) {
		ml_err("Invalid input file, size = %u (expected size = %" PRIu64 ")\n", fsize,
		       t->model[fid].inp_dsize);
		ret = -EINVAL;
		fclose(fp);
		goto error;
	}

	if (fread(t->model[fid].input, 1, t->model[fid].inp_dsize, fp) != t->model[fid].inp_dsize) {
		ml_err("Failed to read input file : %s\n", opt->filelist[fid].input);
		ret = -errno;
		fclose(fp);
		goto error;
	}
	fclose(fp);

	/* create mempool for quantized input and output buffers. ml_request_initialize is
	 * used as a callback for object creation.
	 */
	buff_size = RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.min_align_size) +
		    RTE_ALIGN_CEIL(t->model[fid].inp_qsize, t->cmn.dev_info.min_align_size) +
		    RTE_ALIGN_CEIL(t->model[fid].out_qsize, t->cmn.dev_info.min_align_size);
	nb_buffers = RTE_MIN((uint64_t)ML_TEST_MAX_POOL_SIZE, opt->repetitions);

	t->fid = fid;
	sprintf(mp_name, "ml_io_pool_%d", fid);
	t->model[fid].io_pool = rte_mempool_create(mp_name, nb_buffers, buff_size, 0, 0, NULL, NULL,
						   ml_request_initialize, test, opt->socket_id, 0);
	if (t->model[fid].io_pool == NULL) {
		ml_err("Failed to create io pool : %s\n", "ml_io_pool");
		ret = -ENOMEM;
		goto error;
	}

	return 0;

error:
	if (mz != NULL)
		rte_memzone_free(mz);

	if (t->model[fid].io_pool != NULL) {
		rte_mempool_free(t->model[fid].io_pool);
		t->model[fid].io_pool = NULL;
	}

	return ret;
}

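/* Release the user data memzone and the I/O mempool created for a model */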
void
ml_inference_iomem_destroy(struct ml_test *test, struct ml_options *opt, uint16_t fid)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	char mp_name[RTE_MEMPOOL_NAMESIZE];
	const struct rte_memzone *mz;
	struct rte_mempool *mp;

	RTE_SET_USED(test);
	RTE_SET_USED(opt);

	/* release user data memzone */
	sprintf(mz_name, "ml_user_data_%d", fid);
	mz = rte_memzone_lookup(mz_name);
	if (mz != NULL)
		rte_memzone_free(mz);

	/* destroy io pool */
	sprintf(mp_name, "ml_io_pool_%d", fid);
	mp = rte_mempool_lookup(mp_name);
	if (mp != NULL)
		rte_mempool_free(mp);
}

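/* Create the ML op pool */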
int
ml_inference_mem_setup(struct ml_test *test, struct ml_options *opt)
{
	struct test_inference *t = ml_test_priv(test);

	/* create op pool */
	t->op_pool = rte_ml_op_pool_create("ml_test_op_pool", ML_TEST_MAX_POOL_SIZE, 0, 0,
					   opt->socket_id);
	if (t->op_pool == NULL) {
		ml_err("Failed to create op pool : %s\n", "ml_op_pool");
		return -ENOMEM;
	}

	return 0;
}

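/* Release the ML op pool */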
void
ml_inference_mem_destroy(struct ml_test *test, struct ml_options *opt)
{
	struct test_inference *t = ml_test_priv(test);

	RTE_SET_USED(opt);

	/* release op pool */
	if (t->op_pool != NULL)
		rte_mempool_free(t->op_pool);
}

/* Callback for mempool object iteration. This callback dequantizes the output data. */
static void
ml_request_finish(struct rte_mempool *mp, void *opaque, void *obj, unsigned int obj_idx)
{
	struct test_inference *t = ml_test_priv((struct ml_test *)opaque);
	struct ml_request *req = (struct ml_request *)obj;
	struct ml_model *model = &t->model[req->fid];

	RTE_SET_USED(mp);
	RTE_SET_USED(obj_idx);

	if (req->niters == 0)
		return;

	t->nb_used++;
	rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, t->model[req->fid].nb_batches,
			     req->output, model->output);
}

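/* Aggregate per-lcore error counts, dequantize outputs and set the overall test result */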
int
ml_inference_result(struct ml_test *test, struct ml_options *opt, uint16_t fid)
{
	struct test_inference *t = ml_test_priv(test);
	uint64_t error_count = 0;
	uint32_t i;

	RTE_SET_USED(opt);

	/* check for errors */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		error_count += t->error_count[i];

	rte_mempool_obj_iter(t->model[fid].io_pool, ml_request_finish, test);

	if ((t->nb_used > 0) && (error_count == 0))
		t->cmn.result = ML_TEST_SUCCESS;
	else
		t->cmn.result = ML_TEST_FAILED;

	return t->cmn.result;
}

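/* Split repetitions across queue-pairs and launch one enqueue and one dequeue worker lcore per queue-pair */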
int
ml_inference_launch_cores(struct ml_test *test, struct ml_options *opt, uint16_t start_fid,
			  uint16_t end_fid)
{
	struct test_inference *t = ml_test_priv(test);
	uint32_t lcore_id;
	uint32_t nb_reqs;
	uint32_t id = 0;
	uint32_t qp_id;

	nb_reqs = opt->repetitions / opt->queue_pairs;

	RTE_LCORE_FOREACH_WORKER(lcore_id)
	{
		if (id >= opt->queue_pairs * 2)
			break;

		qp_id = id / 2;
		t->args[lcore_id].qp_id = qp_id;
		t->args[lcore_id].nb_reqs = nb_reqs;
		if (qp_id == 0)
			t->args[lcore_id].nb_reqs += opt->repetitions - nb_reqs * opt->queue_pairs;

		if (t->args[lcore_id].nb_reqs == 0) {
			id++;
			break;
		}

		t->args[lcore_id].start_fid = start_fid;
		t->args[lcore_id].end_fid = end_fid;

		if (id % 2 == 0)
			rte_eal_remote_launch(t->enqueue, test, lcore_id);
		else
			rte_eal_remote_launch(t->dequeue, test, lcore_id);

		id++;
	}

	return 0;
}