/* xref: /dpdk/app/test-crypto-perf/cperf_test_common.c
 * (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
 */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>

#include "cperf_test_common.h"

struct obj_params {
	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;
	uint16_t segment_sz;
	uint16_t headroom_sz;
	uint16_t data_len;
	uint16_t segments_nb;
};

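/*
 * Each object of the pool built by cperf_alloc_common_memory() bundles a
 * crypto op with the mbuf(s) it operates on; the helpers below fill in
 * those mbuf headers. A sketch of one object (offsets as computed in that
 * function; sizes are illustrative only):
 *
 *   obj                     rte_crypto_op + sym op + private data,
 *                           padded up to a cache line
 *   obj + src_buf_offset    segments_nb x (mbuf header + segment_sz data)
 *   obj + dst_buf_offset    one mbuf header + max_size data
 *                           (out-of-place mode only)
 */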
static void
fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t headroom, uint16_t data_len)
{
	uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);

	/* start of buffer is after mbuf structure and priv data */
	m->priv_size = 0;
	m->buf_addr = (char *)m + mbuf_hdr_size;
	m->buf_iova = rte_mempool_virt2iova(obj) +
		mbuf_offset + mbuf_hdr_size;
	m->buf_len = segment_sz;
	m->data_len = data_len;
	m->pkt_len = data_len;

	/* Use headroom specified for the buffer */
	m->data_off = headroom;

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = 0xff;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;
}

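/*
 * Multi-segment variant: headers and data areas alternate back to back
 * inside the object and each header is chained to the one that follows.
 * An illustration with assumed values segment_sz = 1024, segments_nb = 3:
 *
 *   | hdr0 | 1024 B | hdr1 | 1024 B | hdr2 | 1024 B |
 *     next ----------^ next ---------^ next = NULL
 */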
static void
fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t headroom, uint16_t data_len, uint16_t segments_nb)
{
	uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
	uint16_t remaining_segments = segments_nb;
	struct rte_mbuf *next_mbuf;
	rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
			 mbuf_offset + mbuf_hdr_size;

	do {
		/* start of buffer is after mbuf structure and priv data */
		m->priv_size = 0;
		m->buf_addr = (char *)m + mbuf_hdr_size;
		m->buf_iova = next_seg_phys_addr;
		next_seg_phys_addr += mbuf_hdr_size + segment_sz;
		m->buf_len = segment_sz;
		m->data_len = data_len;

		/* Use headroom specified for the buffer */
		m->data_off = headroom;

		/* init some constant fields */
		m->pool = mp;
		m->nb_segs = segments_nb;
		m->port = 0xff;
		rte_mbuf_refcnt_set(m, 1);
		next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
					mbuf_hdr_size + segment_sz);

		remaining_segments--;
		if (remaining_segments > 0) {
			m->next = next_mbuf;
			m = next_mbuf;
		} else {
			/* Last segment: terminate the chain here instead of
			 * linking (and writing a NULL next into) a phantom
			 * header one past the end of the laid-out area.
			 */
			m->next = NULL;
		}
	} while (remaining_segments > 0);
}

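/*
 * rte_mempool_obj_iter() callback: turns each raw mempool object into a
 * symmetric crypto op with its source (and optionally destination) mbufs
 * laid out behind it. The per-op private area sized in
 * cperf_alloc_common_memory() is reached with the standard
 * rte_crypto_op_ctod_offset() macro; a minimal sketch, where iv_offset is
 * a hypothetical offset into that private area (not defined in this file):
 *
 *	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
 *						    iv_offset);
 *	memcpy(iv_ptr, test_vector->aead_iv.data,
 *	       test_vector->aead_iv.length);
 */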
static void
mempool_obj_init(struct rte_mempool *mp,
		 void *opaque_arg,
		 void *obj,
		 __rte_unused unsigned int i)
{
	struct obj_params *params = opaque_arg;
	struct rte_crypto_op *op = obj;
	struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
					params->src_buf_offset);

	/* Set crypto operation */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	op->phys_addr = rte_mem_virt2iova(obj);
	op->mempool = mp;

	/* Set source buffer */
	op->sym->m_src = m;
	if (params->segments_nb == 1)
		fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len);
	else
		fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len, params->segments_nb);

	/* Set destination buffer */
	if (params->dst_buf_offset) {
		m = (struct rte_mbuf *) ((uint8_t *) obj +
				params->dst_buf_offset);
		fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len);
		op->sym->m_dst = m;
	} else
		op->sym->m_dst = NULL;
}

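/*
 * Builds the per-(device, queue pair) pool of crypto ops with their mbufs
 * embedded in the same object, and reports the src/dst mbuf offsets so
 * callers can locate the buffers inside each op. A minimal usage sketch;
 * the surrounding test setup ("opts", "t_vec", dev_id, qp_id) is assumed,
 * not shown:
 *
 *	uint32_t src_off, dst_off;
 *	struct rte_mempool *pool;
 *
 *	if (cperf_alloc_common_memory(opts, t_vec, dev_id, qp_id, 0,
 *			&src_off, &dst_off, &pool) < 0)
 *		return -1;
 *	// ops dequeued from "pool" carry ready-to-use mbuf chains
 */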
int
cperf_alloc_common_memory(const struct cperf_options *options,
			const struct cperf_test_vector *test_vector,
			uint8_t dev_id, uint16_t qp_id,
			size_t extra_op_priv_size,
			uint32_t *src_buf_offset,
			uint32_t *dst_buf_offset,
			struct rte_mempool **pool)
{
	const char *mp_ops_name;
	char pool_name[32] = "";
	int ret;

	/* Calculate the object size */
	uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);
	uint16_t crypto_op_private_size;

	if (options->op_type == CPERF_ASYM_MODEX) {
		snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_op_pool%u",
			 rte_socket_id());
		*pool = rte_crypto_op_pool_create(
			pool_name, RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
			options->pool_sz, 0, 0, rte_socket_id());
		if (*pool == NULL) {
			RTE_LOG(ERR, USER1,
				"Cannot allocate mempool for device %u\n",
				dev_id);
			return -1;
		}
		return 0;
	}

	/*
	 * For AES-CCM, the IV field must be 16 bytes long, and the AAD
	 * field needs room for 18 bytes on top of the AAD itself, each
	 * rounded up to a multiple of 16 bytes.
	 */
	if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16) +
			RTE_ALIGN_CEIL(options->aead_aad_sz + 18, 16);
	} else {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			test_vector->aead_iv.length +
			options->aead_aad_sz;
	}
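	/*
	 * Worked example, using illustrative numbers rather than defaults:
	 * a 12-byte CCM IV reserves RTE_ALIGN_CEIL(12, 16) = 16 bytes and a
	 * 16-byte AAD reserves RTE_ALIGN_CEIL(16 + 18, 16) = 48 bytes.
	 */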

	uint16_t crypto_op_total_size = crypto_op_size +
				crypto_op_private_size;
	uint16_t crypto_op_total_size_padded =
				RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
	uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
	uint32_t max_size = options->max_buffer_size + options->digest_sz;
	uint16_t segments_nb = (max_size % options->segment_sz) ?
			(max_size / options->segment_sz) + 1 :
			max_size / options->segment_sz;
	uint32_t obj_size = crypto_op_total_size_padded +
				(mbuf_size * segments_nb);
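	/*
	 * Sizing sketch with assumed option values (illustration only):
	 * max_buffer_size = 2048, digest_sz = 16 and segment_sz = 512 give
	 * max_size = 2064, so segments_nb = 2064 / 512 rounded up = 5, and
	 * each object carries five (mbuf header + 512 B) areas after the
	 * cache-line-padded crypto op.
	 */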

	snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
			dev_id, qp_id);

	*src_buf_offset = crypto_op_total_size_padded;

	struct obj_params params = {
		.segment_sz = options->segment_sz,
		.headroom_sz = options->headroom_sz,
		/* Data len = segment size - (headroom + tailroom) */
		.data_len = options->segment_sz -
			    options->headroom_sz -
			    options->tailroom_sz,
		.segments_nb = segments_nb,
		.src_buf_offset = crypto_op_total_size_padded,
		.dst_buf_offset = 0
	};

	if (options->out_of_place) {
		*dst_buf_offset = *src_buf_offset +
				(mbuf_size * segments_nb);
		params.dst_buf_offset = *dst_buf_offset;
		/* Destination buffer will be one segment only */
		obj_size += max_size + sizeof(struct rte_mbuf);
	}

	*pool = rte_mempool_create_empty(pool_name,
			options->pool_sz, obj_size, 512, 0,
			rte_socket_id(), 0);
	if (*pool == NULL) {
		RTE_LOG(ERR, USER1,
			"Cannot allocate mempool for device %u\n",
			dev_id);
		return -1;
	}

	mp_ops_name = rte_mbuf_best_mempool_ops();

	ret = rte_mempool_set_ops_byname(*pool,
		mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, USER1,
			"Error setting mempool handler for device %u\n",
			dev_id);
		return -1;
	}

	ret = rte_mempool_populate_default(*pool);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Error populating mempool for device %u\n",
			dev_id);
		return -1;
	}

	rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);

	return 0;
}