xref: /dpdk/app/test-crypto-perf/cperf_test_common.c (revision e811e2d76fb2ce8233313aaf641de5ebf34f23c2)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>

#include "cperf_test_common.h"

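/*
 * Per-object initialization parameters for mempool_obj_init().
 * src_buf_offset/dst_buf_offset are offsets from the start of the mempool
 * object, which holds the crypto op followed by its source (and, for
 * out-of-place tests, destination) mbufs and data buffers.
 */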
struct obj_params {
	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;
	uint16_t segment_sz;
	uint16_t headroom_sz;
	uint16_t data_len;
	uint16_t segments_nb;
};

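/*
 * Set up a single-segment mbuf located at mbuf_offset inside a mempool
 * object; its data buffer starts right after the mbuf header.
 */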
static void
fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t headroom, uint16_t data_len)
{
	uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);

	/* start of buffer is just after the mbuf structure (no private data) */
	m->priv_size = 0;
	m->buf_addr = (char *)m + mbuf_hdr_size;
	rte_mbuf_iova_set(m, rte_mempool_virt2iova(obj) + mbuf_offset + mbuf_hdr_size);
	m->buf_len = segment_sz;
	m->data_len = data_len;
	m->pkt_len = data_len;

	/* Use headroom specified for the buffer */
	m->data_off = headroom;

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = 0xff;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;
}

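/*
 * Set up a chain of segments_nb mbufs laid out back to back inside a
 * mempool object, each mbuf header immediately followed by its segment_sz
 * data buffer.
 */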
static void
fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t headroom, uint16_t data_len, uint16_t segments_nb)
{
	uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
	uint16_t remaining_segments = segments_nb;
	struct rte_mbuf *next_mbuf;
	rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
			 mbuf_offset + mbuf_hdr_size;

	do {
		/* start of buffer is just after the mbuf structure (no private data) */
		m->priv_size = 0;
		m->buf_addr = (char *)m + mbuf_hdr_size;
		rte_mbuf_iova_set(m, next_seg_phys_addr);
		next_seg_phys_addr += mbuf_hdr_size + segment_sz;
		m->buf_len = segment_sz;
		m->data_len = data_len;
		/* Only the first segment's pkt_len is read; setting it on
		 * every segment is harmless.
		 */
		m->pkt_len = data_len * segments_nb;

		/* Use headroom specified for the buffer */
		m->data_off = headroom;

		/* init some constant fields */
		m->pool = mp;
		m->nb_segs = segments_nb;
		m->port = 0xff;
		rte_mbuf_refcnt_set(m, 1);
		next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
					mbuf_hdr_size + segment_sz);
		remaining_segments--;
		/* Chain to the next segment, or terminate the chain after
		 * the last one; advancing m past it and writing m->next
		 * there would scribble outside the mempool object.
		 */
		if (remaining_segments > 0) {
			m->next = next_mbuf;
			m = next_mbuf;
		} else
			m->next = NULL;
	} while (remaining_segments > 0);
}

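/* rte_mempool_obj_iter() callback: one-time init of an asymmetric op */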
static void
mempool_asym_obj_init(struct rte_mempool *mp, __rte_unused void *opaque_arg,
		      void *obj, __rte_unused unsigned int i)
{
	struct rte_crypto_op *op = obj;

	/* Set crypto operation */
	op->type = RTE_CRYPTO_OP_TYPE_ASYMMETRIC;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	op->phys_addr = rte_mem_virt2iova(obj);
	op->mempool = mp;
}

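/*
 * rte_mempool_obj_iter() callback: one-time init of a symmetric op and the
 * source/destination mbufs carved out of the same mempool object.
 */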
static void
mempool_obj_init(struct rte_mempool *mp,
		 void *opaque_arg,
		 void *obj,
		 __rte_unused unsigned int i)
{
	struct obj_params *params = opaque_arg;
	struct rte_crypto_op *op = obj;
	struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
					params->src_buf_offset);

	/* Set crypto operation */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	op->phys_addr = rte_mem_virt2iova(obj);
	op->mempool = mp;

	/* Set source buffer */
	op->sym->m_src = m;
	if (params->segments_nb == 1)
		fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len);
	else
		fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len, params->segments_nb);

	/* Set destination buffer */
	if (params->dst_buf_offset) {
		m = (struct rte_mbuf *) ((uint8_t *) obj +
				params->dst_buf_offset);
		fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len);
		op->sym->m_dst = m;
	} else
		op->sym->m_dst = NULL;
}

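/*
 * Create and populate the op mempool shared by one <device, queue pair>.
 * Each object is laid out as follows (offsets from the object start):
 *
 *	0 .................. crypto op + sym op + private IV/AAD area,
 *	                     padded to a cache line
 *	*src_buf_offset .... segments_nb x (mbuf header + segment_sz buffer)
 *	*dst_buf_offset .... 1 x (mbuf header + buffer), out-of-place only
 *
 * Returns 0 on success, -1 on failure.
 */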
int
cperf_alloc_common_memory(const struct cperf_options *options,
			const struct cperf_test_vector *test_vector,
			uint8_t dev_id, uint16_t qp_id,
			size_t extra_op_priv_size,
			uint32_t *src_buf_offset,
			uint32_t *dst_buf_offset,
			struct rte_mempool **pool)
{
	const char *mp_ops_name;
	char pool_name[32] = "";
	int ret;

	/* Calculate the object size */
	uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);
	uint16_t crypto_op_private_size;

	/* Asymmetric modexp needs only plain crypto ops, no mbufs */
	if (options->op_type == CPERF_ASYM_MODEX) {
		snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_op_pool%u",
			 rte_socket_id());
		*pool = rte_crypto_op_pool_create(
			pool_name, RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
			options->pool_sz, RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
			rte_socket_id());
		if (*pool == NULL) {
			RTE_LOG(ERR, USER1,
				"Cannot allocate mempool for device %u\n",
				dev_id);
			return -1;
		}
		rte_mempool_obj_iter(*pool, mempool_asym_obj_init, NULL);
		return 0;
	}

	/*
	 * For AES-CCM, the IV field needs 16 bytes and the AAD field needs
	 * room for an 18-byte header plus the AAD itself, both rounded up
	 * to a multiple of 16 bytes.
	 */
	if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16) +
			RTE_ALIGN_CEIL(options->aead_aad_sz + 18, 16);
	} else {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			test_vector->aead_iv.length +
			options->aead_aad_sz;
	}

	uint16_t crypto_op_total_size = crypto_op_size +
				crypto_op_private_size;
	uint16_t crypto_op_total_size_padded =
				RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
	uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
	uint32_t max_size = options->max_buffer_size + options->digest_sz;
	/* Segments needed to hold the buffer plus the digest, rounded up */
	uint16_t segments_nb = (max_size % options->segment_sz) ?
			(max_size / options->segment_sz) + 1 :
			max_size / options->segment_sz;
	uint32_t obj_size = crypto_op_total_size_padded +
				(mbuf_size * segments_nb);

	snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
			dev_id, qp_id);

	*src_buf_offset = crypto_op_total_size_padded;

	struct obj_params params = {
		.segment_sz = options->segment_sz,
		.headroom_sz = options->headroom_sz,
		/* Data len = segment size - (headroom + tailroom) */
		.data_len = options->segment_sz -
			    options->headroom_sz -
			    options->tailroom_sz,
		.segments_nb = segments_nb,
		.src_buf_offset = crypto_op_total_size_padded,
		.dst_buf_offset = 0
	};

	if (options->out_of_place) {
		*dst_buf_offset = *src_buf_offset +
				(mbuf_size * segments_nb);
		params.dst_buf_offset = *dst_buf_offset;
		/* Destination buffer will be one segment only */
		obj_size += max_size + sizeof(struct rte_mbuf);
	}

	/* 512 is the per-lcore object cache size */
	*pool = rte_mempool_create_empty(pool_name,
			options->pool_sz, obj_size, 512, 0,
			rte_socket_id(), 0);
	if (*pool == NULL) {
		RTE_LOG(ERR, USER1,
			"Cannot allocate mempool for device %u\n",
			dev_id);
		return -1;
	}

	mp_ops_name = rte_mbuf_best_mempool_ops();

	ret = rte_mempool_set_ops_byname(*pool,
		mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, USER1,
			"Error setting mempool handler for device %u\n",
			dev_id);
		return -1;
	}

	ret = rte_mempool_populate_default(*pool);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Error populating mempool for device %u\n",
			dev_id);
		return -1;
	}

	rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);

	return 0;
}

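/*
 * Example (hypothetical caller, for illustration only; BURST is an assumed
 * constant, not defined in this file): once cperf_alloc_common_memory()
 * succeeds, a test path can fetch pre-initialized ops in bulk and reach the
 * attached source mbuf either through op->sym->m_src or via the returned
 * offset:
 *
 *	struct rte_crypto_op *ops[BURST];
 *
 *	if (rte_mempool_get_bulk(pool, (void **)ops, BURST) == 0) {
 *		struct rte_mbuf *src = (struct rte_mbuf *)
 *			((uint8_t *)ops[0] + src_buf_offset);
 *		... enqueue to the cryptodev ...
 *		rte_mempool_put_bulk(pool, (void **)ops, BURST);
 *	}
 */

/*
 * Fill the source mbuf chain with the plaintext or ciphertext pattern from
 * the test vector, depending on the operation direction.
 */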
void
cperf_mbuf_set(struct rte_mbuf *mbuf,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	uint32_t segment_sz = options->segment_sz;
	uint8_t *mbuf_data;
	uint8_t *test_data;
	uint32_t remaining_bytes = options->max_buffer_size;

	if (options->op_type == CPERF_AEAD) {
		test_data = (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
					test_vector->plaintext.data :
					test_vector->ciphertext.data;
	} else {
		test_data =
			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				test_vector->plaintext.data :
				test_vector->ciphertext.data;
	}

	/* Copy the pattern one segment at a time; the chain built at pool
	 * creation is sized to hold max_buffer_size plus the digest.
	 */
	while (remaining_bytes) {
		mbuf_data = rte_pktmbuf_mtod(mbuf, uint8_t *);

		if (remaining_bytes <= segment_sz) {
			memcpy(mbuf_data, test_data, remaining_bytes);
			return;
		}

		memcpy(mbuf_data, test_data, segment_sz);
		remaining_bytes -= segment_sz;
		test_data += segment_sz;
		mbuf = mbuf->next;
	}
}