/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>

#include "cperf_test_common.h"
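
/*
 * Each mempool object built below is laid out as:
 *   crypto op (rte_crypto_op + rte_crypto_sym_op),
 *   op private data (IVs, AAD), padded to a cache line,
 *   segments_nb x (mbuf header + segment buffer) for the source,
 *   and, for out-of-place tests, one extra mbuf for the destination.
 * The offsets in this structure locate the mbufs within an object.
 */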
struct obj_params {
	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;
	uint16_t segment_sz;
	uint16_t headroom_sz;
	uint16_t data_len;
	uint16_t segments_nb;
};
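
/*
 * Initialize a one-segment mbuf whose data buffer starts right after
 * the mbuf header, inside the same mempool object.
 */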
static void
fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t headroom, uint16_t data_len)
{
	uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);

	/* start of buffer is after mbuf structure and priv data */
	m->priv_size = 0;
	m->buf_addr = (char *)m + mbuf_hdr_size;
	rte_mbuf_iova_set(m, rte_mempool_virt2iova(obj) + mbuf_offset +
			  mbuf_hdr_size);
	m->buf_len = segment_sz;
	m->data_len = data_len;
	m->pkt_len = data_len;

	/* Use headroom specified for the buffer */
	m->data_off = headroom;

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = 0xff;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;
}
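
/*
 * Initialize a chain of mbufs laid out back to back in a single mempool
 * object: each segment is an mbuf header immediately followed by its
 * segment_sz-byte data buffer.
 */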
static void
fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t headroom, uint16_t data_len, uint16_t segments_nb)
{
	uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
	uint16_t remaining_segments = segments_nb;
	rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
			 mbuf_offset + mbuf_hdr_size;

	do {
		/* start of buffer is after mbuf structure and priv data */
		m->priv_size = 0;
		m->buf_addr = (char *)m + mbuf_hdr_size;
		rte_mbuf_iova_set(m, next_seg_phys_addr);
		next_seg_phys_addr += mbuf_hdr_size + segment_sz;
		m->buf_len = segment_sz;
		m->data_len = data_len;
		/* pkt_len is the total data length across the chain */
		m->pkt_len = data_len * segments_nb;

		/* Use headroom specified for the buffer */
		m->data_off = headroom;

		/* init some constant fields */
		m->pool = mp;
		m->nb_segs = segments_nb;
		m->port = 0xff;
		rte_mbuf_refcnt_set(m, 1);

		remaining_segments--;
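
		/*
		 * The next mbuf header starts immediately after this
		 * segment's data buffer; link it until all segments
		 * are chained.
		 */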
		if (remaining_segments > 0) {
			m->next = (struct rte_mbuf *)((uint8_t *)m +
					mbuf_hdr_size + segment_sz);
			m = m->next;
		} else {
			m->next = NULL;
		}
	} while (remaining_segments > 0);
}

static void
mempool_asym_obj_init(struct rte_mempool *mp, __rte_unused void *opaque_arg,
		      void *obj, __rte_unused unsigned int i)
{
	struct rte_crypto_op *op = obj;

	/* Set crypto operation */
	op->type = RTE_CRYPTO_OP_TYPE_ASYMMETRIC;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	op->phys_addr = rte_mem_virt2iova(obj);
	op->mempool = mp;
}

static void
mempool_obj_init(struct rte_mempool *mp,
		 void *opaque_arg,
		 void *obj,
		 __rte_unused unsigned int i)
{
	struct obj_params *params = opaque_arg;
	struct rte_crypto_op *op = obj;
	struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
					params->src_buf_offset);

	/* Set crypto operation */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	op->phys_addr = rte_mem_virt2iova(obj);
	op->mempool = mp;

	/* Set source buffer */
	op->sym->m_src = m;
	if (params->segments_nb == 1)
		fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len);
	else
		fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len, params->segments_nb);

	/* Set destination buffer */
	if (params->dst_buf_offset) {
		m = (struct rte_mbuf *) ((uint8_t *) obj +
				params->dst_buf_offset);
		fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len);
		op->sym->m_dst = m;
	} else
		op->sym->m_dst = NULL;
}

int
cperf_alloc_common_memory(const struct cperf_options *options,
			const struct cperf_test_vector *test_vector,
			uint8_t dev_id, uint16_t qp_id,
			size_t extra_op_priv_size,
			uint32_t *src_buf_offset,
			uint32_t *dst_buf_offset,
			struct rte_mempool **pool)
{
	const char *mp_ops_name;
	char pool_name[32] = "";
	int ret;

	/* Calculate the object size */
	uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);
	uint16_t crypto_op_private_size;

	if (options->op_type == CPERF_ASYM_MODEX) {
		snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "perf_asym_op_pool%u",
			 rte_socket_id());
		*pool = rte_crypto_op_pool_create(
			pool_name, RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
			options->pool_sz, RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
			rte_socket_id());
		if (*pool == NULL) {
			RTE_LOG(ERR, USER1,
				"Cannot allocate mempool for device %u\n",
				dev_id);
			return -1;
		}
		rte_mempool_obj_iter(*pool, mempool_asym_obj_init, NULL);
		return 0;
	}

	/*
	 * If doing AES-CCM, the IV field needs to be 16 bytes long,
	 * and the AAD field needs room for 18 bytes plus the AAD
	 * itself, all rounded up to a multiple of 16 bytes.
	 */
	if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16) +
			RTE_ALIGN_CEIL(options->aead_aad_sz + 18, 16);
	} else {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			test_vector->aead_iv.length +
			options->aead_aad_sz;
	}

	uint16_t crypto_op_total_size = crypto_op_size +
				crypto_op_private_size;
	uint16_t crypto_op_total_size_padded =
				RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
	uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
	uint32_t max_size = options->max_buffer_size + options->digest_sz;
	uint32_t segment_data_len = options->segment_sz - options->headroom_sz -
				    options->tailroom_sz;
	uint16_t segments_nb = (max_size % segment_data_len) ?
				(max_size / segment_data_len) + 1 :
				(max_size / segment_data_len);
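
	/*
	 * Illustrative sizing (example values, not the defaults): with
	 * segment_sz = 2048, headroom_sz = 128 and tailroom_sz = 0,
	 * segment_data_len = 1920; max_buffer_size = 4096 plus a 16-byte
	 * digest gives max_size = 4112, so segments_nb = 3 and each object
	 * carries three (mbuf header + 2048-byte buffer) blocks after the
	 * cache-line-padded crypto op.
	 */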
	uint32_t obj_size = crypto_op_total_size_padded +
				(mbuf_size * segments_nb);

	snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
			dev_id, qp_id);

	*src_buf_offset = crypto_op_total_size_padded;

	struct obj_params params = {
		.segment_sz = options->segment_sz,
		.headroom_sz = options->headroom_sz,
		/* Data len = segment size - (headroom + tailroom) */
		.data_len = options->segment_sz -
			    options->headroom_sz -
			    options->tailroom_sz,
		.segments_nb = segments_nb,
		.src_buf_offset = crypto_op_total_size_padded,
		.dst_buf_offset = 0
	};

	if (options->out_of_place) {
		*dst_buf_offset = *src_buf_offset +
				(mbuf_size * segments_nb);
		params.dst_buf_offset = *dst_buf_offset;
		/* Destination buffer will be one segment only */
		obj_size += max_size + sizeof(struct rte_mbuf) +
			options->headroom_sz + options->tailroom_sz;
	}

	*pool = rte_mempool_create_empty(pool_name,
			options->pool_sz, obj_size, 512, 0,
			rte_socket_id(), 0);
	if (*pool == NULL) {
		RTE_LOG(ERR, USER1,
			"Cannot allocate mempool for device %u\n",
			dev_id);
		return -1;
	}

	mp_ops_name = rte_mbuf_best_mempool_ops();

	ret = rte_mempool_set_ops_byname(*pool,
		mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, USER1,
			"Error setting mempool handler for device %u\n",
			dev_id);
		return -1;
	}

	ret = rte_mempool_populate_default(*pool);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Error populating mempool for device %u\n",
			dev_id);
		return -1;
	}

	rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);

	return 0;
}

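/*
 * Minimal usage sketch (illustrative only; "opts" and "vec" are
 * hypothetical): a test runner allocates one pool per device/queue pair
 * and pulls ops from it; mempool_obj_init() has already wired each op's
 * m_src/m_dst to the mbufs embedded in the same object:
 *
 *	uint32_t src_off, dst_off;
 *	struct rte_mempool *pool;
 *	struct rte_crypto_op *op;
 *
 *	if (cperf_alloc_common_memory(opts, vec, dev_id, qp_id, 0,
 *			&src_off, &dst_off, &pool) != 0)
 *		return -1;
 *	if (rte_mempool_get(pool, (void **)&op) == 0)
 *		cperf_mbuf_set(op->sym->m_src, opts, vec);
 */
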
void
cperf_mbuf_set(struct rte_mbuf *mbuf,
		const struct cperf_options *options,
		const struct cperf_test_vector *test_vector)
{
	uint32_t segment_sz = options->segment_sz - options->headroom_sz -
			      options->tailroom_sz;
	uint8_t *mbuf_data;
	uint8_t *test_data;
	uint32_t remaining_bytes = options->max_buffer_size;

	if (options->op_type == CPERF_AEAD) {
		test_data = (options->aead_op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
					test_vector->plaintext.data :
					test_vector->ciphertext.data;
	} else {
		test_data =
			(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
				test_vector->plaintext.data :
				test_vector->ciphertext.data;
	}

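	/* Copy the pattern segment by segment; the last copy may be short */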
	while (remaining_bytes) {
		mbuf_data = rte_pktmbuf_mtod(mbuf, uint8_t *);

		if (remaining_bytes <= segment_sz) {
			memcpy(mbuf_data, test_data, remaining_bytes);
			return;
		}

		memcpy(mbuf_data, test_data, segment_sz);
		remaining_bytes -= segment_sz;
		test_data += segment_sz;
		mbuf = mbuf->next;
	}
}