/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>

#include "cperf_test_common.h"

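/*
 * Per-object initialisation parameters. Each mempool object holds a
 * crypto operation (plus private data), followed by the source mbuf
 * segments and, for out-of-place tests, a destination mbuf; the
 * offsets below locate those mbufs inside the object.
 */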
struct obj_params {
	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;
	uint16_t segment_sz;
	uint16_t segments_nb;
};

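/*
 * Initialise a single-segment mbuf in place: the data buffer starts
 * right after the mbuf header, with no headroom, and covers exactly
 * segment_sz bytes.
 */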
static void
fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz)
{
	uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);

	/* start of buffer is after mbuf structure and priv data */
	m->priv_size = 0;
	m->buf_addr = (char *)m + mbuf_hdr_size;
	m->buf_iova = rte_mempool_virt2iova(obj) +
		mbuf_offset + mbuf_hdr_size;
	m->buf_len = segment_sz;
	m->data_len = segment_sz;

	/* No headroom needed for the buffer */
	m->data_off = 0;

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = 0xff;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;
}

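/*
 * Initialise a chain of segments_nb mbufs laid out back to back inside
 * the object: each mbuf header is followed by its segment_sz data area,
 * and every segment's IOVA is derived from the object's start address.
 */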
static void
fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t segments_nb)
{
	uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
	uint16_t remaining_segments = segments_nb;
	struct rte_mbuf *next_mbuf;
	rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
			 mbuf_offset + mbuf_hdr_size;

	do {
		/* start of buffer is after mbuf structure and priv data */
		m->priv_size = 0;
		m->buf_addr = (char *)m + mbuf_hdr_size;
		m->buf_iova = next_seg_phys_addr;
		next_seg_phys_addr += mbuf_hdr_size + segment_sz;
		m->buf_len = segment_sz;
		m->data_len = segment_sz;

		/* No headroom needed for the buffer */
		m->data_off = 0;

		/* init some constant fields */
		m->pool = mp;
		m->nb_segs = segments_nb;
		m->port = 0xff;
		rte_mbuf_refcnt_set(m, 1);
		remaining_segments--;
		/*
		 * Link to the next segment, or terminate the chain on the
		 * last segment so nothing is written past the end of the
		 * per-object buffer area.
		 */
		if (remaining_segments > 0) {
			next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
						mbuf_hdr_size + segment_sz);
			m->next = next_mbuf;
			m = next_mbuf;
		} else
			m->next = NULL;

	} while (remaining_segments > 0);
}

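/*
 * Mempool object constructor: turn each raw object into a symmetric
 * crypto operation with its source (and optional destination) mbufs
 * already wired up, so the data path never has to build them.
 */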
static void
mempool_obj_init(struct rte_mempool *mp,
		 void *opaque_arg,
		 void *obj,
		 __rte_unused unsigned int i)
{
	struct obj_params *params = opaque_arg;
	struct rte_crypto_op *op = obj;
	struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
					params->src_buf_offset);
	/* Set crypto operation */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	op->phys_addr = rte_mem_virt2iova(obj);
	op->mempool = mp;

	/* Set source buffer */
	op->sym->m_src = m;
	if (params->segments_nb == 1)
		fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz);
	else
		fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->segments_nb);

	/* Set destination buffer */
	if (params->dst_buf_offset) {
		m = (struct rte_mbuf *) ((uint8_t *) obj +
				params->dst_buf_offset);
		fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
				params->segment_sz);
		op->sym->m_dst = m;
	} else
		op->sym->m_dst = NULL;
}

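/*
 * Create and populate the per-queue-pair mempool used by the perf
 * tests. Each object bundles the crypto op, its private data (IVs and
 * AAD) and the source/destination mbufs; the offsets of the mbufs
 * inside the object are returned through src_buf_offset and
 * dst_buf_offset. Returns 0 on success, -1 on failure.
 */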
int
cperf_alloc_common_memory(const struct cperf_options *options,
			const struct cperf_test_vector *test_vector,
			uint8_t dev_id, uint16_t qp_id,
			size_t extra_op_priv_size,
			uint32_t *src_buf_offset,
			uint32_t *dst_buf_offset,
			struct rte_mempool **pool)
{
	const char *mp_ops_name;
	char pool_name[32] = "";
	int ret;

	/* Calculate the object size */
	uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);
	uint16_t crypto_op_private_size;
	/*
	 * For AES-CCM, the IV field must be 16 bytes long and the AAD
	 * field needs an extra 18 bytes on top of the AAD itself, with
	 * both sizes rounded up to a multiple of 16 bytes.
	 */
	if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16) +
			RTE_ALIGN_CEIL(options->aead_aad_sz + 18, 16);
	} else {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			test_vector->aead_iv.length +
			options->aead_aad_sz;
	}

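	/*
	 * The crypto op plus its private data is padded to a cache-line
	 * boundary so the mbufs start cache aligned. The source buffer
	 * (payload plus digest) is split into ceil(max_size / segment_sz)
	 * segments, each carrying its own mbuf header.
	 */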
	uint16_t crypto_op_total_size = crypto_op_size +
				crypto_op_private_size;
	uint16_t crypto_op_total_size_padded =
				RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
	uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
	uint32_t max_size = options->max_buffer_size + options->digest_sz;
	uint16_t segments_nb = (max_size % options->segment_sz) ?
			(max_size / options->segment_sz) + 1 :
			max_size / options->segment_sz;
	uint32_t obj_size = crypto_op_total_size_padded +
				(mbuf_size * segments_nb);

	snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
			dev_id, qp_id);

	*src_buf_offset = crypto_op_total_size_padded;

	struct obj_params params = {
		.segment_sz = options->segment_sz,
		.segments_nb = segments_nb,
		.src_buf_offset = crypto_op_total_size_padded,
		.dst_buf_offset = 0
	};

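	/*
	 * For out-of-place operation the destination mbuf lives right
	 * after the source segments, so reserve room for it at the end
	 * of each object.
	 */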
	if (options->out_of_place) {
		*dst_buf_offset = *src_buf_offset +
				(mbuf_size * segments_nb);
		params.dst_buf_offset = *dst_buf_offset;
		/* Destination buffer will be one segment only */
		obj_size += sizeof(struct rte_mbuf) + max_size;
	}

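	/*
	 * Create the pool empty so the best mbuf mempool ops can be
	 * selected first, then populate it and initialise every object
	 * in place.
	 */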
	*pool = rte_mempool_create_empty(pool_name,
			options->pool_sz, obj_size, 512, 0,
			rte_socket_id(), 0);
	if (*pool == NULL) {
		RTE_LOG(ERR, USER1,
			"Cannot allocate mempool for device %u\n",
			dev_id);
		return -1;
	}

	mp_ops_name = rte_mbuf_best_mempool_ops();

	ret = rte_mempool_set_ops_byname(*pool,
		mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, USER1,
			"Error setting mempool handler for device %u\n",
			dev_id);
		return -1;
	}

	ret = rte_mempool_populate_default(*pool);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Error populating mempool for device %u\n",
			dev_id);
		return -1;
	}

	rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);

	return 0;
}