xref: /dpdk/app/test-crypto-perf/cperf_test_common.c (revision bf9d6702eca95f01105e93ab38fc36e9932314f8)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <rte_malloc.h>
34 
35 #include "cperf_test_common.h"
36 
/* Per-object layout parameters passed to mempool_obj_init() through the
 * opaque argument of rte_mempool_obj_iter(). */
struct obj_params {
	uint32_t src_buf_offset;	/* byte offset of the source mbuf chain from the object start */
	uint32_t dst_buf_offset;	/* byte offset of the destination mbuf; 0 = in-place (no dst) */
	uint16_t segment_sz;		/* data capacity of each mbuf segment, in bytes */
	uint16_t segments_nb;		/* number of segments in the source chain */
};
43 
44 static void
45 fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
46 		void *obj, uint32_t mbuf_offset, uint16_t segment_sz)
47 {
48 	uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);
49 
50 	/* start of buffer is after mbuf structure and priv data */
51 	m->priv_size = 0;
52 	m->buf_addr = (char *)m + mbuf_hdr_size;
53 	m->buf_physaddr = rte_mempool_virt2phy(mp, obj) +
54 		mbuf_offset + mbuf_hdr_size;
55 	m->buf_len = segment_sz;
56 	m->data_len = segment_sz;
57 
58 	/* No headroom needed for the buffer */
59 	m->data_off = 0;
60 
61 	/* init some constant fields */
62 	m->pool = mp;
63 	m->nb_segs = 1;
64 	m->port = 0xff;
65 	rte_mbuf_refcnt_set(m, 1);
66 	m->next = NULL;
67 }
68 
69 static void
70 fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
71 		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
72 		uint16_t segments_nb)
73 {
74 	uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
75 	uint16_t remaining_segments = segments_nb;
76 	struct rte_mbuf *next_mbuf;
77 	phys_addr_t next_seg_phys_addr = rte_mempool_virt2phy(mp, obj) +
78 			 mbuf_offset + mbuf_hdr_size;
79 
80 	do {
81 		/* start of buffer is after mbuf structure and priv data */
82 		m->priv_size = 0;
83 		m->buf_addr = (char *)m + mbuf_hdr_size;
84 		m->buf_physaddr = next_seg_phys_addr;
85 		next_seg_phys_addr += mbuf_hdr_size + segment_sz;
86 		m->buf_len = segment_sz;
87 		m->data_len = segment_sz;
88 
89 		/* No headroom needed for the buffer */
90 		m->data_off = 0;
91 
92 		/* init some constant fields */
93 		m->pool = mp;
94 		m->nb_segs = segments_nb;
95 		m->port = 0xff;
96 		rte_mbuf_refcnt_set(m, 1);
97 		next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
98 					mbuf_hdr_size + segment_sz);
99 		m->next = next_mbuf;
100 		m = next_mbuf;
101 		remaining_segments--;
102 
103 	} while (remaining_segments > 0);
104 
105 	m->next = NULL;
106 }
107 
108 static void
109 mempool_obj_init(struct rte_mempool *mp,
110 		 void *opaque_arg,
111 		 void *obj,
112 		 __attribute__((unused)) unsigned int i)
113 {
114 	struct obj_params *params = opaque_arg;
115 	struct rte_crypto_op *op = obj;
116 	struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
117 					params->src_buf_offset);
118 	/* Set crypto operation */
119 	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
120 	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
121 	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
122 
123 	/* Set source buffer */
124 	op->sym->m_src = m;
125 	if (params->segments_nb == 1)
126 		fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
127 				params->segment_sz);
128 	else
129 		fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
130 				params->segment_sz, params->segments_nb);
131 
132 
133 	/* Set destination buffer */
134 	if (params->dst_buf_offset) {
135 		m = (struct rte_mbuf *) ((uint8_t *) obj +
136 				params->dst_buf_offset);
137 		fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
138 				params->segment_sz);
139 		op->sym->m_dst = m;
140 	} else
141 		op->sym->m_dst = NULL;
142 }
143 
144 int
145 cperf_alloc_common_memory(const struct cperf_options *options,
146 			const struct cperf_test_vector *test_vector,
147 			uint8_t dev_id, uint16_t qp_id,
148 			size_t extra_op_priv_size,
149 			uint32_t *src_buf_offset,
150 			uint32_t *dst_buf_offset,
151 			struct rte_mempool **pool)
152 {
153 	char pool_name[32] = "";
154 	int ret;
155 
156 	/* Calculate the object size */
157 	uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
158 		sizeof(struct rte_crypto_sym_op);
159 	uint16_t crypto_op_private_size = extra_op_priv_size +
160 				test_vector->cipher_iv.length +
161 				test_vector->auth_iv.length +
162 				test_vector->aead_iv.length +
163 				options->aead_aad_sz;
164 	uint16_t crypto_op_total_size = crypto_op_size +
165 				crypto_op_private_size;
166 	uint16_t crypto_op_total_size_padded =
167 				RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
168 	uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
169 	uint32_t max_size = options->max_buffer_size + options->digest_sz;
170 	uint16_t segments_nb = (max_size % options->segment_sz) ?
171 			(max_size / options->segment_sz) + 1 :
172 			max_size / options->segment_sz;
173 	uint32_t obj_size = crypto_op_total_size_padded +
174 				(mbuf_size * segments_nb);
175 
176 	snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
177 			dev_id, qp_id);
178 
179 	*src_buf_offset = crypto_op_total_size_padded;
180 
181 	struct obj_params params = {
182 		.segment_sz = options->segment_sz,
183 		.segments_nb = segments_nb,
184 		.src_buf_offset = crypto_op_total_size_padded,
185 		.dst_buf_offset = 0
186 	};
187 
188 	if (options->out_of_place) {
189 		*dst_buf_offset = *src_buf_offset +
190 				(mbuf_size * segments_nb);
191 		params.dst_buf_offset = *dst_buf_offset;
192 		/* Destination buffer will be one segment only */
193 		obj_size += max_size;
194 	}
195 
196 	*pool = rte_mempool_create_empty(pool_name,
197 			options->pool_sz, obj_size, 512, 0,
198 			rte_socket_id(), 0);
199 	if (*pool == NULL) {
200 		RTE_LOG(ERR, USER1,
201 			"Cannot allocate mempool for device %u\n",
202 			dev_id);
203 		return -1;
204 	}
205 
206 	ret = rte_mempool_set_ops_byname(*pool,
207 		RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
208 	if (ret != 0) {
209 		RTE_LOG(ERR, USER1,
210 			 "Error setting mempool handler for device %u\n",
211 			 dev_id);
212 		return -1;
213 	}
214 
215 	ret = rte_mempool_populate_default(*pool);
216 	if (ret < 0) {
217 		RTE_LOG(ERR, USER1,
218 			 "Error populating mempool for device %u\n",
219 			 dev_id);
220 		return -1;
221 	}
222 
223 	rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);
224 
225 	return 0;
226 }
227