/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_malloc.h>

#include "cperf_test_common.h"

struct obj_params {
	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;
	uint16_t segment_sz;
	uint16_t segments_nb;
};

static void
fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz)
{
	uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);

	/* start of buffer is after mbuf structure and priv data */
	m->priv_size = 0;
	m->buf_addr = (char *)m + mbuf_hdr_size;
	m->buf_physaddr = rte_mempool_virt2phy(mp, obj) +
		mbuf_offset + mbuf_hdr_size;
	m->buf_len = segment_sz;
	m->data_len = segment_sz;

	/* No headroom needed for the buffer */
	m->data_off = 0;

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = 0xff;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;
}

static void
fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t segments_nb)
{
	uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
	uint16_t remaining_segments = segments_nb;
	struct rte_mbuf *next_mbuf;
	phys_addr_t next_seg_phys_addr = rte_mempool_virt2phy(mp, obj) +
			mbuf_offset + mbuf_hdr_size;

	do {
		/* start of buffer is after mbuf structure and priv data */
		m->priv_size = 0;
		m->buf_addr = (char *)m + mbuf_hdr_size;
		m->buf_physaddr = next_seg_phys_addr;
		next_seg_phys_addr += mbuf_hdr_size + segment_sz;
		m->buf_len = segment_sz;
		m->data_len = segment_sz;

		/* No headroom needed for the buffer */
		m->data_off = 0;

		/* init some constant fields */
		m->pool = mp;
		m->nb_segs = segments_nb;
		m->port = 0xff;
		rte_mbuf_refcnt_set(m, 1);
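		/*
		 * Segments of the same chain are laid out back to back in
		 * the same mempool object, so the next mbuf header starts
		 * right after this segment's data buffer.
		 */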
		next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
					mbuf_hdr_size + segment_sz);
		remaining_segments--;
		/*
		 * Link to the next segment only while one remains; writing
		 * a next pointer at the address past the last segment would
		 * land outside of this mempool object.
		 */
		if (remaining_segments > 0) {
			m->next = next_mbuf;
			m = next_mbuf;
		} else
			m->next = NULL;

	} while (remaining_segments > 0);
}

static void
mempool_obj_init(struct rte_mempool *mp,
		void *opaque_arg,
		void *obj,
		__attribute__((unused)) unsigned int i)
{
	struct obj_params *params = opaque_arg;
	struct rte_crypto_op *op = obj;
	struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
					params->src_buf_offset);

	/* Set crypto operation */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

	/* Set source buffer */
	op->sym->m_src = m;
	if (params->segments_nb == 1)
		fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz);
	else
		fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->segments_nb);

	/* Set destination buffer */
	if (params->dst_buf_offset) {
		m = (struct rte_mbuf *) ((uint8_t *) obj +
				params->dst_buf_offset);
		fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
				params->segment_sz);
		op->sym->m_dst = m;
	} else
		op->sym->m_dst = NULL;
}
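
/*
 * Create one mempool per device and queue pair in which each object holds
 * a complete crypto operation: the rte_crypto_op and rte_crypto_sym_op,
 * the per-op private data (IVs and AAD), all padded out to a cache line,
 * followed by the source mbuf segment(s) and, for out-of-place operations,
 * a single-segment destination mbuf. The offsets of the source and
 * destination buffers inside each object are returned through
 * src_buf_offset and dst_buf_offset, and every object is initialized once
 * with mempool_obj_init() when the pool is populated.
 */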
int
cperf_alloc_common_memory(const struct cperf_options *options,
			const struct cperf_test_vector *test_vector,
			uint8_t dev_id, uint16_t qp_id,
			size_t extra_op_priv_size,
			uint32_t *src_buf_offset,
			uint32_t *dst_buf_offset,
			struct rte_mempool **pool)
{
	char pool_name[32] = "";
	int ret;

	/* Calculate the object size */
	uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);
	uint16_t crypto_op_private_size;
	/*
	 * If doing AES-CCM, IV field needs to be 16 bytes long,
	 * and AAD field needs to be long enough to have 18 bytes,
	 * plus the length of the AAD, and all rounded to a
	 * multiple of 16 bytes.
	 */
	if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16) +
			RTE_ALIGN_CEIL(options->aead_aad_sz + 18, 16);
	} else {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			test_vector->aead_iv.length +
			options->aead_aad_sz;
	}

	uint16_t crypto_op_total_size = crypto_op_size +
				crypto_op_private_size;
	uint16_t crypto_op_total_size_padded =
				RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
	uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
	uint32_t max_size = options->max_buffer_size + options->digest_sz;
	uint16_t segments_nb = (max_size % options->segment_sz) ?
			(max_size / options->segment_sz) + 1 :
			max_size / options->segment_sz;
	uint32_t obj_size = crypto_op_total_size_padded +
				(mbuf_size * segments_nb);

	snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
			dev_id, qp_id);

	*src_buf_offset = crypto_op_total_size_padded;

	struct obj_params params = {
		.segment_sz = options->segment_sz,
		.segments_nb = segments_nb,
		.src_buf_offset = crypto_op_total_size_padded,
		.dst_buf_offset = 0
	};

	if (options->out_of_place) {
		*dst_buf_offset = *src_buf_offset +
				(mbuf_size * segments_nb);
		params.dst_buf_offset = *dst_buf_offset;
		/* Destination buffer will be one segment only */
		obj_size += max_size;
	}

	*pool = rte_mempool_create_empty(pool_name,
			options->pool_sz, obj_size, 512, 0,
			rte_socket_id(), 0);
	if (*pool == NULL) {
		RTE_LOG(ERR, USER1,
			"Cannot allocate mempool for device %u\n",
			dev_id);
		return -1;
	}

	ret = rte_mempool_set_ops_byname(*pool,
		RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, USER1,
			"Error setting mempool handler for device %u\n",
			dev_id);
		return -1;
	}

	ret = rte_mempool_populate_default(*pool);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Error populating mempool for device %u\n",
			dev_id);
		return -1;
	}

	rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);

	return 0;
}