/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>

#include "cperf_test_common.h"

struct obj_params {
	uint32_t src_buf_offset;
	uint32_t dst_buf_offset;
	uint16_t segment_sz;
	uint16_t headroom_sz;
	uint16_t data_len;
	uint16_t segments_nb;
};

static void
fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t headroom, uint16_t data_len)
{
	uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);

	/* start of buffer is after mbuf structure and priv data */
	m->priv_size = 0;
	m->buf_addr = (char *)m + mbuf_hdr_size;
	m->buf_iova = rte_mempool_virt2iova(obj) +
			mbuf_offset + mbuf_hdr_size;
	m->buf_len = segment_sz;
	m->data_len = data_len;

	/* Use headroom specified for the buffer */
	m->data_off = headroom;

	/* init some constant fields */
	m->pool = mp;
	m->nb_segs = 1;
	m->port = 0xff;
	rte_mbuf_refcnt_set(m, 1);
	m->next = NULL;
}

static void
fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
		void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
		uint16_t headroom, uint16_t data_len, uint16_t segments_nb)
{
	uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
	uint16_t remaining_segments = segments_nb;
	struct rte_mbuf *next_mbuf;
	rte_iova_t next_seg_phys_addr = rte_mempool_virt2iova(obj) +
			mbuf_offset + mbuf_hdr_size;

	do {
		/* start of buffer is after mbuf structure and priv data */
		m->priv_size = 0;
		m->buf_addr = (char *)m + mbuf_hdr_size;
		m->buf_iova = next_seg_phys_addr;
		next_seg_phys_addr += mbuf_hdr_size + segment_sz;
		m->buf_len = segment_sz;
		m->data_len = data_len;

		/* Use headroom specified for the buffer */
		m->data_off = headroom;

		/* init some constant fields */
		m->pool = mp;
		m->nb_segs = segments_nb;
		m->port = 0xff;
		rte_mbuf_refcnt_set(m, 1);

		remaining_segments--;
		if (remaining_segments > 0) {
			/* Chain to the next segment, laid out right after
			 * this segment's data within the same object.
			 */
			next_mbuf = (struct rte_mbuf *) ((uint8_t *) m +
					mbuf_hdr_size + segment_sz);
			m->next = next_mbuf;
			m = next_mbuf;
		} else {
			/* Last segment terminates the chain; do not touch
			 * memory past the end of the object.
			 */
			m->next = NULL;
		}
	} while (remaining_segments > 0);
}

static void
mempool_obj_init(struct rte_mempool *mp,
		void *opaque_arg,
		void *obj,
		__attribute__((unused)) unsigned int i)
{
	struct obj_params *params = opaque_arg;
	struct rte_crypto_op *op = obj;
	struct rte_mbuf *m = (struct rte_mbuf *) ((uint8_t *) obj +
					params->src_buf_offset);

	/* Set crypto operation */
	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
	op->phys_addr = rte_mem_virt2iova(obj);
	op->mempool = mp;

	/* Set source buffer */
	op->sym->m_src = m;
	if (params->segments_nb == 1)
		fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len);
	else
		fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len, params->segments_nb);

	/* Set destination buffer */
	if (params->dst_buf_offset) {
		m = (struct rte_mbuf *) ((uint8_t *) obj +
				params->dst_buf_offset);
		fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
				params->segment_sz, params->headroom_sz,
				params->data_len);
		op->sym->m_dst = m;
	} else
		op->sym->m_dst = NULL;
}
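/*
 * Layout of each object in the mempool allocated below:
 *
 *   - crypto op + sym op + per-op private data (IVs, AAD),
 *     padded to a cache line
 *   - at src_buf_offset: segments_nb source mbufs, each stored as an
 *     rte_mbuf header immediately followed by its segment data
 *   - at dst_buf_offset (out-of-place only): a single-segment
 *     destination mbuf
 *
 * mempool_obj_init() is run once per object to link the source
 * segments together and attach the mbufs to the crypto operation.
 */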
int
cperf_alloc_common_memory(const struct cperf_options *options,
			const struct cperf_test_vector *test_vector,
			uint8_t dev_id, uint16_t qp_id,
			size_t extra_op_priv_size,
			uint32_t *src_buf_offset,
			uint32_t *dst_buf_offset,
			struct rte_mempool **pool)
{
	const char *mp_ops_name;
	char pool_name[32] = "";
	int ret;

	/* Calculate the object size */
	uint16_t crypto_op_size = sizeof(struct rte_crypto_op) +
		sizeof(struct rte_crypto_sym_op);
	uint16_t crypto_op_private_size;
	/*
	 * If doing AES-CCM, IV field needs to be 16 bytes long,
	 * and AAD field needs to be long enough to have 18 bytes,
	 * plus the length of the AAD, and all rounded to a
	 * multiple of 16 bytes.
	 */
	if (options->aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			RTE_ALIGN_CEIL(test_vector->aead_iv.length, 16) +
			RTE_ALIGN_CEIL(options->aead_aad_sz + 18, 16);
	} else {
		crypto_op_private_size = extra_op_priv_size +
			test_vector->cipher_iv.length +
			test_vector->auth_iv.length +
			test_vector->aead_iv.length +
			options->aead_aad_sz;
	}

	uint16_t crypto_op_total_size = crypto_op_size +
				crypto_op_private_size;
	uint16_t crypto_op_total_size_padded =
				RTE_CACHE_LINE_ROUNDUP(crypto_op_total_size);
	uint32_t mbuf_size = sizeof(struct rte_mbuf) + options->segment_sz;
	uint32_t max_size = options->max_buffer_size + options->digest_sz;
	uint16_t segments_nb = (max_size % options->segment_sz) ?
			(max_size / options->segment_sz) + 1 :
			max_size / options->segment_sz;
	uint32_t obj_size = crypto_op_total_size_padded +
				(mbuf_size * segments_nb);

	snprintf(pool_name, sizeof(pool_name), "pool_cdev_%u_qp_%u",
			dev_id, qp_id);

	*src_buf_offset = crypto_op_total_size_padded;

	struct obj_params params = {
		.segment_sz = options->segment_sz,
		.headroom_sz = options->headroom_sz,
		/* Data len = segment size - (headroom + tailroom) */
		.data_len = options->segment_sz -
			    options->headroom_sz -
			    options->tailroom_sz,
		.segments_nb = segments_nb,
		.src_buf_offset = crypto_op_total_size_padded,
		.dst_buf_offset = 0
	};

	if (options->out_of_place) {
		*dst_buf_offset = *src_buf_offset +
				(mbuf_size * segments_nb);
		params.dst_buf_offset = *dst_buf_offset;
		/* Destination buffer will be one segment only */
		obj_size += max_size;
	}

	*pool = rte_mempool_create_empty(pool_name,
			options->pool_sz, obj_size, 512, 0,
			rte_socket_id(), 0);
	if (*pool == NULL) {
		RTE_LOG(ERR, USER1,
			"Cannot allocate mempool for device %u\n",
			dev_id);
		return -1;
	}

	mp_ops_name = rte_mbuf_best_mempool_ops();

	ret = rte_mempool_set_ops_byname(*pool,
		mp_ops_name, NULL);
	if (ret != 0) {
		RTE_LOG(ERR, USER1,
			"Error setting mempool handler for device %u\n",
			dev_id);
		return -1;
	}

	ret = rte_mempool_populate_default(*pool);
	if (ret < 0) {
		RTE_LOG(ERR, USER1,
			"Error populating mempool for device %u\n",
			dev_id);
		return -1;
	}

	rte_mempool_obj_iter(*pool, mempool_obj_init, (void *)&params);

	return 0;
}