/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <string.h>
#include <stdlib.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_kvargs.h>

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>

#include <phy_turbo.h>
#include <phy_crc.h>
#include <phy_rate_match.h>
#include <divide.h>

#define DRIVER_NAME turbo_sw

/* Turbo SW PMD logging ID */
static int bbdev_turbo_sw_logtype;

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_turbo_sw_logtype, fmt "\n", \
		##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* private data structure */
struct bbdev_private {
	unsigned int max_nb_queues;  /**< Max number of queues */
};

/* Initialisation params structure that can be used by Turbo SW driver */
struct turbo_sw_params {
	int socket_id;  /**< Turbo SW device socket */
	uint16_t queues_num;  /**< Turbo SW device queues number */
};

/* Acceptable params for Turbo SW devices */
#define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues"
#define TURBO_SW_SOCKET_ID_ARG "socket_id"

static const char * const turbo_sw_valid_params[] = {
	TURBO_SW_MAX_NB_QUEUES_ARG,
	TURBO_SW_SOCKET_ID_ARG
};

/* queue */
struct turbo_sw_queue {
	/* Ring for processed (encoded/decoded) operations which are ready to
	 * be dequeued.
	 */
	struct rte_ring *processed_pkts;
	/* Stores input for turbo encoder (used when CRC attachment is
	 * performed)
	 */
	uint8_t *enc_in;
	/* Stores output from turbo encoder */
	uint8_t *enc_out;
	/* Alpha gamma buf for bblib_turbo_decoder() function */
	int8_t *ag;
	/* Temp buf for bblib_turbo_decoder() function */
	uint16_t *code_block;
	/* Input buf for bblib_rate_dematching_lte() function */
	uint8_t *deint_input;
	/* Output buf for bblib_rate_dematching_lte() function */
	uint8_t *deint_output;
	/* Output buf for bblib_turbodec_adapter_lte() function */
	uint8_t *adapter_output;
	/* Operation type of this queue */
	enum rte_bbdev_op_type type;
} __rte_cache_aligned;

/* Calculate index based on Table 5.1.3-3 from 3GPP TS 36.212 */
static inline int32_t
compute_idx(uint16_t k)
{
	int32_t result = 0;

	if (k < RTE_BBDEV_MIN_CB_SIZE || k > RTE_BBDEV_MAX_CB_SIZE)
		return -1;

	if (k > 2048) {
		if ((k - 2048) % 64 != 0)
			return -1;

		result = 124 + (k - 2048) / 64;
	} else if (k <= 512) {
		if ((k - 40) % 8 != 0)
			return -1;

		result = (k - 40) / 8 + 1;
	} else if (k <= 1024) {
		if ((k - 512) % 16 != 0)
			return -1;

		result = 60 + (k - 512) / 16;
	} else { /* 1024 < k <= 2048 */
		if ((k - 1024) % 32 != 0)
			return -1;

		result = 92 + (k - 1024) / 32;
	}

	return result;
}
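/* Worked examples for the Table 5.1.3-3 mapping implemented above (values
 * derived from the arithmetic in compute_idx(), shown for reference):
 *	k = 40   -> index 1   (smallest CB)
 *	k = 512  -> index 60
 *	k = 1024 -> index 92
 *	k = 2048 -> index 124
 *	k = 6144 -> index 188 (largest CB)
 */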
/* Read flag value 0/1 from bitmap */
static inline bool
check_bit(uint32_t bitmap, uint32_t bitmask)
{
	return bitmap & bitmask;
}

/* Get device info */
static void
info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
{
	struct bbdev_private *internals = dev->data->dev_private;

	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
		{
			.type = RTE_BBDEV_OP_TURBO_DEC,
			.cap.turbo_dec = {
				.capability_flags =
					RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
					RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN |
					RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
					RTE_BBDEV_TURBO_CRC_TYPE_24B |
					RTE_BBDEV_TURBO_EARLY_TERMINATION,
				.max_llr_modulus = 16,
				.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
				.num_buffers_hard_out =
						RTE_BBDEV_MAX_CODE_BLOCKS,
				.num_buffers_soft_out = 0,
			}
		},
		{
			.type = RTE_BBDEV_OP_TURBO_ENC,
			.cap.turbo_enc = {
				.capability_flags =
					RTE_BBDEV_TURBO_CRC_24B_ATTACH |
					RTE_BBDEV_TURBO_CRC_24A_ATTACH |
					RTE_BBDEV_TURBO_RATE_MATCH |
					RTE_BBDEV_TURBO_RV_INDEX_BYPASS,
				.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
				.num_buffers_dst = RTE_BBDEV_MAX_CODE_BLOCKS,
			}
		},
		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	};

	static struct rte_bbdev_queue_conf default_queue_conf = {
		.queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
	};

	static const enum rte_cpu_flag_t cpu_flag = RTE_CPUFLAG_SSE4_2;

	default_queue_conf.socket = dev->data->socket_id;

	dev_info->driver_name = RTE_STR(DRIVER_NAME);
	dev_info->max_num_queues = internals->max_nb_queues;
	dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
	dev_info->hardware_accelerated = false;
	dev_info->max_queue_priority = 0;
	dev_info->default_queue_conf = default_queue_conf;
	dev_info->capabilities = bbdev_capabilities;
	dev_info->cpu_flag_reqs = &cpu_flag;
	dev_info->min_alignment = 64;

	rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
}
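/* Illustrative sketch (not part of this driver): an application would
 * normally read these capabilities through the public API rather than
 * calling info_get() directly, e.g.
 *
 *	struct rte_bbdev_info info;
 *
 *	if (rte_bbdev_info_get(dev_id, &info) == 0) {
 *		const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
 *		for (; cap->type != RTE_BBDEV_OP_NONE; ++cap)
 *			... inspect cap->cap.turbo_enc / cap->cap.turbo_dec ...
 *	}
 *
 * dev_id is a placeholder; the loop bound relies on the
 * RTE_BBDEV_END_OF_CAPABILITIES_LIST() terminator set up above.
 */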
/* Release queue */
static int
q_release(struct rte_bbdev *dev, uint16_t q_id)
{
	struct turbo_sw_queue *q = dev->data->queues[q_id].queue_private;

	if (q != NULL) {
		rte_ring_free(q->processed_pkts);
		rte_free(q->enc_out);
		rte_free(q->enc_in);
		rte_free(q->ag);
		rte_free(q->code_block);
		rte_free(q->deint_input);
		rte_free(q->deint_output);
		rte_free(q->adapter_output);
		rte_free(q);
		dev->data->queues[q_id].queue_private = NULL;
	}

	rte_bbdev_log_debug("released device queue %u:%u",
			dev->data->dev_id, q_id);
	return 0;
}

/* Setup a queue */
static int
q_setup(struct rte_bbdev *dev, uint16_t q_id,
		const struct rte_bbdev_queue_conf *queue_conf)
{
	int ret;
	struct turbo_sw_queue *q;
	char name[RTE_RING_NAMESIZE];

	/* Allocate the queue data structure. */
	q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate queue memory");
		return -ENOMEM;
	}

	/* Allocate memory for encoder output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_enc_out%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->enc_out = rte_zmalloc_socket(name,
			((RTE_BBDEV_MAX_TB_SIZE >> 3) + 3) *
			sizeof(*q->enc_out) * 3,
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->enc_out == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Allocate memory for turbo encoder input (used when CRC attachment
	 * is performed).
	 */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_enc_in%u:%u", dev->data->dev_id,
			q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->enc_in = rte_zmalloc_socket(name,
			(RTE_BBDEV_MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->enc_in == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Allocate memory for Alpha Gamma temp buffer. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_ag%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->ag = rte_zmalloc_socket(name,
			RTE_BBDEV_MAX_CB_SIZE * 10 * sizeof(*q->ag),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->ag == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Allocate memory for code block temp buffer. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_cb%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->code_block = rte_zmalloc_socket(name,
			(6144 >> 3) * sizeof(*q->code_block),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->code_block == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Allocate memory for Deinterleaver input. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_deint_input%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->deint_input = rte_zmalloc_socket(name,
			RTE_BBDEV_MAX_KW * sizeof(*q->deint_input),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->deint_input == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Allocate memory for Deinterleaver output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_deint_output%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->deint_output = rte_zmalloc_socket(name,
			RTE_BBDEV_MAX_KW * sizeof(*q->deint_output),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->deint_output == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Allocate memory for Adapter output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_adapter_output%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->adapter_output = rte_zmalloc_socket(name,
			RTE_BBDEV_MAX_CB_SIZE * 6 * sizeof(*q->adapter_output),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->adapter_output == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Create ring for packets awaiting dequeue. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->processed_pkts = rte_ring_create(name, queue_conf->queue_size,
			queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (q->processed_pkts == NULL) {
		rte_bbdev_log(ERR, "Failed to create ring for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	q->type = queue_conf->op_type;

	dev->data->queues[q_id].queue_private = q;
	rte_bbdev_log_debug("setup device queue %s", name);
	return 0;

free_q:
	rte_ring_free(q->processed_pkts);
	rte_free(q->enc_out);
	rte_free(q->enc_in);
	rte_free(q->ag);
	rte_free(q->code_block);
	rte_free(q->deint_input);
	rte_free(q->deint_output);
	rte_free(q->adapter_output);
	rte_free(q);
	return ret;
}

static const struct rte_bbdev_ops pmd_ops = {
	.info_get = info_get,
	.queue_setup = q_setup,
	.queue_release = q_release
};
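/* Illustrative sketch (not part of this driver): the ops above are invoked
 * indirectly when an application configures the device, e.g.
 *
 *	struct rte_bbdev_info info;
 *	struct rte_bbdev_queue_conf conf;
 *
 *	rte_bbdev_info_get(dev_id, &info);
 *	rte_bbdev_setup_queues(dev_id, 1, info.socket_id);
 *	conf = info.drv.default_queue_conf;
 *	conf.op_type = RTE_BBDEV_OP_TURBO_ENC;
 *	rte_bbdev_queue_configure(dev_id, 0, &conf);  ... calls q_setup() ...
 *	rte_bbdev_start(dev_id);
 *
 * dev_id is a placeholder and error handling is omitted for brevity.
 */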
/* Checks if the encoder input buffer is correct.
 * Returns 0 if it's valid, -1 otherwise.
 */
static inline int
is_enc_input_valid(const uint16_t k, const int32_t k_idx,
		const uint16_t in_length)
{
	if (k_idx < 0) {
		rte_bbdev_log(ERR, "K Index is invalid");
		return -1;
	}

	if (in_length < (k >> 3)) {
		rte_bbdev_log(ERR,
				"Mismatch between input length (%u bytes) and K (%u bits)",
				in_length, k);
		return -1;
	}

	if (k > RTE_BBDEV_MAX_CB_SIZE) {
		rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
				k, RTE_BBDEV_MAX_CB_SIZE);
		return -1;
	}

	return 0;
}

/* Checks if the decoder input buffer is correct.
 * Returns 0 if it's valid, -1 otherwise.
 */
static inline int
is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length)
{
	if (k_idx < 0) {
		rte_bbdev_log(ERR, "K index is invalid");
		return -1;
	}

	if (in_length < kw) {
		rte_bbdev_log(ERR,
				"Mismatch between input length (%u) and kw (%u)",
				in_length, kw);
		return -1;
	}

	if (kw > RTE_BBDEV_MAX_KW) {
		rte_bbdev_log(ERR, "Input length (%u) is too big, max: %d",
				kw, RTE_BBDEV_MAX_KW);
		return -1;
	}

	return 0;
}
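/* Worked example for the checks above (arithmetic only): a CB of k = 6144
 * bits needs an input of at least k >> 3 = 768 bytes on the encode side;
 * on the decode side the same CB yields a circular buffer of
 * kw = 3 * 32 * ceil((6144 + 4) / 32) = 18528 bytes, so the LLR input must
 * be at least that long.
 */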
static inline void
process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
		uint8_t r, uint8_t c, uint16_t k, uint16_t ncb,
		uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out,
		uint16_t in_offset, uint16_t out_offset, uint16_t total_left)
{
	int ret;
	int16_t k_idx;
	uint16_t m;
	uint8_t *in, *out0, *out1, *out2, *tmp_out, *rm_out;
	uint64_t first_3_bytes = 0;
	struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
	struct bblib_crc_request crc_req;
	struct bblib_crc_response crc_resp;
	struct bblib_turbo_encoder_request turbo_req;
	struct bblib_turbo_encoder_response turbo_resp;
	struct bblib_rate_match_dl_request rm_req;
	struct bblib_rate_match_dl_response rm_resp;

	k_idx = compute_idx(k);
	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);

	/* CRC24A (for TB) */
	if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH) &&
			(enc->code_block_mode == 1)) {
		ret = is_enc_input_valid(k - 24, k_idx, total_left);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
		crc_req.data = in;
		crc_req.len = (k - 24) >> 3;
		/* Check if there is room for the CRC bits. If not, use the
		 * temporary buffer.
		 */
		if (rte_pktmbuf_append(m_in, 3) == NULL) {
			rte_memcpy(q->enc_in, in, (k - 24) >> 3);
			in = q->enc_in;
		} else {
			/* Store the first 3 bytes of the next CB as they will
			 * be overwritten by the CRC bytes. If this is the last
			 * CB then there is no point in storing the next 3
			 * bytes and this if..else branch will be omitted.
			 */
			first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
		}

		crc_resp.data = in;
		bblib_lte_crc24a_gen(&crc_req, &crc_resp);
	} else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) {
		/* CRC24B */
		ret = is_enc_input_valid(k - 24, k_idx, total_left);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
		crc_req.data = in;
		crc_req.len = (k - 24) >> 3;
		/* Check if there is room for the CRC bits when this is the
		 * last CB in the TB. If not, use the temporary buffer.
		 */
		if ((c - r == 1) && (rte_pktmbuf_append(m_in, 3) == NULL)) {
			rte_memcpy(q->enc_in, in, (k - 24) >> 3);
			in = q->enc_in;
		} else if (c - r > 1) {
			/* Store the first 3 bytes of the next CB as they will
			 * be overwritten by the CRC bytes. If this is the last
			 * CB then there is no point in storing the next 3
			 * bytes and this if..else branch will be omitted.
			 */
			first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
		}

		crc_resp.data = in;
		bblib_lte_crc24b_gen(&crc_req, &crc_resp);
	} else {
		ret = is_enc_input_valid(k, k_idx, total_left);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
	}
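	/* Worked example for the save/restore above (derived from the code,
	 * for reference): with k = 6144 and CRC attachment, the CRC is
	 * computed over (k - 24) >> 3 = 765 payload bytes, so the 3 generated
	 * CRC bytes land at offsets 765..767, i.e. on the first 3 bytes of
	 * the next CB. The uint64_t loaded from offset (k - 32) >> 3 = 764
	 * spans offsets 764..771 and therefore preserves those bytes until
	 * the encoder is done.
	 */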
	/* Turbo encoder */

	/* Each bit layer output from turbo encoder is (k+4) bits long, i.e.
	 * input length + 4 tail bits. That's (k/8) + 1 bytes after rounding
	 * up. So dst_data's length should be 3*(k/8) + 3 bytes.
	 * In the rate-matching bypass case, the output pointers passed to the
	 * encoder (out0, out1 and out2) can point directly at the addresses
	 * of the output from the turbo_enc entity.
	 */
	if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
		out0 = q->enc_out;
		out1 = RTE_PTR_ADD(out0, (k >> 3) + 1);
		out2 = RTE_PTR_ADD(out1, (k >> 3) + 1);
	} else {
		out0 = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3) * 3 + 2);
		if (out0 == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			rte_bbdev_log(ERR,
					"Too little space in output mbuf");
			return;
		}
		enc->output.length += (k >> 3) * 3 + 2;
		/* rte_bbdev_op_data.offset can be different than the
		 * offset of the appended bytes
		 */
		out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
		out1 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
				out_offset + (k >> 3) + 1);
		out2 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
				out_offset + 2 * ((k >> 3) + 1));
	}

	turbo_req.case_id = k_idx;
	turbo_req.input_win = in;
	turbo_req.length = k >> 3;
	turbo_resp.output_win_0 = out0;
	turbo_resp.output_win_1 = out1;
	turbo_resp.output_win_2 = out2;
	if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "Turbo Encoder failed");
		return;
	}

	/* Restore the first 3 bytes of the next CB if they were overwritten
	 * by the CRC
	 */
	if (first_3_bytes != 0)
		*((uint64_t *)&in[(k - 32) >> 3]) = first_3_bytes;

	/* Rate-matching */
	if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
		uint8_t mask_id;
		/* Integer round up division by 8 */
		uint16_t out_len = (e + 7) >> 3;
		/* The mask array is indexed using E%8. E is an even number so
		 * there are only 4 possible values.
		 */
		const uint8_t mask_out[] = {0xFF, 0xC0, 0xF0, 0xFC};
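		/* Worked example for the masking applied below (values follow
		 * from E%8 being even): E%8 == 0 -> mask_id 0 -> 0xFF (keep
		 * all 8 bits), E%8 == 2 -> mask_id 1 -> 0xC0 (keep 2 bits),
		 * E%8 == 4 -> mask_id 2 -> 0xF0, E%8 == 6 -> mask_id 3 ->
		 * 0xFC. E.g. e = 10 gives out_len = 2 and clears the low
		 * 6 bits of the last byte.
		 */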
		/* get output data starting address */
		rm_out = (uint8_t *)rte_pktmbuf_append(m_out, out_len);
		if (rm_out == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			rte_bbdev_log(ERR,
					"Too little space in output mbuf");
			return;
		}
		/* rte_bbdev_op_data.offset can be different than the offset
		 * of the appended bytes
		 */
		rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);

		/* index of current code block */
		rm_req.r = r;
		/* total number of code blocks */
		rm_req.C = c;
		/* For DL - 1, UL - 0 */
		rm_req.direction = 1;
		/* According to 3GPP TS 36.212 section 5.1.4.1.2, Nsoft, KMIMO
		 * and MDL_HARQ are used for Ncb calculation. As Ncb is already
		 * known we can adjust those parameters
		 */
		rm_req.Nsoft = ncb * rm_req.C;
		rm_req.KMIMO = 1;
		rm_req.MDL_HARQ = 1;
		/* According to 3GPP TS 36.212 section 5.1.4.1.2, NL, Qm and G
		 * are used for E calculation. As E is already known we can
		 * adjust those parameters
		 */
		rm_req.NL = e;
		rm_req.Qm = 1;
		rm_req.G = rm_req.NL * rm_req.Qm * rm_req.C;

		rm_req.rvidx = enc->rv_index;
		rm_req.Kidx = k_idx - 1;
		rm_req.nLen = k + 4;
		rm_req.tin0 = out0;
		rm_req.tin1 = out1;
		rm_req.tin2 = out2;
		rm_resp.output = rm_out;
		rm_resp.OutputLen = out_len;
		if (enc->op_flags & RTE_BBDEV_TURBO_RV_INDEX_BYPASS)
			rm_req.bypass_rvidx = 1;
		else
			rm_req.bypass_rvidx = 0;

		if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) {
			op->status |= 1 << RTE_BBDEV_DRV_ERROR;
			rte_bbdev_log(ERR, "Rate matching failed");
			return;
		}

		/* SW fills an entire last byte even if E%8 != 0. Clear the
		 * superfluous data bits for consistency with HW device.
		 */
		mask_id = (e & 7) >> 1;
		rm_out[out_len - 1] &= mask_out[mask_id];

		enc->output.length += rm_resp.OutputLen;
	} else {
		/* Rate matching is bypassed */

		/* Complete the last byte of out0 (where the 4 tail bits are
		 * stored) by moving the first 4 bits from out1
		 */
		tmp_out = (uint8_t *) --out1;
		*tmp_out = *tmp_out | ((*(tmp_out + 1) & 0xF0) >> 4);
		tmp_out++;
		/* Shift out1 data by 4 bits to the left */
		for (m = 0; m < k >> 3; ++m) {
			uint8_t *first = tmp_out;
			uint8_t second = *(tmp_out + 1);
			*first = (*first << 4) | ((second & 0xF0) >> 4);
			tmp_out++;
		}
		/* Shift out2 data by 8 bits to the left */
		for (m = 0; m < (k >> 3) + 1; ++m) {
			*tmp_out = *(tmp_out + 1);
			tmp_out++;
		}
		*tmp_out = 0;
	}
}
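/* Worked example for the bypass packing above (arithmetic only): with
 * k = 40, each of the three encoder streams is k + 4 = 44 bits, i.e.
 * (k >> 3) + 1 = 6 bytes with 4 unused bits. Packing shifts out1 left by
 * 4 bits and out2 left by 8 bits, so the result occupies 3 * 44 = 132
 * bits, which fits the (k >> 3) * 3 + 2 = 17 appended bytes.
 */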
static inline void
enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
{
	uint8_t c, r, crc24_bits = 0;
	uint16_t k, ncb;
	uint32_t e;
	struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
	uint16_t in_offset = enc->input.offset;
	uint16_t out_offset = enc->output.offset;
	struct rte_mbuf *m_in = enc->input.data;
	struct rte_mbuf *m_out = enc->output.data;
	uint16_t total_left = enc->input.length;

	/* Clear op status */
	op->status = 0;

	if (total_left > RTE_BBDEV_MAX_TB_SIZE >> 3) {
		rte_bbdev_log(ERR, "TB size (%u bytes) is too big, max: %d",
				total_left, RTE_BBDEV_MAX_TB_SIZE >> 3);
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) ||
			(enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH))
		crc24_bits = 24;

	if (enc->code_block_mode == 0) { /* For Transport Block mode */
		c = enc->tb_params.c;
		r = enc->tb_params.r;
	} else { /* For Code Block mode */
		c = 1;
		r = 0;
	}

	while (total_left > 0 && r < c) {
		if (enc->code_block_mode == 0) {
			k = (r < enc->tb_params.c_neg) ?
				enc->tb_params.k_neg : enc->tb_params.k_pos;
			ncb = (r < enc->tb_params.c_neg) ?
				enc->tb_params.ncb_neg : enc->tb_params.ncb_pos;
			e = (r < enc->tb_params.cab) ?
				enc->tb_params.ea : enc->tb_params.eb;
		} else {
			k = enc->cb_params.k;
			ncb = enc->cb_params.ncb;
			e = enc->cb_params.e;
		}

		process_enc_cb(q, op, r, c, k, ncb, e, m_in,
				m_out, in_offset, out_offset, total_left);
		/* Update total_left */
		total_left -= (k - crc24_bits) >> 3;
		/* Update offsets for next CBs (if exist) */
		in_offset += (k - crc24_bits) >> 3;
		if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH)
			out_offset += e >> 3;
		else
			out_offset += (k >> 3) * 3 + 2;
		r++;
	}

	/* check if all input data was processed */
	if (total_left != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included CBs sizes");
	}
}

static inline uint16_t
enqueue_enc_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_enc_op **ops,
		uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; ++i)
		enqueue_enc_one_op(q, ops[i]);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
			NULL);
}
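/* Worked example for the TB segmentation above (arithmetic only): with CRC
 * attachment enabled and k = 6144, each CB consumes
 * (k - crc24_bits) >> 3 = (6144 - 24) / 8 = 765 input bytes, so a TB of
 * c = 2 such CBs must carry exactly 1530 bytes for total_left to reach 0.
 */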
/* Remove the padding bytes from a circular buffer.
 * The input buffer is a data stream wk as described in 3GPP TS 36.212
 * section 5.1.4.1.2, starting from w0 and with length Ncb bytes.
 * The output buffer is a data stream wk with pruned padding bytes. Its
 * length is 3*D bytes and the order of non-padding bytes is preserved.
 */
static inline void
remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
		uint16_t ncb)
{
	uint32_t in_idx, out_idx, c_idx;
	const uint32_t d = k + 4;
	const uint32_t kw = (ncb / 3);
	const uint32_t nd = kw - d;
	const uint32_t r_subblock = kw / RTE_BBDEV_C_SUBBLOCK;
	/* Inter-column permutation pattern */
	const uint32_t P[RTE_BBDEV_C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28,
			2, 18, 10, 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13,
			29, 3, 19, 11, 27, 7, 23, 15, 31};
	in_idx = 0;
	out_idx = 0;

	/* The padding bytes are at the first Nd positions in the first row. */
	for (c_idx = 0; in_idx < kw; in_idx += r_subblock, ++c_idx) {
		if (P[c_idx] < nd) {
			rte_memcpy(&out[out_idx], &in[in_idx + 1],
					r_subblock - 1);
			out_idx += r_subblock - 1;
		} else {
			rte_memcpy(&out[out_idx], &in[in_idx], r_subblock);
			out_idx += r_subblock;
		}
	}

	/* First and second parity bits sub-blocks are interlaced. */
	for (c_idx = 0; in_idx < ncb - 2 * r_subblock;
			in_idx += 2 * r_subblock, ++c_idx) {
		uint32_t second_block_c_idx = P[c_idx];
		uint32_t third_block_c_idx = P[c_idx] + 1;

		if (second_block_c_idx < nd && third_block_c_idx < nd) {
			rte_memcpy(&out[out_idx], &in[in_idx + 2],
					2 * r_subblock - 2);
			out_idx += 2 * r_subblock - 2;
		} else if (second_block_c_idx >= nd &&
				third_block_c_idx >= nd) {
			rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock);
			out_idx += 2 * r_subblock;
		} else if (second_block_c_idx < nd) {
			out[out_idx++] = in[in_idx];
			rte_memcpy(&out[out_idx], &in[in_idx + 2],
					2 * r_subblock - 2);
			out_idx += 2 * r_subblock - 2;
		} else {
			rte_memcpy(&out[out_idx], &in[in_idx + 1],
					2 * r_subblock - 1);
			out_idx += 2 * r_subblock - 1;
		}
	}

	/* The last interlaced row is different - its last byte is the only
	 * padding byte. We can have from 4 up to 28 padding bytes (Nd) per
	 * sub-block. After interlacing the 1st and 2nd parity sub-blocks we
	 * can have 0, 1 or 2 padding bytes each time we make a step of
	 * 2 * R_SUBBLOCK bytes (moving to another column). The 2nd parity
	 * sub-block uses the same inter-column permutation pattern as the
	 * systematic and 1st parity sub-blocks, but it adds '1' to the
	 * resulting index and calculates the modulus of the result and Kw.
	 * The last column is mapped to itself (id 31) so the first byte taken
	 * from the 2nd parity sub-block will be the 32nd (31+1) byte, then
	 * the 64th etc. (the step is C_SUBBLOCK == 32) and the last byte will
	 * be the first byte from the sub-block:
	 * (32 + 32 * (R_SUBBLOCK-1)) % Kw == Kw % Kw == 0. Nd can't be
	 * smaller than 4 so we know that bytes with ids 0, 1, 2 and 3 must
	 * be the padding bytes. The bytes from the 1st parity sub-block are
	 * the bytes from the 31st column - Nd can't be greater than 28 so we
	 * are sure that there are no padding bytes in the 31st column.
	 */
	rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock - 1);
}
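/* Worked example for the pruning above (arithmetic only): for k = 40,
 * D = 44 and Ncb = 192, so Kw = 64, Nd = 64 - 44 = 20 and
 * R_SUBBLOCK = 64 / 32 = 2. The output is 3 * D = 132 bytes, i.e. the
 * 60 padding bytes spread across the three sub-blocks are dropped.
 */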
static inline void
move_padding_bytes(const uint8_t *in, uint8_t *out, uint16_t k,
		uint16_t ncb)
{
	uint16_t d = k + 4;
	uint16_t kpi = ncb / 3;
	uint16_t nd = kpi - d;

	rte_memcpy(&out[nd], in, d);
	rte_memcpy(&out[nd + kpi + 64], &in[kpi], d);
	rte_memcpy(&out[(nd - 1) + 2 * (kpi + 64)], &in[2 * kpi], d);
}

static inline void
process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
		uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in,
		struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset,
		bool check_crc_24b, uint16_t total_left)
{
	int ret;
	int32_t k_idx;
	int32_t iter_cnt;
	uint8_t *in, *out, *adapter_input;
	int32_t ncb, ncb_without_null;
	struct bblib_turbo_adapter_ul_response adapter_resp;
	struct bblib_turbo_adapter_ul_request adapter_req;
	struct bblib_turbo_decoder_request turbo_req;
	struct bblib_turbo_decoder_response turbo_resp;
	struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;

	k_idx = compute_idx(k);

	ret = is_dec_input_valid(k_idx, kw, total_left);
	if (ret != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
	ncb = kw;
	ncb_without_null = (k + 4) * 3;

	if (check_bit(dec->op_flags, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE)) {
		struct bblib_deinterleave_ul_request deint_req;
		struct bblib_deinterleave_ul_response deint_resp;

		/* SW decoder accepts only a circular buffer without NULL bytes
		 * so the input needs to be converted.
		 */
		remove_nulls_from_circular_buf(in, q->deint_input, k, ncb);

		deint_req.pharqbuffer = q->deint_input;
		deint_req.ncb = ncb_without_null;
		deint_resp.pinteleavebuffer = q->deint_output;
		bblib_deinterleave_ul(&deint_req, &deint_resp);
	} else
		move_padding_bytes(in, q->deint_output, k, ncb);

	adapter_input = q->deint_output;

	if (dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN)
		adapter_req.isinverted = 1;
	else if (dec->op_flags & RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN)
		adapter_req.isinverted = 0;
	else {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "LLR format wasn't specified");
		return;
	}

	adapter_req.ncb = ncb_without_null;
	adapter_req.pinteleavebuffer = adapter_input;
	adapter_resp.pharqout = q->adapter_output;
	bblib_turbo_adapter_ul(&adapter_req, &adapter_resp);

	out = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3));
	if (out == NULL) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR, "Too little space in output mbuf");
		return;
	}
	/* rte_bbdev_op_data.offset can be different than the offset of the
	 * appended bytes
	 */
	out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
	if (check_crc_24b)
		turbo_req.c = c + 1;
	else
		turbo_req.c = c;
	turbo_req.input = (int8_t *)q->adapter_output;
	turbo_req.k = k;
	turbo_req.k_idx = k_idx;
	turbo_req.max_iter_num = dec->iter_max;
	turbo_resp.ag_buf = q->ag;
	turbo_resp.cb_buf = q->code_block;
	turbo_resp.output = out;
	iter_cnt = bblib_turbo_decoder(&turbo_req, &turbo_resp);
	dec->hard_output.length += (k >> 3);

	if (iter_cnt > 0) {
		/* Temporary solution for returned iter_count from SDK */
		iter_cnt = (iter_cnt - 1) / 2;
		dec->iter_count = RTE_MAX(iter_cnt, dec->iter_count);
	} else {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR, "Turbo Decoder failed");
		return;
	}
}
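/* Buffer flow of the decode path above, summarised for reference:
 * mbuf LLRs (Ncb bytes) -> q->deint_input (nulls pruned) ->
 * q->deint_output (sub-block deinterleaved) -> q->adapter_output
 * (decoder-friendly layout) -> k/8 hard-decision bytes appended to the
 * output mbuf.
 */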
static inline void
enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
{
	uint8_t c, r = 0;
	uint16_t kw, k = 0;
	struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
	struct rte_mbuf *m_in = dec->input.data;
	struct rte_mbuf *m_out = dec->hard_output.data;
	uint16_t in_offset = dec->input.offset;
	uint16_t total_left = dec->input.length;
	uint16_t out_offset = dec->hard_output.offset;

	/* Clear op status */
	op->status = 0;

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if (dec->code_block_mode == 0) { /* For Transport Block mode */
		c = dec->tb_params.c;
	} else { /* For Code Block mode */
		k = dec->cb_params.k;
		c = 1;
	}

	while (total_left > 0) {
		if (dec->code_block_mode == 0)
			k = (r < dec->tb_params.c_neg) ?
				dec->tb_params.k_neg : dec->tb_params.k_pos;

		/* Calculate the circular buffer size (Kw).
		 * According to 3GPP TS 36.212 section 5.1.4.2
		 *	Kw = 3 * Kpi,
		 * where:
		 *	Kpi = nCol * nRow
		 * where nCol is 32 and nRow can be calculated from:
		 *	D <= nCol * nRow
		 * where D is the size of each output from the turbo encoder
		 * block (k + 4).
		 */
		kw = RTE_ALIGN_CEIL(k + 4, RTE_BBDEV_C_SUBBLOCK) * 3;

		process_dec_cb(q, op, c, k, kw, m_in, m_out, in_offset,
				out_offset, check_bit(dec->op_flags,
				RTE_BBDEV_TURBO_CRC_TYPE_24B), total_left);
		/* As a result of decoding we get a Code Block with the
		 * decoded CRC24 included at the end of the Code Block. The
		 * type of CRC24 is specified by a flag.
		 */

		/* Update total_left */
		total_left -= kw;
		/* Update offsets for next CBs (if exist) */
		in_offset += kw;
		out_offset += (k >> 3);
		r++;
	}
	if (total_left != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included Circular buffer sizes");
	}
}

static inline uint16_t
enqueue_dec_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_dec_op **ops,
		uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; ++i)
		enqueue_dec_one_op(q, ops[i]);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
			NULL);
}

/* Enqueue encode burst */
static uint16_t
enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Enqueue decode burst */
static uint16_t
enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_dec_all_ops(q, ops, nb_ops);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Dequeue decode burst */
static uint16_t
dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	struct turbo_sw_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

/* Dequeue encode burst */
static uint16_t
dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	struct turbo_sw_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}
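/* Illustrative sketch (not part of this driver): the burst functions above
 * back the public data-path API, e.g.
 *
 *	struct rte_bbdev_enc_op *ops[BURST];
 *	uint16_t n = rte_bbdev_enqueue_enc_ops(dev_id, 0, ops, BURST);
 *	...
 *	n = rte_bbdev_dequeue_enc_ops(dev_id, 0, ops, BURST);
 *
 * dev_id and BURST are placeholders. Since this PMD processes operations
 * synchronously inside the enqueue call, a dequeue issued right after an
 * enqueue will normally return the completed ops.
 */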
/* Parse 16-bit integer from string argument */
static inline int
parse_u16_arg(const char *key, const char *value, void *extra_args)
{
	uint16_t *u16 = extra_args;
	unsigned long result;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;
	errno = 0;
	result = strtoul(value, NULL, 0);
	if ((result >= (1 << 16)) || (errno != 0)) {
		rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
		return -ERANGE;
	}
	*u16 = (uint16_t)result;
	return 0;
}

/* Parse parameters used to create device */
static int
parse_turbo_sw_params(struct turbo_sw_params *params, const char *input_args)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;
	if (input_args) {
		kvlist = rte_kvargs_parse(input_args, turbo_sw_valid_params);
		if (kvlist == NULL)
			return -EFAULT;

		ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[0],
				&parse_u16_arg, &params->queues_num);
		if (ret < 0)
			goto exit;

		ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[1],
				&parse_u16_arg, &params->socket_id);
		if (ret < 0)
			goto exit;

		if (params->socket_id >= RTE_MAX_NUMA_NODES) {
			rte_bbdev_log(ERR, "Invalid socket, must be < %u",
					RTE_MAX_NUMA_NODES);
			ret = -EINVAL;
			goto exit;
		}
	}

exit:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

/* Create device */
static int
turbo_sw_bbdev_create(struct rte_vdev_device *vdev,
		struct turbo_sw_params *init_params)
{
	struct rte_bbdev *bbdev;
	const char *name = rte_vdev_device_name(vdev);

	bbdev = rte_bbdev_allocate(name);
	if (bbdev == NULL)
		return -ENODEV;

	bbdev->data->dev_private = rte_zmalloc_socket(name,
			sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
			init_params->socket_id);
	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_release(bbdev);
		return -ENOMEM;
	}

	bbdev->dev_ops = &pmd_ops;
	bbdev->device = &vdev->device;
	bbdev->data->socket_id = init_params->socket_id;
	bbdev->intr_handle = NULL;

	/* register rx/tx burst functions for data path */
	bbdev->dequeue_enc_ops = dequeue_enc_ops;
	bbdev->dequeue_dec_ops = dequeue_dec_ops;
	bbdev->enqueue_enc_ops = enqueue_enc_ops;
	bbdev->enqueue_dec_ops = enqueue_dec_ops;
	((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
			init_params->queues_num;

	return 0;
}

/* Initialise device */
static int
turbo_sw_bbdev_probe(struct rte_vdev_device *vdev)
{
	struct turbo_sw_params init_params = {
		rte_socket_id(),
		RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
	};
	const char *name;
	const char *input_args;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);
	parse_turbo_sw_params(&init_params, input_args);

	rte_bbdev_log_debug(
			"Initialising %s on NUMA node %d with max queues: %d",
			name, init_params.socket_id, init_params.queues_num);

	return turbo_sw_bbdev_create(vdev, &init_params);
}

/* Uninitialise device */
static int
turbo_sw_bbdev_remove(struct rte_vdev_device *vdev)
{
	struct rte_bbdev *bbdev;
	const char *name;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	bbdev = rte_bbdev_get_named_dev(name);
	if (bbdev == NULL)
		return -EINVAL;

	rte_free(bbdev->data->dev_private);

	return rte_bbdev_release(bbdev);
}

static struct rte_vdev_driver bbdev_turbo_sw_pmd_drv = {
	.probe = turbo_sw_bbdev_probe,
	.remove = turbo_sw_bbdev_remove
};

RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_turbo_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
	TURBO_SW_MAX_NB_QUEUES_ARG"=<int> "
	TURBO_SW_SOCKET_ID_ARG"=<int>");

RTE_INIT(turbo_sw_bbdev_init_log);
static void
turbo_sw_bbdev_init_log(void)
{
	bbdev_turbo_sw_logtype = rte_log_register("pmd.bb.turbo_sw");
	if (bbdev_turbo_sw_logtype >= 0)
		rte_log_set_level(bbdev_turbo_sw_logtype, RTE_LOG_NOTICE);
}
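/* Illustrative usage (not part of this driver): the PMD can be instantiated
 * from the EAL command line, e.g.
 *
 *	./app --vdev="turbo_sw,max_nb_queues=8,socket_id=0"
 *
 * The queue count and socket shown are placeholder values; both keys are
 * optional and default to RTE_BBDEV_DEFAULT_MAX_NB_QUEUES and the socket
 * of the probing lcore.
 */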