/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <string.h>

#include <rte_common.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_kvargs.h>

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>

#include <phy_turbo.h>
#include <phy_crc.h>
#include <phy_rate_match.h>
#include <divide.h>

#define DRIVER_NAME turbo_sw

/* Turbo SW PMD logging ID */
static int bbdev_turbo_sw_logtype;

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_turbo_sw_logtype, fmt "\n", \
		##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* private data structure */
struct bbdev_private {
	unsigned int max_nb_queues; /**< Max number of queues */
};

/* Initialisation params structure that can be used by Turbo SW driver */
struct turbo_sw_params {
	int socket_id; /**< Turbo SW device socket */
	uint16_t queues_num; /**< Turbo SW device queues number */
};

/* Acceptable params for Turbo SW devices */
#define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues"
#define TURBO_SW_SOCKET_ID_ARG "socket_id"

static const char * const turbo_sw_valid_params[] = {
	TURBO_SW_MAX_NB_QUEUES_ARG,
	TURBO_SW_SOCKET_ID_ARG
};

/* queue */
struct turbo_sw_queue {
	/* Ring for processed (encoded/decoded) operations which are ready to
	 * be dequeued.
	 */
	struct rte_ring *processed_pkts;
	/* Stores input for turbo encoder (used when CRC attachment is
	 * performed).
	 */
	uint8_t *enc_in;
	/* Stores output from turbo encoder */
	uint8_t *enc_out;
	/* Alpha gamma buf for bblib_turbo_decoder() function */
	int8_t *ag;
	/* Temp buf for bblib_turbo_decoder() function */
	uint16_t *code_block;
	/* Input buf for bblib_rate_dematching_lte() function */
	uint8_t *deint_input;
	/* Output buf for bblib_rate_dematching_lte() function */
	uint8_t *deint_output;
	/* Output buf for bblib_turbodec_adapter_lte() function */
	uint8_t *adapter_output;
	/* Operation type of this queue */
	enum rte_bbdev_op_type type;
} __rte_cache_aligned;

/* Calculate index based on Table 5.1.3-3 from TS 36.212 */
static inline int32_t
compute_idx(uint16_t k)
{
	int32_t result = 0;

	if (k < RTE_BBDEV_MIN_CB_SIZE || k > RTE_BBDEV_MAX_CB_SIZE)
		return -1;

	if (k > 2048) {
		if ((k - 2048) % 64 != 0)
			return -1;

		result = 124 + (k - 2048) / 64;
	} else if (k <= 512) {
		if ((k - 40) % 8 != 0)
			return -1;

		result = (k - 40) / 8 + 1;
	} else if (k <= 1024) {
		if ((k - 512) % 16 != 0)
			return -1;

		result = 60 + (k - 512) / 16;
	} else { /* 1024 < k <= 2048 */
		if ((k - 1024) % 32 != 0)
			return -1;

		result = 92 + (k - 1024) / 32;
	}

	return result;
}
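
/* Worked examples for compute_idx(), derived from the branches above and
 * matching the index column of Table 5.1.3-3 in TS 36.212 (illustrative):
 *   k = 40   -> (40 - 40) / 8 + 1        = 1   (first index)
 *   k = 512  -> (512 - 40) / 8 + 1       = 60
 *   k = 1024 -> 60 + (1024 - 512) / 16   = 92
 *   k = 2048 -> 92 + (2048 - 1024) / 32  = 124
 *   k = 6144 -> 124 + (6144 - 2048) / 64 = 188 (last index)
 * Any k that is not on the stride of its size range (8, 16, 32 or 64)
 * yields -1.
 */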

/* Read flag value 0/1 from bitmap */
static inline bool
check_bit(uint32_t bitmap, uint32_t bitmask)
{
	return bitmap & bitmask;
}

/* Get device info */
static void
info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
{
	struct bbdev_private *internals = dev->data->dev_private;

	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
		{
			.type = RTE_BBDEV_OP_TURBO_DEC,
			.cap.turbo_dec = {
				.capability_flags =
					RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
					RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN |
					RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
					RTE_BBDEV_TURBO_CRC_TYPE_24B |
					RTE_BBDEV_TURBO_EARLY_TERMINATION,
				.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
				.num_buffers_hard_out =
						RTE_BBDEV_MAX_CODE_BLOCKS,
				.num_buffers_soft_out = 0,
			}
		},
		{
			.type = RTE_BBDEV_OP_TURBO_ENC,
			.cap.turbo_enc = {
				.capability_flags =
					RTE_BBDEV_TURBO_CRC_24B_ATTACH |
					RTE_BBDEV_TURBO_CRC_24A_ATTACH |
					RTE_BBDEV_TURBO_RATE_MATCH |
					RTE_BBDEV_TURBO_RV_INDEX_BYPASS,
				.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
				.num_buffers_dst = RTE_BBDEV_MAX_CODE_BLOCKS,
			}
		},
		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	};

	static struct rte_bbdev_queue_conf default_queue_conf = {
		.queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
	};

	static const enum rte_cpu_flag_t cpu_flag = RTE_CPUFLAG_SSE4_2;

	default_queue_conf.socket = dev->data->socket_id;

	dev_info->driver_name = RTE_STR(DRIVER_NAME);
	dev_info->max_num_queues = internals->max_nb_queues;
	dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
	dev_info->hardware_accelerated = false;
	dev_info->max_queue_priority = 0;
	dev_info->default_queue_conf = default_queue_conf;
	dev_info->capabilities = bbdev_capabilities;
	dev_info->cpu_flag_reqs = &cpu_flag;
	dev_info->min_alignment = 64;

	rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
}

/* Release queue */
static int
q_release(struct rte_bbdev *dev, uint16_t q_id)
{
	struct turbo_sw_queue *q = dev->data->queues[q_id].queue_private;

	if (q != NULL) {
		rte_ring_free(q->processed_pkts);
		rte_free(q->enc_out);
		rte_free(q->enc_in);
		rte_free(q->ag);
		rte_free(q->code_block);
		rte_free(q->deint_input);
		rte_free(q->deint_output);
		rte_free(q->adapter_output);
		rte_free(q);
		dev->data->queues[q_id].queue_private = NULL;
	}

	rte_bbdev_log_debug("released device queue %u:%u",
			dev->data->dev_id, q_id);
	return 0;
}

/* Setup a queue */
static int
q_setup(struct rte_bbdev *dev, uint16_t q_id,
		const struct rte_bbdev_queue_conf *queue_conf)
{
	int ret;
	struct turbo_sw_queue *q;
	char name[RTE_RING_NAMESIZE];

	/* Allocate the queue data structure. */
	q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate queue memory");
		return -ENOMEM;
	}

	/* Allocate memory for encoder output. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_enc_out%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->enc_out = rte_zmalloc_socket(name,
			((RTE_BBDEV_MAX_TB_SIZE >> 3) + 3) *
			sizeof(*q->enc_out) * 3,
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->enc_out == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Allocate memory for rate matching output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_enc_in%u:%u", dev->data->dev_id,
			q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->enc_in = rte_zmalloc_socket(name,
			(RTE_BBDEV_MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->enc_in == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Allocate memory for Alpha Gamma temp buffer. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_ag%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->ag = rte_zmalloc_socket(name,
			RTE_BBDEV_MAX_CB_SIZE * 10 * sizeof(*q->ag),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->ag == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Allocate memory for code block temp buffer. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_cb%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->code_block = rte_zmalloc_socket(name,
			(RTE_BBDEV_MAX_CB_SIZE >> 3) * sizeof(*q->code_block),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->code_block == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Allocate memory for Deinterleaver input. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_deint_input%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->deint_input = rte_zmalloc_socket(name,
			RTE_BBDEV_MAX_KW * sizeof(*q->deint_input),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->deint_input == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Allocate memory for Deinterleaver output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_deint_output%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->deint_output = rte_zmalloc_socket(name,
			RTE_BBDEV_MAX_KW * sizeof(*q->deint_output),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->deint_output == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Allocate memory for Adapter output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_adapter_output%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->adapter_output = rte_zmalloc_socket(name,
			RTE_BBDEV_MAX_CB_SIZE * 6 * sizeof(*q->adapter_output),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->adapter_output == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	/* Create ring for packets awaiting dequeue. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->processed_pkts = rte_ring_create(name, queue_conf->queue_size,
			queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (q->processed_pkts == NULL) {
		rte_bbdev_log(ERR, "Failed to create ring for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	q->type = queue_conf->op_type;

	dev->data->queues[q_id].queue_private = q;
	rte_bbdev_log_debug("setup device queue %s", name);
	return 0;

free_q:
	rte_ring_free(q->processed_pkts);
	rte_free(q->enc_out);
	rte_free(q->enc_in);
	rte_free(q->ag);
	rte_free(q->code_block);
	rte_free(q->deint_input);
	rte_free(q->deint_output);
	rte_free(q->adapter_output);
	rte_free(q);
	return ret;
}

static const struct rte_bbdev_ops pmd_ops = {
	.info_get = info_get,
	.queue_setup = q_setup,
	.queue_release = q_release
};
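
/* These ops are not called directly; applications reach them through the
 * public rte_bbdev API. A minimal, illustrative configuration sketch (the
 * dev_id value 0 and the chosen op type are assumptions, not part of this
 * driver):
 *
 *	struct rte_bbdev_info info;
 *	struct rte_bbdev_queue_conf qconf;
 *
 *	rte_bbdev_info_get(0, &info);
 *	qconf = info.drv.default_queue_conf;
 *	qconf.op_type = RTE_BBDEV_OP_TURBO_ENC;
 *	rte_bbdev_setup_queues(0, 1, qconf.socket);
 *	rte_bbdev_queue_configure(0, 0, &qconf);
 *	rte_bbdev_start(0);
 */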

/* Checks if the encoder input buffer is correct.
 * Returns 0 if it's valid, -1 otherwise.
 */
static inline int
is_enc_input_valid(const uint16_t k, const int32_t k_idx,
		const uint16_t in_length)
{
	if (k_idx < 0) {
		rte_bbdev_log(ERR, "K Index is invalid");
		return -1;
	}

	if (in_length - (k >> 3) < 0) {
		rte_bbdev_log(ERR,
				"Mismatch between input length (%u bytes) and K (%u bits)",
				in_length, k);
		return -1;
	}

	if (k > RTE_BBDEV_MAX_CB_SIZE) {
		rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
				k, RTE_BBDEV_MAX_CB_SIZE);
		return -1;
	}

	return 0;
}

/* Checks if the decoder input buffer is correct.
 * Returns 0 if it's valid, -1 otherwise.
 */
static inline int
is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length)
{
	if (k_idx < 0) {
		rte_bbdev_log(ERR, "K index is invalid");
		return -1;
	}

	if (in_length - kw < 0) {
		rte_bbdev_log(ERR,
				"Mismatch between input length (%u) and kw (%u)",
				in_length, kw);
		return -1;
	}

	if (kw > RTE_BBDEV_MAX_KW) {
		rte_bbdev_log(ERR, "Input length (%u) is too big, max: %d",
				kw, RTE_BBDEV_MAX_KW);
		return -1;
	}

	return 0;
}

static inline void
process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
		uint8_t r, uint8_t c, uint16_t k, uint16_t ncb,
		uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out,
		uint16_t in_offset, uint16_t out_offset, uint16_t total_left)
{
	int ret;
	int16_t k_idx;
	uint16_t m;
	uint8_t *in, *out0, *out1, *out2, *tmp_out, *rm_out;
	uint64_t first_3_bytes = 0;
	struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
	struct bblib_crc_request crc_req;
	struct bblib_crc_response crc_resp;
	struct bblib_turbo_encoder_request turbo_req;
	struct bblib_turbo_encoder_response turbo_resp;
	struct bblib_rate_match_dl_request rm_req;
	struct bblib_rate_match_dl_response rm_resp;

	k_idx = compute_idx(k);
	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);

	/* CRC24A (for TB) */
	if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH) &&
			(enc->code_block_mode == 1)) {
		ret = is_enc_input_valid(k - 24, k_idx, total_left);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
		crc_req.data = in;
		crc_req.len = (k - 24) >> 3;
		/* Check if there is room for the CRC bits. If not, use the
		 * temporary buffer.
		 */
		if (rte_pktmbuf_append(m_in, 3) == NULL) {
			rte_memcpy(q->enc_in, in, (k - 24) >> 3);
			in = q->enc_in;
		} else {
			/* Store the first 3 bytes of the next CB as they will
			 * be overwritten by the CRC bytes. If this is the last
			 * CB there is no point in storing the next 3 bytes,
			 * and this if..else branch is omitted.
			 */
			first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
		}

		crc_resp.data = in;
		bblib_lte_crc24a_gen(&crc_req, &crc_resp);
	} else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) {
		/* CRC24B */
		ret = is_enc_input_valid(k - 24, k_idx, total_left);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
		crc_req.data = in;
		crc_req.len = (k - 24) >> 3;
		/* Check if there is room for the CRC bits. This matters only
		 * for the last CB in the TB. If there is no room, use the
		 * temporary buffer.
		 */
		if ((c - r == 1) && (rte_pktmbuf_append(m_in, 3) == NULL)) {
			rte_memcpy(q->enc_in, in, (k - 24) >> 3);
			in = q->enc_in;
		} else if (c - r > 1) {
			/* Store the first 3 bytes of the next CB as they will
			 * be overwritten by the CRC bytes. If this is the last
			 * CB there is no point in storing the next 3 bytes,
			 * and this if..else branch is omitted.
			 */
			first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
		}

		crc_resp.data = in;
		bblib_lte_crc24b_gen(&crc_req, &crc_resp);
	} else {
		ret = is_enc_input_valid(k, k_idx, total_left);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
	}

	/* Turbo encoder */

	/* Each bit layer output from turbo encoder is (k+4) bits long, i.e.
	 * input length + 4 tail bits. That's (k/8) + 1 bytes after rounding
	 * up, so dst_data's length should be 3*(k/8) + 3 bytes.
	 * In the rate-matching bypass case the output pointers passed to the
	 * encoder (out0, out1 and out2) can point directly at the addresses
	 * of the output from the turbo_enc entity.
	 */
	if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
		out0 = q->enc_out;
		out1 = RTE_PTR_ADD(out0, (k >> 3) + 1);
		out2 = RTE_PTR_ADD(out1, (k >> 3) + 1);
	} else {
		out0 = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3) * 3 + 2);
		if (out0 == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			rte_bbdev_log(ERR,
					"Too little space in output mbuf");
			return;
		}
		enc->output.length += (k >> 3) * 3 + 2;
		/* rte_bbdev_op_data.offset can be different than the
		 * offset of the appended bytes
		 */
		out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
		out1 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
				out_offset + (k >> 3) + 1);
		out2 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
				out_offset + 2 * ((k >> 3) + 1));
	}

	turbo_req.case_id = k_idx;
	turbo_req.input_win = in;
	turbo_req.length = k >> 3;
	turbo_resp.output_win_0 = out0;
	turbo_resp.output_win_1 = out1;
	turbo_resp.output_win_2 = out2;
	if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "Turbo Encoder failed");
		return;
	}

	/* Restore the first 3 bytes of the next CB if they were overwritten
	 * by the CRC.
	 */
	if (first_3_bytes != 0)
		*((uint64_t *)&in[(k - 32) >> 3]) = first_3_bytes;

	/* Rate-matching */
	if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
		/* get output data starting address */
		rm_out = (uint8_t *)rte_pktmbuf_append(m_out, (e >> 3));
		if (rm_out == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			rte_bbdev_log(ERR,
					"Too little space in output mbuf");
			return;
		}
		/* rte_bbdev_op_data.offset can be different than the offset
		 * of the appended bytes
		 */
		rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);

		/* index of current code block */
		rm_req.r = r;
		/* total number of code blocks */
		rm_req.C = c;
		/* For DL - 1, UL - 0 */
		rm_req.direction = 1;
		/* According to 3GPP TS 36.212 section 5.1.4.1.2, Nsoft, KMIMO
		 * and MDL_HARQ are used for Ncb calculation. As Ncb is already
		 * known we can adjust those parameters.
		 */
		rm_req.Nsoft = ncb * rm_req.C;
		rm_req.KMIMO = 1;
		rm_req.MDL_HARQ = 1;
		/* According to 3GPP TS 36.212 section 5.1.4.1.2, Nl, Qm and G
		 * are used for E calculation. As E is already known we can
		 * adjust those parameters.
		 */
		rm_req.NL = e;
		rm_req.Qm = 1;
		rm_req.G = rm_req.NL * rm_req.Qm * rm_req.C;

		rm_req.rvidx = enc->rv_index;
		rm_req.Kidx = k_idx - 1;
		rm_req.nLen = k + 4;
		rm_req.tin0 = out0;
		rm_req.tin1 = out1;
		rm_req.tin2 = out2;
		rm_resp.output = rm_out;
		rm_resp.OutputLen = (e >> 3);
		if (enc->op_flags & RTE_BBDEV_TURBO_RV_INDEX_BYPASS)
			rm_req.bypass_rvidx = 1;
		else
			rm_req.bypass_rvidx = 0;

		if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) {
			op->status |= 1 << RTE_BBDEV_DRV_ERROR;
			rte_bbdev_log(ERR, "Rate matching failed");
			return;
		}
		enc->output.length += rm_resp.OutputLen;
	} else {
		/* Rate matching is bypassed */

		/* Completing last byte of out0 (where 4 tail bits are stored)
		 * by moving first 4 bits from out1
		 */
		tmp_out = (uint8_t *) --out1;
		*tmp_out = *tmp_out | ((*(tmp_out + 1) & 0xF0) >> 4);
		tmp_out++;
		/* Shifting out1 data by 4 bits to the left */
		for (m = 0; m < k >> 3; ++m) {
			uint8_t *first = tmp_out;
			uint8_t second = *(tmp_out + 1);
			*first = (*first << 4) | ((second & 0xF0) >> 4);
			tmp_out++;
		}
		/* Shifting out2 data by 8 bits to the left */
		for (m = 0; m < (k >> 3) + 1; ++m) {
			*tmp_out = *(tmp_out + 1);
			tmp_out++;
		}
		*tmp_out = 0;
	}
}
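
/* A worked size example for the bypass path above (illustrative, derived from
 * the arithmetic in process_enc_cb()): for k = 40, each of the three encoder
 * streams is k + 4 = 44 bits, i.e. (k >> 3) + 1 = 6 bytes before packing.
 * After out1 is shifted left by 4 bits and out2 by 8 bits, the three streams
 * occupy 3 * 44 = 132 contiguous bits, which is why (k >> 3) * 3 + 2 = 17
 * bytes are appended to the output mbuf.
 */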

static inline void
enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
{
	uint8_t c, r, crc24_bits = 0;
	uint16_t k, ncb;
	uint32_t e;
	struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
	uint16_t in_offset = enc->input.offset;
	uint16_t out_offset = enc->output.offset;
	struct rte_mbuf *m_in = enc->input.data;
	struct rte_mbuf *m_out = enc->output.data;
	uint16_t total_left = enc->input.length;

	/* Clear op status */
	op->status = 0;

	if (total_left > RTE_BBDEV_MAX_TB_SIZE >> 3) {
		rte_bbdev_log(ERR,
				"TB size (%u bytes) is too big, max: %d bytes",
				total_left, RTE_BBDEV_MAX_TB_SIZE >> 3);
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) ||
			(enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH))
		crc24_bits = 24;

	if (enc->code_block_mode == 0) { /* For Transport Block mode */
		c = enc->tb_params.c;
		r = enc->tb_params.r;
	} else { /* For Code Block mode */
		c = 1;
		r = 0;
	}

	while (total_left > 0 && r < c) {
		if (enc->code_block_mode == 0) {
			k = (r < enc->tb_params.c_neg) ?
				enc->tb_params.k_neg : enc->tb_params.k_pos;
			ncb = (r < enc->tb_params.c_neg) ?
				enc->tb_params.ncb_neg : enc->tb_params.ncb_pos;
			e = (r < enc->tb_params.cab) ?
				enc->tb_params.ea : enc->tb_params.eb;
		} else {
			k = enc->cb_params.k;
			ncb = enc->cb_params.ncb;
			e = enc->cb_params.e;
		}

		process_enc_cb(q, op, r, c, k, ncb, e, m_in,
				m_out, in_offset, out_offset, total_left);
		/* Update total_left */
		total_left -= (k - crc24_bits) >> 3;
		/* Update offsets for next CBs (if exist) */
		in_offset += (k - crc24_bits) >> 3;
		if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH)
			out_offset += e >> 3;
		else
			out_offset += (k >> 3) * 3 + 2;
		r++;
	}

	/* check if all input data was processed */
	if (total_left != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included CBs sizes");
	}
}

static inline uint16_t
enqueue_enc_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_enc_op **ops,
		uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; ++i)
		enqueue_enc_one_op(q, ops[i]);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
			NULL);
}

/* Remove the padding bytes from a cyclic buffer.
 * The input buffer is a data stream wk as described in 3GPP TS 36.212 section
 * 5.1.4.1.2 starting from w0 and with length Ncb bytes.
 * The output buffer is a data stream wk with pruned padding bytes. Its length
 * is 3*D bytes and the order of non-padding bytes is preserved.
 */
static inline void
remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
		uint16_t ncb)
{
	uint32_t in_idx, out_idx, c_idx;
	const uint32_t d = k + 4;
	const uint32_t kw = (ncb / 3);
	const uint32_t nd = kw - d;
	const uint32_t r_subblock = kw / RTE_BBDEV_C_SUBBLOCK;
	/* Inter-column permutation pattern */
	const uint32_t P[RTE_BBDEV_C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28,
			2, 18, 10, 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13,
			29, 3, 19, 11, 27, 7, 23, 15, 31};
	in_idx = 0;
	out_idx = 0;

	/* The padding bytes are at the first Nd positions in the first row. */
	for (c_idx = 0; in_idx < kw; in_idx += r_subblock, ++c_idx) {
		if (P[c_idx] < nd) {
			rte_memcpy(&out[out_idx], &in[in_idx + 1],
					r_subblock - 1);
			out_idx += r_subblock - 1;
		} else {
			rte_memcpy(&out[out_idx], &in[in_idx], r_subblock);
			out_idx += r_subblock;
		}
	}

	/* First and second parity bits sub-blocks are interlaced. */
	for (c_idx = 0; in_idx < ncb - 2 * r_subblock;
			in_idx += 2 * r_subblock, ++c_idx) {
		uint32_t second_block_c_idx = P[c_idx];
		uint32_t third_block_c_idx = P[c_idx] + 1;

		if (second_block_c_idx < nd && third_block_c_idx < nd) {
			rte_memcpy(&out[out_idx], &in[in_idx + 2],
					2 * r_subblock - 2);
			out_idx += 2 * r_subblock - 2;
		} else if (second_block_c_idx >= nd &&
				third_block_c_idx >= nd) {
			rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock);
			out_idx += 2 * r_subblock;
		} else if (second_block_c_idx < nd) {
			out[out_idx++] = in[in_idx];
			rte_memcpy(&out[out_idx], &in[in_idx + 2],
					2 * r_subblock - 2);
			out_idx += 2 * r_subblock - 2;
		} else {
			rte_memcpy(&out[out_idx], &in[in_idx + 1],
					2 * r_subblock - 1);
			out_idx += 2 * r_subblock - 1;
		}
	}

	/* Last interlaced row is different - its last byte is the only padding
	 * byte. We can have from 2 up to 26 padding bytes (Nd) per sub-block.
	 * After interlacing the 1st and 2nd parity sub-blocks we can have 0, 1
	 * or 2 padding bytes each time we make a step of 2 * R_SUBBLOCK bytes
	 * (moving to another column). The 2nd parity sub-block uses the same
	 * inter-column permutation pattern as the systematic and 1st parity
	 * sub-blocks, but it adds '1' to the resulting index and calculates
	 * the modulus of the result and Kw. The last column is mapped to
	 * itself (id 31), so the first byte taken from the 2nd parity
	 * sub-block will be the 32nd (31+1) byte, then the 64th etc. (the
	 * step is C_SUBBLOCK == 32) and the last byte will be the first byte
	 * from the sub-block:
	 * (32 + 32 * (R_SUBBLOCK-1)) % Kw == Kw % Kw == 0. Nd can't be smaller
	 * than 2, so we know that bytes with ids 0 and 1 must be the padding
	 * bytes. The bytes from the 1st parity sub-block are the bytes from
	 * the 31st column - Nd can't be greater than 26, so we are sure that
	 * there are no padding bytes in the 31st column.
	 */
	rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock - 1);
}

static inline void
move_padding_bytes(const uint8_t *in, uint8_t *out, uint16_t k,
		uint16_t ncb)
{
	uint16_t d = k + 4;
	uint16_t kpi = ncb / 3;
	uint16_t nd = kpi - d;

	rte_memcpy(&out[nd], in, d);
	rte_memcpy(&out[nd + kpi + 64], &in[kpi], d);
	rte_memcpy(&out[nd + 2 * (kpi + 64)], &in[2 * kpi], d);
}
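
/* A worked numerology example for the two helpers above (illustrative): for
 * k = 40, D = k + 4 = 44. The sub-block interleaver has nCol = 32 columns,
 * so nRow = RTE_ALIGN_CEIL(44, 32) / 32 = 2, Kpi = 64, Kw = 3 * Kpi = 192
 * and Nd = Kpi - D = 20 padding bytes per sub-block. For the largest CB,
 * k = 6144, D = 6148, Kpi = 6176 and Kw = 18528, which matches
 * RTE_BBDEV_MAX_KW.
 */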

static inline void
process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
		uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in,
		struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset,
		bool check_crc_24b, uint16_t total_left)
{
	int ret;
	int32_t k_idx;
	int32_t iter_cnt;
	uint8_t *in, *out, *adapter_input;
	int32_t ncb, ncb_without_null;
	struct bblib_turbo_adapter_ul_response adapter_resp;
	struct bblib_turbo_adapter_ul_request adapter_req;
	struct bblib_turbo_decoder_request turbo_req;
	struct bblib_turbo_decoder_response turbo_resp;
	struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;

	k_idx = compute_idx(k);

	ret = is_dec_input_valid(k_idx, kw, total_left);
	if (ret != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
	ncb = kw;
	ncb_without_null = (k + 4) * 3;

	if (check_bit(dec->op_flags, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE)) {
		struct bblib_deinterleave_ul_request deint_req;
		struct bblib_deinterleave_ul_response deint_resp;

		/* SW decoder accepts only a circular buffer without NULL bytes
		 * so the input needs to be converted.
		 */
		remove_nulls_from_circular_buf(in, q->deint_input, k, ncb);

		deint_req.pharqbuffer = q->deint_input;
		deint_req.ncb = ncb_without_null;
		deint_resp.pinteleavebuffer = q->deint_output;
		bblib_deinterleave_ul(&deint_req, &deint_resp);
	} else
		move_padding_bytes(in, q->deint_output, k, ncb);

	adapter_input = q->deint_output;

	if (dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN)
		adapter_req.isinverted = 1;
	else if (dec->op_flags & RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN)
		adapter_req.isinverted = 0;
	else {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "LLR format wasn't specified");
		return;
	}

	adapter_req.ncb = ncb_without_null;
	adapter_req.pinteleavebuffer = adapter_input;
	adapter_resp.pharqout = q->adapter_output;
	bblib_turbo_adapter_ul(&adapter_req, &adapter_resp);

	out = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3));
	if (out == NULL) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR, "Too little space in output mbuf");
		return;
	}
	/* rte_bbdev_op_data.offset can be different than the offset of the
	 * appended bytes
	 */
	out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
	if (check_crc_24b)
		turbo_req.c = c + 1;
	else
		turbo_req.c = c;
	turbo_req.input = (int8_t *)q->adapter_output;
	turbo_req.k = k;
	turbo_req.k_idx = k_idx;
	turbo_req.max_iter_num = dec->iter_max;
	turbo_resp.ag_buf = q->ag;
	turbo_resp.cb_buf = q->code_block;
	turbo_resp.output = out;
	iter_cnt = bblib_turbo_decoder(&turbo_req, &turbo_resp);
	dec->hard_output.length += (k >> 3);

	if (iter_cnt > 0) {
		/* Temporary solution for returned iter_count from SDK */
		iter_cnt = (iter_cnt - 1) / 2;
		dec->iter_count = RTE_MAX(iter_cnt, dec->iter_count);
	} else {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR, "Turbo Decoder failed");
		return;
	}
}

static inline void
enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
{
	uint8_t c, r = 0;
	uint16_t kw, k = 0;
	struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
	struct rte_mbuf *m_in = dec->input.data;
	struct rte_mbuf *m_out = dec->hard_output.data;
	uint16_t in_offset = dec->input.offset;
	uint16_t total_left = dec->input.length;
	uint16_t out_offset = dec->hard_output.offset;

	/* Clear op status */
	op->status = 0;

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if (dec->code_block_mode == 0) { /* For Transport Block mode */
		c = dec->tb_params.c;
	} else { /* For Code Block mode */
		k = dec->cb_params.k;
		c = 1;
	}

	while (total_left > 0) {
		if (dec->code_block_mode == 0)
			k = (r < dec->tb_params.c_neg) ?
				dec->tb_params.k_neg : dec->tb_params.k_pos;

		/* Calculates circular buffer size (Kw).
		 * According to 3GPP TS 36.212 section 5.1.4.2
		 * Kw = 3 * Kpi,
		 * where:
		 * Kpi = nCol * nRow
		 * where nCol is 32 and nRow can be calculated from:
		 * D <= nCol * nRow
		 * where D is the size of each output from turbo encoder block
		 * (k + 4).
		 */
		kw = RTE_ALIGN_CEIL(k + 4, RTE_BBDEV_C_SUBBLOCK) * 3;

		process_dec_cb(q, op, c, k, kw, m_in, m_out, in_offset,
				out_offset, check_bit(dec->op_flags,
				RTE_BBDEV_TURBO_CRC_TYPE_24B), total_left);
		/* As a result of decoding we get Code Block with included
		 * decoded CRC24 at the end of Code Block. Type of CRC24 is
		 * specified by flag.
		 */

		/* Update total_left */
		total_left -= kw;
		/* Update offsets for next CBs (if exist) */
		in_offset += kw;
		out_offset += (k >> 3);
		r++;
	}
	if (total_left != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included Circular buffer sizes");
	}
}

static inline uint16_t
enqueue_dec_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_dec_op **ops,
		uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; ++i)
		enqueue_dec_one_op(q, ops[i]);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
			NULL);
}

/* Enqueue encode burst */
static uint16_t
enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Enqueue decode burst */
static uint16_t
enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_dec_all_ops(q, ops, nb_ops);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Dequeue decode burst */
static uint16_t
dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	struct turbo_sw_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

/* Dequeue encode burst */
static uint16_t
dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	struct turbo_sw_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}
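
/* Processing in this PMD is synchronous: enqueue runs the encode/decode
 * inline and pushes the finished ops onto processed_pkts, so a dequeue that
 * follows an enqueue returns completed ops. An illustrative application-side
 * sketch (dev_id, queue 0 and the BURST/nb_ops names are assumptions, not
 * part of this driver):
 *
 *	struct rte_bbdev_enc_op *ops[BURST];
 *	uint16_t n_enq, n_deq;
 *
 *	n_enq = rte_bbdev_enqueue_enc_ops(dev_id, 0, ops, nb_ops);
 *	n_deq = rte_bbdev_dequeue_enc_ops(dev_id, 0, ops, n_enq);
 */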

/* Parse 16bit integer from string argument */
static inline int
parse_u16_arg(const char *key, const char *value, void *extra_args)
{
	uint16_t *u16 = extra_args;
	unsigned long result;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;
	errno = 0;
	result = strtoul(value, NULL, 0);
	if ((result >= (1 << 16)) || (errno != 0)) {
		rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
		return -ERANGE;
	}
	*u16 = (uint16_t)result;
	return 0;
}

/* Parse parameters used to create device */
static int
parse_turbo_sw_params(struct turbo_sw_params *params, const char *input_args)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;
	if (input_args) {
		kvlist = rte_kvargs_parse(input_args, turbo_sw_valid_params);
		if (kvlist == NULL)
			return -EFAULT;

		ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[0],
				&parse_u16_arg, &params->queues_num);
		if (ret < 0)
			goto exit;

		ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[1],
				&parse_u16_arg, &params->socket_id);
		if (ret < 0)
			goto exit;

		if (params->socket_id >= RTE_MAX_NUMA_NODES) {
			rte_bbdev_log(ERR, "Invalid socket, must be < %u",
					RTE_MAX_NUMA_NODES);
			ret = -EINVAL;
			goto exit;
		}
	}

exit:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

/* Create device */
static int
turbo_sw_bbdev_create(struct rte_vdev_device *vdev,
		struct turbo_sw_params *init_params)
{
	struct rte_bbdev *bbdev;
	const char *name = rte_vdev_device_name(vdev);

	bbdev = rte_bbdev_allocate(name);
	if (bbdev == NULL)
		return -ENODEV;

	bbdev->data->dev_private = rte_zmalloc_socket(name,
			sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
			init_params->socket_id);
	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_release(bbdev);
		return -ENOMEM;
	}

	bbdev->dev_ops = &pmd_ops;
	bbdev->device = &vdev->device;
	bbdev->data->socket_id = init_params->socket_id;
	bbdev->intr_handle = NULL;

	/* register rx/tx burst functions for data path */
	bbdev->dequeue_enc_ops = dequeue_enc_ops;
	bbdev->dequeue_dec_ops = dequeue_dec_ops;
	bbdev->enqueue_enc_ops = enqueue_enc_ops;
	bbdev->enqueue_dec_ops = enqueue_dec_ops;
	((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
			init_params->queues_num;

	return 0;
}

/* Initialise device */
static int
turbo_sw_bbdev_probe(struct rte_vdev_device *vdev)
{
	struct turbo_sw_params init_params = {
		rte_socket_id(),
		RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
	};
	const char *name;
	const char *input_args;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);
	parse_turbo_sw_params(&init_params, input_args);

	rte_bbdev_log_debug(
			"Initialising %s on NUMA node %d with max queues: %d",
			name, init_params.socket_id, init_params.queues_num);

	return turbo_sw_bbdev_create(vdev, &init_params);
}

/* Uninitialise device */
static int
turbo_sw_bbdev_remove(struct rte_vdev_device *vdev)
{
	struct rte_bbdev *bbdev;
	const char *name;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	bbdev = rte_bbdev_get_named_dev(name);
	if (bbdev == NULL)
		return -EINVAL;

	rte_free(bbdev->data->dev_private);

	return rte_bbdev_release(bbdev);
}
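
/* The device is instantiated from the EAL command line; an illustrative
 * example (the driver and argument names come from the registration below,
 * the application name is an assumption):
 *
 *	./app --vdev="turbo_sw,max_nb_queues=8,socket_id=0"
 *
 * Both arguments are optional; otherwise the defaults set in
 * turbo_sw_bbdev_probe() apply (current socket,
 * RTE_BBDEV_DEFAULT_MAX_NB_QUEUES).
 */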

static struct rte_vdev_driver bbdev_turbo_sw_pmd_drv = {
	.probe = turbo_sw_bbdev_probe,
	.remove = turbo_sw_bbdev_remove
};

RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_turbo_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
	TURBO_SW_MAX_NB_QUEUES_ARG"=<int> "
	TURBO_SW_SOCKET_ID_ARG"=<int>");

RTE_INIT(turbo_sw_bbdev_init_log);
static void
turbo_sw_bbdev_init_log(void)
{
	bbdev_turbo_sw_logtype = rte_log_register("pmd.bb.turbo_sw");
	if (bbdev_turbo_sw_logtype >= 0)
		rte_log_set_level(bbdev_turbo_sw_logtype, RTE_LOG_NOTICE);
}