/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <errno.h>
#include <stdbool.h>
#include <string.h>

#include <rte_common.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_kvargs.h>

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>

#include <phy_turbo.h>
#include <phy_crc.h>
#include <phy_rate_match.h>
#include <divide.h>

#define DRIVER_NAME turbo_sw

/* Turbo SW PMD logging ID */
static int bbdev_turbo_sw_logtype;

/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, bbdev_turbo_sw_logtype, fmt "\n", \
		##__VA_ARGS__)

#define rte_bbdev_log_debug(fmt, ...) \
	rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
		##__VA_ARGS__)

/* Number of columns in sub-block interleaver (36.212, section 5.1.4.1.1) */
#define C_SUBBLOCK (32)
/* Maximum transport block size, in bits */
#define MAX_TB_SIZE (391656)
/* Maximum code block size, in bits */
#define MAX_CB_SIZE (6144)
/* Maximum circular buffer size (Kw); the decoder input carries one byte per
 * soft bit.
 */
#define MAX_KW (18528)

/* private data structure */
struct bbdev_private {
	unsigned int max_nb_queues;  /**< Max number of queues */
};

/* Initialisation params structure that can be used by Turbo SW driver */
struct turbo_sw_params {
	int socket_id;  /**< Turbo SW device socket */
	uint16_t queues_num;  /**< Turbo SW device queues number */
};

/* Acceptable params for Turbo SW devices */
#define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues"
#define TURBO_SW_SOCKET_ID_ARG "socket_id"

static const char * const turbo_sw_valid_params[] = {
	TURBO_SW_MAX_NB_QUEUES_ARG,
	TURBO_SW_SOCKET_ID_ARG
};

/* queue */
struct turbo_sw_queue {
	/* Ring for processed (encoded/decoded) operations which are ready to
	 * be dequeued.
	 */
	struct rte_ring *processed_pkts;
	/* Stores input for turbo encoder (used when CRC attachment is
	 * performed).
	 */
	uint8_t *enc_in;
	/* Stores output from turbo encoder */
	uint8_t *enc_out;
	/* Alpha gamma buf for bblib_turbo_decoder() function */
	int8_t *ag;
	/* Temp buf for bblib_turbo_decoder() function */
	uint16_t *code_block;
	/* Input buf for bblib_rate_dematching_lte() function */
	uint8_t *deint_input;
	/* Output buf for bblib_rate_dematching_lte() function */
	uint8_t *deint_output;
	/* Output buf for bblib_turbodec_adapter_lte() function */
	uint8_t *adapter_output;
	/* Operation type of this queue */
	enum rte_bbdev_op_type type;
} __rte_cache_aligned;

/* Calculate index based on Table 5.1.3-3 from TS 36.212 */
static inline int32_t
compute_idx(uint16_t k)
{
	if (k < 40 || k > MAX_CB_SIZE)
		return -1;

	if (k > 2048) {
		if ((k - 2048) % 64 != 0)
			return -1;

		return 124 + (k - 2048) / 64;
	} else if (k <= 512) {
		if ((k - 40) % 8 != 0)
			return -1;

		return (k - 40) / 8 + 1;
	} else if (k <= 1024) {
		if ((k - 512) % 16 != 0)
			return -1;

		return 60 + (k - 512) / 16;
	} else { /* 1024 < k <= 2048 */
		if ((k - 1024) % 32 != 0)
			return -1;

		return 92 + (k - 1024) / 32;
	}
}
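
/*
 * Worked examples for the mapping above (values from Table 5.1.3-3 of
 * TS 36.212): K = 40 maps to index 1, K = 512 to index 60 and K = 6144 to
 * index 188, while a K off the table granularity, e.g. K = 41, yields -1.
 */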

/* Read flag value 0/1 from bitmap */
static inline bool
check_bit(uint32_t bitmap, uint32_t bitmask)
{
	return bitmap & bitmask;
}

/* Get device info */
static void
info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
{
	struct bbdev_private *internals = dev->data->dev_private;

	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
		{
			.type = RTE_BBDEV_OP_TURBO_DEC,
			.cap.turbo_dec = {
				.capability_flags =
					RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
					RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN |
					RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
					RTE_BBDEV_TURBO_CRC_TYPE_24B |
					RTE_BBDEV_TURBO_EARLY_TERMINATION,
				.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
				.num_buffers_hard_out =
						RTE_BBDEV_MAX_CODE_BLOCKS,
				.num_buffers_soft_out = 0,
			}
		},
		{
			.type = RTE_BBDEV_OP_TURBO_ENC,
			.cap.turbo_enc = {
				.capability_flags =
					RTE_BBDEV_TURBO_CRC_24B_ATTACH |
					RTE_BBDEV_TURBO_CRC_24A_ATTACH |
					RTE_BBDEV_TURBO_RATE_MATCH |
					RTE_BBDEV_TURBO_RV_INDEX_BYPASS,
				.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
				.num_buffers_dst = RTE_BBDEV_MAX_CODE_BLOCKS,
			}
		},
		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	};

	static struct rte_bbdev_queue_conf default_queue_conf = {
		.queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
	};

	static const enum rte_cpu_flag_t cpu_flag = RTE_CPUFLAG_SSE4_2;

	default_queue_conf.socket = dev->data->socket_id;

	dev_info->driver_name = RTE_STR(DRIVER_NAME);
	dev_info->max_num_queues = internals->max_nb_queues;
	dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
	dev_info->hardware_accelerated = false;
	dev_info->max_queue_priority = 0;
	dev_info->default_queue_conf = default_queue_conf;
	dev_info->capabilities = bbdev_capabilities;
	dev_info->cpu_flag_reqs = &cpu_flag;
	dev_info->min_alignment = 64;

	rte_bbdev_log_debug("got device info from %u", dev->data->dev_id);
}

/* Release queue */
static int
q_release(struct rte_bbdev *dev, uint16_t q_id)
{
	struct turbo_sw_queue *q = dev->data->queues[q_id].queue_private;

	if (q != NULL) {
		rte_ring_free(q->processed_pkts);
		rte_free(q->enc_out);
		rte_free(q->enc_in);
		rte_free(q->ag);
		rte_free(q->code_block);
		rte_free(q->deint_input);
		rte_free(q->deint_output);
		rte_free(q->adapter_output);
		rte_free(q);
		dev->data->queues[q_id].queue_private = NULL;
	}

	rte_bbdev_log_debug("released device queue %u:%u",
			dev->data->dev_id, q_id);
	return 0;
}

/* Setup a queue */
static int
q_setup(struct rte_bbdev *dev, uint16_t q_id,
		const struct rte_bbdev_queue_conf *queue_conf)
{
	int ret;
	struct turbo_sw_queue *q;
	char name[RTE_RING_NAMESIZE];

	/* Allocate the queue data structure. */
	q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q == NULL) {
		rte_bbdev_log(ERR, "Failed to allocate queue memory");
		return -ENOMEM;
	}

	/* Allocate memory for encoder output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_enc_out%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->enc_out = rte_zmalloc_socket(name,
			((MAX_TB_SIZE >> 3) + 3) * sizeof(*q->enc_out) * 3,
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->enc_out == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Allocate memory for turbo encoder input (used when CRC attachment
	 * is performed).
	 */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_enc_in%u:%u", dev->data->dev_id,
			q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->enc_in = rte_zmalloc_socket(name,
			(MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->enc_in == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Allocate memory for Alpha Gamma temp buffer. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_ag%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->ag = rte_zmalloc_socket(name,
			MAX_CB_SIZE * 10 * sizeof(*q->ag),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->ag == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Allocate memory for code block temp buffer. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_cb%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->code_block = rte_zmalloc_socket(name,
			(MAX_CB_SIZE >> 3) * sizeof(*q->code_block),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->code_block == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Allocate memory for Deinterleaver input. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_deint_input%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->deint_input = rte_zmalloc_socket(name,
			MAX_KW * sizeof(*q->deint_input),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->deint_input == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Allocate memory for Deinterleaver output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_deint_output%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->deint_output = rte_zmalloc_socket(name,
			MAX_KW * sizeof(*q->deint_output),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->deint_output == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Allocate memory for Adapter output. */
	ret = snprintf(name, RTE_RING_NAMESIZE,
			RTE_STR(DRIVER_NAME)"_adapter_output%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->adapter_output = rte_zmalloc_socket(name,
			MAX_CB_SIZE * 6 * sizeof(*q->adapter_output),
			RTE_CACHE_LINE_SIZE, queue_conf->socket);
	if (q->adapter_output == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate queue memory for %s", name);
		ret = -ENOMEM;
		goto free_q;
	}

	/* Create ring for processed operations awaiting dequeue. */
	ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"%u:%u",
			dev->data->dev_id, q_id);
	if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
		rte_bbdev_log(ERR,
				"Creating queue name for device %u queue %u failed",
				dev->data->dev_id, q_id);
		ret = -ENAMETOOLONG;
		goto free_q;
	}
	q->processed_pkts = rte_ring_create(name, queue_conf->queue_size,
			queue_conf->socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (q->processed_pkts == NULL) {
		rte_bbdev_log(ERR, "Failed to create ring for %s", name);
		ret = -EFAULT;
		goto free_q;
	}

	q->type = queue_conf->op_type;

	dev->data->queues[q_id].queue_private = q;
	rte_bbdev_log_debug("setup device queue %s", name);
	return 0;

free_q:
	/* Free everything allocated so far; rte_ring_free() and rte_free()
	 * accept NULL pointers.
	 */
	rte_ring_free(q->processed_pkts);
	rte_free(q->enc_out);
	rte_free(q->enc_in);
	rte_free(q->ag);
	rte_free(q->code_block);
	rte_free(q->deint_input);
	rte_free(q->deint_output);
	rte_free(q->adapter_output);
	rte_free(q);
	return ret;
}

static const struct rte_bbdev_ops pmd_ops = {
	.info_get = info_get,
	.queue_setup = q_setup,
	.queue_release = q_release
};

/* Checks if the encoder input buffer is correct.
 * Returns 0 if it's valid, -1 otherwise.
 */
static inline int
is_enc_input_valid(const uint16_t k, const int32_t k_idx,
		const uint16_t in_length)
{
	if (k_idx < 0) {
		rte_bbdev_log(ERR, "K Index is invalid");
		return -1;
	}

	if (in_length - (k >> 3) < 0) {
		rte_bbdev_log(ERR,
				"Mismatch between input length (%u bytes) and K (%u bits)",
				in_length, k);
		return -1;
	}

	if (k > MAX_CB_SIZE) {
		rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
				k, MAX_CB_SIZE);
		return -1;
	}

	return 0;
}

/* Checks if the decoder input buffer is correct.
 * Returns 0 if it's valid, -1 otherwise.
 */
static inline int
is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length)
{
	if (k_idx < 0) {
		rte_bbdev_log(ERR, "K index is invalid");
		return -1;
	}

	if (in_length - kw < 0) {
		rte_bbdev_log(ERR,
				"Mismatch between input length (%d) and kw (%d)",
				in_length, kw);
		return -1;
	}

	if (kw > MAX_KW) {
		rte_bbdev_log(ERR, "Input length (%d) is too big, max: %d",
				kw, MAX_KW);
		return -1;
	}

	return 0;
}

static inline void
process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
		uint8_t r, uint8_t c, uint16_t k, uint16_t ncb,
		uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out,
		uint16_t in_offset, uint16_t out_offset, uint16_t total_left)
{
	int ret;
	int16_t k_idx;
	uint16_t m;
	uint8_t *in, *out0, *out1, *out2, *tmp_out, *rm_out;
	uint64_t first_3_bytes = 0;
	bool first_3_bytes_saved = false;
	struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
	struct bblib_crc_request crc_req;
	struct bblib_crc_response crc_resp;
	struct bblib_turbo_encoder_request turbo_req;
	struct bblib_turbo_encoder_response turbo_resp;
	struct bblib_rate_match_dl_request rm_req;
	struct bblib_rate_match_dl_response rm_resp;

	k_idx = compute_idx(k);
	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);

	/* CRC24A (for TB) */
	if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH) &&
			(enc->code_block_mode == 1)) {
		ret = is_enc_input_valid(k - 24, k_idx, total_left);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
		crc_req.data = in;
		crc_req.len = (k - 24) >> 3;
		/* Check if there is room for the CRC bits. If not, use the
		 * temporary buffer.
		 */
		if (rte_pktmbuf_append(m_in, 3) == NULL) {
			rte_memcpy(q->enc_in, in, (k - 24) >> 3);
			in = q->enc_in;
		} else {
			/* The CRC bytes will overwrite the first 3 bytes of
			 * the next CB, so save the surrounding 8 bytes and
			 * restore them after encoding. For the last CB there
			 * is nothing to overwrite, so this branch is skipped.
			 */
			first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
			first_3_bytes_saved = true;
		}

		crc_resp.data = in;
		bblib_lte_crc24a_gen(&crc_req, &crc_resp);
	} else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) {
		/* CRC24B */
		ret = is_enc_input_valid(k - 24, k_idx, total_left);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
		crc_req.data = in;
		crc_req.len = (k - 24) >> 3;
		/* If this is the last CB in the TB, check if there is room
		 * for the CRC bits; if not, use the temporary buffer.
		 */
		if ((c - r == 1) && (rte_pktmbuf_append(m_in, 3) == NULL)) {
			rte_memcpy(q->enc_in, in, (k - 24) >> 3);
			in = q->enc_in;
		} else if (c - r > 1) {
			/* The CRC bytes will overwrite the first 3 bytes of
			 * the next CB, so save the surrounding 8 bytes and
			 * restore them after encoding.
			 */
			first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
			first_3_bytes_saved = true;
		}

		crc_resp.data = in;
		bblib_lte_crc24b_gen(&crc_req, &crc_resp);
	} else {
		ret = is_enc_input_valid(k, k_idx, total_left);
		if (ret != 0) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			return;
		}
	}

	/* Turbo encoder */

	/* Each bit layer output from turbo encoder is (k+4) bits long, i.e.
	 * input length + 4 tail bits. That's (k/8) + 1 bytes after rounding
	 * up. So dst_data's length should be 3*(k/8) + 3 bytes.
	 * In the rate-matching bypass case, the output pointers passed to the
	 * encoder (out0, out1 and out2) can point directly at the addresses
	 * of the output from the turbo_enc entity.
	 */
	if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
		out0 = q->enc_out;
		out1 = RTE_PTR_ADD(out0, (k >> 3) + 1);
		out2 = RTE_PTR_ADD(out1, (k >> 3) + 1);
	} else {
		out0 = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3) * 3 + 2);
		if (out0 == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			rte_bbdev_log(ERR,
					"Too little space in output mbuf");
			return;
		}
		enc->output.length += (k >> 3) * 3 + 2;
		/* rte_bbdev_op_data.offset can be different than the
		 * offset of the appended bytes
		 */
		out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
		out1 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
				out_offset + (k >> 3) + 1);
		out2 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
				out_offset + 2 * ((k >> 3) + 1));
	}

	turbo_req.case_id = k_idx;
	turbo_req.input_win = in;
	turbo_req.length = k >> 3;
	turbo_resp.output_win_0 = out0;
	turbo_resp.output_win_1 = out1;
	turbo_resp.output_win_2 = out2;
	if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "Turbo Encoder failed");
		return;
	}

	/* Restore the first 3 bytes of the next CB if they were overwritten
	 * by the CRC.
	 */
	if (first_3_bytes_saved)
		*((uint64_t *)&in[(k - 32) >> 3]) = first_3_bytes;

	/* Rate-matching */
	if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
		/* get output data starting address */
		rm_out = (uint8_t *)rte_pktmbuf_append(m_out, (e >> 3));
		if (rm_out == NULL) {
			op->status |= 1 << RTE_BBDEV_DATA_ERROR;
			rte_bbdev_log(ERR,
					"Too little space in output mbuf");
			return;
		}
		/* rte_bbdev_op_data.offset can be different than the offset
		 * of the appended bytes
		 */
		rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);

		/* index of current code block */
		rm_req.r = r;
		/* total number of code blocks */
		rm_req.C = c;
		/* For DL - 1, UL - 0 */
		rm_req.direction = 1;
		/* According to 3GPP TS 36.212 section 5.1.4.1.2, Nsoft, KMIMO
		 * and MDL_HARQ are used for the Ncb calculation. As Ncb is
		 * already known, those parameters are set so that the
		 * calculation yields it.
		 */
		rm_req.Nsoft = ncb * rm_req.C;
		rm_req.KMIMO = 1;
		rm_req.MDL_HARQ = 1;
		/* Likewise, NL, Qm and G are used for the E calculation. As E
		 * is already known, those parameters are set so that the
		 * calculation yields it.
		 */
		rm_req.NL = e;
		rm_req.Qm = 1;
		rm_req.G = rm_req.NL * rm_req.Qm * rm_req.C;
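		/*
		 * Illustrative check of the two shortcuts above, assuming the
		 * SDK follows the TS 36.212 section 5.1.4.1.2 formulas: with
		 * KC = 1, KMIMO = 1 and MDL_HARQ = 1,
		 * NIR = Nsoft / (KC * KMIMO * min(MDL_HARQ, Mlimit)) = ncb * C,
		 * so Ncb = min(NIR / C, Kw) = ncb. Likewise
		 * G' = G / (NL * Qm) = (e * 1 * C) / (e * 1) = C, giving
		 * E = NL * Qm * (G' / C) = e.
		 */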

		rm_req.rvidx = enc->rv_index;
		rm_req.Kidx = k_idx - 1;
		rm_req.nLen = k + 4;
		rm_req.tin0 = out0;
		rm_req.tin1 = out1;
		rm_req.tin2 = out2;
		rm_resp.output = rm_out;
		rm_resp.OutputLen = (e >> 3);
		if (enc->op_flags & RTE_BBDEV_TURBO_RV_INDEX_BYPASS)
			rm_req.bypass_rvidx = 1;
		else
			rm_req.bypass_rvidx = 0;

		if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) {
			op->status |= 1 << RTE_BBDEV_DRV_ERROR;
			rte_bbdev_log(ERR, "Rate matching failed");
			return;
		}
		enc->output.length += rm_resp.OutputLen;
	} else {
		/* Rate matching is bypassed */

		/* Complete the last byte of out0 (where the 4 tail bits are
		 * stored) by moving in the first 4 bits from out1.
		 */
		tmp_out = (uint8_t *) --out1;
		*tmp_out = *tmp_out | ((*(tmp_out + 1) & 0xF0) >> 4);
		tmp_out++;
		/* Shift out1 data left by 4 bits */
		for (m = 0; m < k >> 3; ++m) {
			uint8_t *first = tmp_out;
			uint8_t second = *(tmp_out + 1);
			*first = (*first << 4) | ((second & 0xF0) >> 4);
			tmp_out++;
		}
		/* Shift out2 data left by 8 bits */
		for (m = 0; m < (k >> 3) + 1; ++m) {
			*tmp_out = *(tmp_out + 1);
			tmp_out++;
		}
		*tmp_out = 0;
	}
}

static inline void
enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
{
	uint8_t c, r, crc24_bits = 0;
	uint16_t k, ncb;
	uint32_t e;
	struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
	uint16_t in_offset = enc->input.offset;
	uint16_t out_offset = enc->output.offset;
	struct rte_mbuf *m_in = enc->input.data;
	struct rte_mbuf *m_out = enc->output.data;
	uint16_t total_left = enc->input.length;

	/* Clear op status */
	op->status = 0;

	if (total_left > MAX_TB_SIZE >> 3) {
		rte_bbdev_log(ERR,
				"TB size (%u bytes) is too big, max: %d bytes",
				total_left, MAX_TB_SIZE >> 3);
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if ((enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) ||
			(enc->op_flags & RTE_BBDEV_TURBO_CRC_24A_ATTACH))
		crc24_bits = 24;

	if (enc->code_block_mode == 0) { /* For Transport Block mode */
		c = enc->tb_params.c;
		r = enc->tb_params.r;
	} else { /* For Code Block mode */
		c = 1;
		r = 0;
	}

	while (total_left > 0 && r < c) {
		if (enc->code_block_mode == 0) {
			k = (r < enc->tb_params.c_neg) ?
				enc->tb_params.k_neg : enc->tb_params.k_pos;
			ncb = (r < enc->tb_params.c_neg) ?
				enc->tb_params.ncb_neg : enc->tb_params.ncb_pos;
			e = (r < enc->tb_params.cab) ?
				enc->tb_params.ea : enc->tb_params.eb;
		} else {
			k = enc->cb_params.k;
			ncb = enc->cb_params.ncb;
			e = enc->cb_params.e;
		}

		process_enc_cb(q, op, r, c, k, ncb, e, m_in,
				m_out, in_offset, out_offset, total_left);
		/* Update total_left */
		total_left -= (k - crc24_bits) >> 3;
		/* Update offsets for next CBs (if exist) */
		in_offset += (k - crc24_bits) >> 3;
		if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH)
			out_offset += e >> 3;
		else
			out_offset += (k >> 3) * 3 + 2;
		r++;
	}

	/* check if all input data was processed */
	if (total_left != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included CBs sizes");
	}
}

static inline uint16_t
enqueue_enc_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_enc_op **ops,
		uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; ++i)
		enqueue_enc_one_op(q, ops[i]);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
			NULL);
}

/* Remove the padding bytes from a cyclic buffer.
 * The input buffer is a data stream wk as described in 3GPP TS 36.212 section
 * 5.1.4.1.2, starting from w0 and with length Ncb bytes.
 * The output buffer is a data stream wk with pruned padding bytes. Its length
 * is 3*D bytes and the order of non-padding bytes is preserved.
 */
static inline void
remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
		uint16_t ncb)
{
	uint32_t in_idx, out_idx, c_idx;
	const uint32_t d = k + 4;
	const uint32_t kw = (ncb / 3);
	const uint32_t nd = kw - d;
	const uint32_t r_subblock = kw / C_SUBBLOCK;
	/* Inter-column permutation pattern */
	const uint32_t P[C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28, 2, 18, 10,
			26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13, 29, 3, 19,
			11, 27, 7, 23, 15, 31};
	in_idx = 0;
	out_idx = 0;

	/* The padding bytes are at the first Nd positions in the first row. */
	for (c_idx = 0; in_idx < kw; in_idx += r_subblock, ++c_idx) {
		if (P[c_idx] < nd) {
			rte_memcpy(&out[out_idx], &in[in_idx + 1],
					r_subblock - 1);
			out_idx += r_subblock - 1;
		} else {
			rte_memcpy(&out[out_idx], &in[in_idx], r_subblock);
			out_idx += r_subblock;
		}
	}

	/* First and second parity bits sub-blocks are interlaced. */
	for (c_idx = 0; in_idx < ncb - 2 * r_subblock;
			in_idx += 2 * r_subblock, ++c_idx) {
		uint32_t second_block_c_idx = P[c_idx];
		uint32_t third_block_c_idx = P[c_idx] + 1;

		if (second_block_c_idx < nd && third_block_c_idx < nd) {
			rte_memcpy(&out[out_idx], &in[in_idx + 2],
					2 * r_subblock - 2);
			out_idx += 2 * r_subblock - 2;
		} else if (second_block_c_idx >= nd &&
				third_block_c_idx >= nd) {
			rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock);
			out_idx += 2 * r_subblock;
		} else if (second_block_c_idx < nd) {
			out[out_idx++] = in[in_idx];
			rte_memcpy(&out[out_idx], &in[in_idx + 2],
					2 * r_subblock - 2);
			out_idx += 2 * r_subblock - 2;
		} else {
			rte_memcpy(&out[out_idx], &in[in_idx + 1],
					2 * r_subblock - 1);
			out_idx += 2 * r_subblock - 1;
		}
	}

	/* The last interlaced row is different - its last byte is the only
	 * padding byte. We can have from 2 up to 26 padding bytes (Nd) per
	 * sub-block. After interlacing the 1st and 2nd parity sub-blocks we
	 * can have 0, 1 or 2 padding bytes each time we make a step of
	 * 2 * R_SUBBLOCK bytes (moving to another column). The 2nd parity
	 * sub-block uses the same inter-column permutation pattern as the
	 * systematic and 1st parity sub-blocks, but it adds '1' to the
	 * resulting index and calculates the modulus of the result and Kw.
	 * The last column is mapped to itself (id 31), so the first byte
	 * taken from the 2nd parity sub-block will be the 32nd (31+1) byte,
	 * then the 64th etc. (the step is C_SUBBLOCK == 32) and the last byte
	 * will be the first byte from the sub-block:
	 * (32 + 32 * (R_SUBBLOCK-1)) % Kw == Kw % Kw == 0. Nd can't be
	 * smaller than 2, so we know that bytes with ids 0 and 1 must be the
	 * padding bytes. The bytes from the 1st parity sub-block are the
	 * bytes from the 31st column - Nd can't be greater than 26, so we
	 * are sure that there are no padding bytes in the 31st column.
	 */
	rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock - 1);
}

static inline void
move_padding_bytes(const uint8_t *in, uint8_t *out, uint16_t k,
		uint16_t ncb)
{
	uint16_t d = k + 4;
	uint16_t kpi = ncb / 3;
	uint16_t nd = kpi - d;

	rte_memcpy(&out[nd], in, d);
	rte_memcpy(&out[nd + kpi + 64], &in[kpi], d);
	rte_memcpy(&out[nd + 2 * (kpi + 64)], &in[2 * kpi], d);
}

static inline void
process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
		uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in,
		struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset,
		bool check_crc_24b, uint16_t total_left)
{
	int ret;
	int32_t k_idx;
	int32_t iter_cnt;
	uint8_t *in, *out, *adapter_input;
	int32_t ncb, ncb_without_null;
	struct bblib_turbo_adapter_ul_response adapter_resp;
	struct bblib_turbo_adapter_ul_request adapter_req;
	struct bblib_turbo_decoder_request turbo_req;
	struct bblib_turbo_decoder_response turbo_resp;
	struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;

	k_idx = compute_idx(k);

	ret = is_dec_input_valid(k_idx, kw, total_left);
	if (ret != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
	ncb = kw;
	ncb_without_null = (k + 4) * 3;

	if (check_bit(dec->op_flags, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE)) {
		struct bblib_deinterleave_ul_request deint_req;
		struct bblib_deinterleave_ul_response deint_resp;

		/* The SW decoder accepts only a circular buffer without NULL
		 * bytes, so the input needs to be converted.
		 */
		remove_nulls_from_circular_buf(in, q->deint_input, k, ncb);

		deint_req.pharqbuffer = q->deint_input;
		deint_req.ncb = ncb_without_null;
		deint_resp.pinteleavebuffer = q->deint_output;
		bblib_deinterleave_ul(&deint_req, &deint_resp);
	} else
		move_padding_bytes(in, q->deint_output, k, ncb);

	adapter_input = q->deint_output;

	if (dec->op_flags & RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN)
		adapter_req.isinverted = 1;
	else if (dec->op_flags & RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN)
		adapter_req.isinverted = 0;
	else {
		op->status |= 1 << RTE_BBDEV_DRV_ERROR;
		rte_bbdev_log(ERR, "LLR format wasn't specified");
		return;
	}

	adapter_req.ncb = ncb_without_null;
	adapter_req.pinteleavebuffer = adapter_input;
	adapter_resp.pharqout = q->adapter_output;
	bblib_turbo_adapter_ul(&adapter_req, &adapter_resp);

	out = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3));
	if (out == NULL) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR, "Too little space in output mbuf");
		return;
	}
	/* rte_bbdev_op_data.offset can be different than the offset of the
	 * appended bytes
	 */
	out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
	if (check_crc_24b)
		turbo_req.c = c + 1;
	else
		turbo_req.c = c;
	turbo_req.input = (int8_t *)q->adapter_output;
	turbo_req.k = k;
	turbo_req.k_idx = k_idx;
	turbo_req.max_iter_num = dec->iter_max;
	turbo_resp.ag_buf = q->ag;
	turbo_resp.cb_buf = q->code_block;
	turbo_resp.output = out;
	iter_cnt = bblib_turbo_decoder(&turbo_req, &turbo_resp);
	dec->hard_output.length += (k >> 3);

	if (iter_cnt > 0) {
		/* Temporary solution for returned iter_count from SDK */
		iter_cnt = (iter_cnt - 1) / 2;
		dec->iter_count = RTE_MAX(iter_cnt, dec->iter_count);
	} else {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR, "Turbo Decoder failed");
		return;
	}
}

static inline void
enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
{
	uint8_t c, r = 0;
	uint16_t kw, k = 0;
	struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
	struct rte_mbuf *m_in = dec->input.data;
	struct rte_mbuf *m_out = dec->hard_output.data;
	uint16_t in_offset = dec->input.offset;
	uint16_t total_left = dec->input.length;
	uint16_t out_offset = dec->hard_output.offset;

	/* Clear op status */
	op->status = 0;

	if (m_in == NULL || m_out == NULL) {
		rte_bbdev_log(ERR, "Invalid mbuf pointer");
		op->status = 1 << RTE_BBDEV_DATA_ERROR;
		return;
	}

	if (dec->code_block_mode == 0) { /* For Transport Block mode */
		c = dec->tb_params.c;
	} else { /* For Code Block mode */
		k = dec->cb_params.k;
		c = 1;
	}

	while (total_left > 0) {
		if (dec->code_block_mode == 0)
			k = (r < dec->tb_params.c_neg) ?
				dec->tb_params.k_neg : dec->tb_params.k_pos;

		/* Calculate the circular buffer size (Kw).
		 * According to 3GPP TS 36.212 section 5.1.4.1:
		 * Kw = 3 * Kpi,
		 * where:
		 * Kpi = nCol * nRow
		 * where nCol is 32 and nRow can be calculated from:
		 * D <= nCol * nRow
		 * where D is the size of each output from the turbo encoder
		 * block (k + 4).
		 */
		kw = RTE_ALIGN_CEIL(k + 4, C_SUBBLOCK) * 3;
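		/*
		 * Worked example (illustrative): for k = 6144, D = k + 4 =
		 * 6148, so nRow = ceil(6148 / 32) = 193, Kpi = 32 * 193 = 6176
		 * and kw = 3 * 6176 = 18528, which is the MAX_KW bound used to
		 * size the deinterleaver buffers.
		 */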

		process_dec_cb(q, op, c, k, kw, m_in, m_out, in_offset,
				out_offset, check_bit(dec->op_flags,
				RTE_BBDEV_TURBO_CRC_TYPE_24B), total_left);
		/* As a result of decoding we get a Code Block with the
		 * decoded CRC24 included at its end. The CRC24 type is
		 * specified by flag.
		 */

		/* Update total_left */
		total_left -= kw;
		/* Update offsets for next CBs (if exist) */
		in_offset += kw;
		out_offset += (k >> 3);
		r++;
	}
	if (total_left != 0) {
		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included Circular buffer sizes");
	}
}

static inline uint16_t
enqueue_dec_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_dec_op **ops,
		uint16_t nb_ops)
{
	uint16_t i;

	for (i = 0; i < nb_ops; ++i)
		enqueue_dec_one_op(q, ops[i]);

	return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
			NULL);
}

/* Enqueue encode burst */
static uint16_t
enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Enqueue decode burst */
static uint16_t
enqueue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	void *queue = q_data->queue_private;
	struct turbo_sw_queue *q = queue;
	uint16_t nb_enqueued = 0;

	nb_enqueued = enqueue_dec_all_ops(q, ops, nb_ops);

	q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
	q_data->queue_stats.enqueued_count += nb_enqueued;

	return nb_enqueued;
}

/* Dequeue decode burst */
static uint16_t
dequeue_dec_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
{
	struct turbo_sw_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

/* Dequeue encode burst */
static uint16_t
dequeue_enc_ops(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
{
	struct turbo_sw_queue *q = q_data->queue_private;
	uint16_t nb_dequeued = rte_ring_dequeue_burst(q->processed_pkts,
			(void **)ops, nb_ops, NULL);
	q_data->queue_stats.dequeued_count += nb_dequeued;

	return nb_dequeued;
}

/* Parse 16bit integer from string argument */
static inline int
parse_u16_arg(const char *key, const char *value, void *extra_args)
{
	uint16_t *u16 = extra_args;
	unsigned long result;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;
	errno = 0;
	result = strtoul(value, NULL, 0);
	if ((result >= (1 << 16)) || (errno != 0)) {
		rte_bbdev_log(ERR, "Invalid value %lu for %s", result, key);
		return -ERANGE;
	}
	*u16 = (uint16_t)result;
	return 0;
}

/* Parse parameters used to create device */
static int
parse_turbo_sw_params(struct turbo_sw_params *params, const char *input_args)
{
	struct rte_kvargs *kvlist = NULL;
	uint16_t socket_id;
	int ret = 0;

	if (params == NULL)
		return -EINVAL;
	if (input_args) {
		kvlist = rte_kvargs_parse(input_args, turbo_sw_valid_params);
		if (kvlist == NULL)
			return -EFAULT;

		ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[0],
				&parse_u16_arg, &params->queues_num);
		if (ret < 0)
			goto exit;

		/* parse_u16_arg() writes a uint16_t, so do not pass the int
		 * socket_id to it directly; go through a temporary instead.
		 */
		socket_id = params->socket_id;
		ret = rte_kvargs_process(kvlist, turbo_sw_valid_params[1],
				&parse_u16_arg, &socket_id);
		if (ret < 0)
			goto exit;
		params->socket_id = socket_id;

		if (params->socket_id >= RTE_MAX_NUMA_NODES) {
			rte_bbdev_log(ERR, "Invalid socket, must be < %u",
					RTE_MAX_NUMA_NODES);
			ret = -EINVAL;
			goto exit;
		}
	}

exit:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

/* Create device */
static int
turbo_sw_bbdev_create(struct rte_vdev_device *vdev,
		struct turbo_sw_params *init_params)
{
	struct rte_bbdev *bbdev;
	const char *name = rte_vdev_device_name(vdev);

	bbdev = rte_bbdev_allocate(name);
	if (bbdev == NULL)
		return -ENODEV;

	bbdev->data->dev_private = rte_zmalloc_socket(name,
			sizeof(struct bbdev_private), RTE_CACHE_LINE_SIZE,
			init_params->socket_id);
	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_release(bbdev);
		return -ENOMEM;
	}

	bbdev->dev_ops = &pmd_ops;
	bbdev->device = &vdev->device;
	bbdev->data->socket_id = init_params->socket_id;
	bbdev->intr_handle = NULL;

	/* register rx/tx burst functions for data path */
	bbdev->dequeue_enc_ops = dequeue_enc_ops;
	bbdev->dequeue_dec_ops = dequeue_dec_ops;
	bbdev->enqueue_enc_ops = enqueue_enc_ops;
	bbdev->enqueue_dec_ops = enqueue_dec_ops;
	((struct bbdev_private *) bbdev->data->dev_private)->max_nb_queues =
			init_params->queues_num;

	return 0;
}

/* Initialise device */
static int
turbo_sw_bbdev_probe(struct rte_vdev_device *vdev)
{
	struct turbo_sw_params init_params = {
		rte_socket_id(),
		RTE_BBDEV_DEFAULT_MAX_NB_QUEUES
	};
	const char *name;
	const char *input_args;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;
	input_args = rte_vdev_device_args(vdev);
	parse_turbo_sw_params(&init_params, input_args);

	rte_bbdev_log_debug(
			"Initialising %s on NUMA node %d with max queues: %d",
			name, init_params.socket_id, init_params.queues_num);

	return turbo_sw_bbdev_create(vdev, &init_params);
}

/* Uninitialise device */
static int
turbo_sw_bbdev_remove(struct rte_vdev_device *vdev)
{
	struct rte_bbdev *bbdev;
	const char *name;

	if (vdev == NULL)
		return -EINVAL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	bbdev = rte_bbdev_get_named_dev(name);
	if (bbdev == NULL)
		return -EINVAL;

	rte_free(bbdev->data->dev_private);

	return rte_bbdev_release(bbdev);
}

static struct rte_vdev_driver bbdev_turbo_sw_pmd_drv = {
	.probe = turbo_sw_bbdev_probe,
	.remove = turbo_sw_bbdev_remove
};

RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_turbo_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
	TURBO_SW_MAX_NB_QUEUES_ARG"=<int> "
	TURBO_SW_SOCKET_ID_ARG"=<int>");

RTE_INIT(turbo_sw_bbdev_init_log);
static void
turbo_sw_bbdev_init_log(void)
{
	bbdev_turbo_sw_logtype = rte_log_register("pmd.bb.turbo_sw");
	if (bbdev_turbo_sw_logtype >= 0)
		rte_log_set_level(bbdev_turbo_sw_logtype, RTE_LOG_NOTICE);
}
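
/*
 * Usage sketch (illustrative, not part of the driver): the PMD is
 * instantiated through the vdev bus, either on the EAL command line, e.g.
 *
 *   --vdev="turbo_sw,max_nb_queues=8,socket_id=0"
 *
 * or programmatically with rte_vdev_init("turbo_sw",
 * "max_nb_queues=8,socket_id=0"). An application would then configure queues
 * with rte_bbdev_setup_queues()/rte_bbdev_queue_configure() and move
 * operations through rte_bbdev_enqueue_enc_ops()/rte_bbdev_dequeue_enc_ops()
 * or their decode counterparts.
 */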