/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)

void
sfc_rx_qflush_done(struct sfc_rxq_info *rxq_info)
{
	rxq_info->state |= SFC_RXQ_FLUSHED;
	rxq_info->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq_info *rxq_info)
{
	rxq_info->state |= SFC_RXQ_FLUSH_FAILED;
	rxq_info->state &= ~SFC_RXQ_FLUSHING;
}

/* Re-arm the event queue interrupt if its read pointer has advanced */
static int
sfc_efx_rx_qprime(struct sfc_efx_rxq *rxq)
{
	int rc = 0;

	if (rxq->evq->read_ptr_primed != rxq->evq->read_ptr) {
		rc = efx_ev_qprime(rxq->evq->common, rxq->evq->read_ptr);
		if (rc == 0)
			rxq->evq->read_ptr_primed = rxq->evq->read_ptr;
	}
	return rc;
}

static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	unsigned int id;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	uint16_t port_id = rxq->dp.dpq.port_id;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	id = added & rxq->ptr_mask;
	do {
		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			/*
			 * It is hardly a safe way to increment counter
			 * from different contexts, but all PMDs do it.
			 */
			rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
				RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push posted */
			break;
		}

		for (i = 0; i < RTE_DIM(objs);
		     ++i, id = (id + 1) & rxq->ptr_mask) {
			m = objs[i];

			MBUF_RAW_ALLOC_CHECK(m);

			rxd = &rxq->sw_desc[id];
			rxd->mbuf = m;

			m->data_off = RTE_PKTMBUF_HEADROOM;
			m->port = port_id;

			addr[i] = rte_pktmbuf_iova(m);
		}

		efx_rx_qpost(rxq->common, addr, rxq->buf_size,
			     RTE_DIM(objs), rxq->completed, added);
		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(added != rxq->added);
	rxq->added = added;
	efx_rx_qpush(rxq->common, added, &rxq->pushed);
}

static uint64_t
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
	uint64_t mbuf_flags = 0;

	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
		mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
		break;
	case EFX_PKT_IPV4:
		mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
			   PKT_RX_IP_CKSUM_UNKNOWN);
		break;
	}

	switch ((desc_flags &
		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
		mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
		break;
	case EFX_PKT_TCP:
	case EFX_PKT_UDP:
		mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
			   PKT_RX_L4_CKSUM_UNKNOWN);
		break;
	}

	return mbuf_flags;
}

static uint32_t
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
	return RTE_PTYPE_L2_ETHER |
		((desc_flags & EFX_PKT_IPV4) ?
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_IPV6) ?
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}

static const uint32_t *
sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
			struct rte_mbuf *m)
{
	uint8_t *mbuf_data;


	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
		return;

	mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
						      EFX_RX_HASHALG_TOEPLITZ,
						      mbuf_data);

		m->ol_flags |= PKT_RX_RSS_HASH;
	}
}

static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_dp_rxq *dp_rxq = rx_queue;
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int completed;
	unsigned int prefix_size = rxq->prefix_size;
	unsigned int done_pkts = 0;
	boolean_t discard_next = B_FALSE;
	struct rte_mbuf *scatter_pkt = NULL;

	if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
		return 0;

	sfc_ev_qpoll(rxq->evq);

	completed = rxq->completed;
	while (completed != rxq->pending && done_pkts < nb_pkts) {
		unsigned int id;
		struct sfc_efx_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int seg_len;
		unsigned int desc_flags;

		id = completed++ & rxq->ptr_mask;
		rxd = &rxq->sw_desc[id];
		m = rxd->mbuf;
		desc_flags = rxd->flags;

		if (discard_next)
			goto discard;

		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
			goto discard;

		if (desc_flags & EFX_PKT_PREFIX_LEN) {
			uint16_t tmp_size;
			int rc __rte_unused;

			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
			SFC_ASSERT(rc == 0);
			seg_len = tmp_size;
		} else {
			seg_len = rxd->size - prefix_size;
		}

		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		if (scatter_pkt != NULL) {
			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
				rte_pktmbuf_free(scatter_pkt);
				goto discard;
			}
			/* The packet to deliver */
			m = scatter_pkt;
		}

		if (desc_flags & EFX_PKT_CONT) {
			/* The packet is scattered, more fragments to come */
			scatter_pkt = m;
			/* Further fragments have no prefix */
			prefix_size = 0;
			continue;
		}

		/* Scattered packet is done */
		scatter_pkt = NULL;
		/* The first fragment of the packet has prefix */
		prefix_size = rxq->prefix_size;

		m->ol_flags =
			sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
		m->packet_type =
			sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

		/*
		 * Extract RSS hash from the packet prefix and
		 * set the corresponding field (if needed and possible)
		 */
		sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

		m->data_off += prefix_size;

		*rx_pkts++ = m;
		done_pkts++;
		continue;

discard:
		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
		rte_mbuf_raw_free(m);
		rxd->mbuf = NULL;
	}

	/* pending is only moved when entire packet is received */
	SFC_ASSERT(scatter_pkt == NULL);

	rxq->completed = completed;

	sfc_efx_rx_qrefill(rxq);

	if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN)
		sfc_efx_rx_qprime(rxq);

	return done_pkts;
}

static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
static unsigned int
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
		return 0;

	sfc_ev_qpoll(rxq->evq);

	return rxq->pending - rxq->completed;
}

static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
static int
sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if (unlikely(offset > rxq->ptr_mask))
		return -EINVAL;

	/*
	 * Poll EvQ to derive up-to-date 'rxq->pending' figure;
	 * it is required for the queue to be running, but the
	 * check is omitted because API design assumes that it
	 * is the duty of the caller to satisfy all conditions
	 */
	SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
		   SFC_EFX_RXQ_FLAG_RUNNING);
	sfc_ev_qpoll(rxq->evq);

	/*
	 * There is a handful of reserved entries in the ring,
	 * but an explicit check whether the offset points to
	 * a reserved entry is neglected since the two checks
	 * below rely on the figures which take the HW limits
	 * into account and thus if an entry is reserved, the
	 * checks will fail and UNAVAIL code will be returned
	 */

	if (offset < (rxq->pending - rxq->completed))
		return RTE_ETH_RX_DESC_DONE;

	if (offset < (rxq->added - rxq->completed))
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}

boolean_t
sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size, uint32_t rx_prefix_size,
		     boolean_t rx_scatter_enabled, const char **error)
{
	if ((rx_buf_size < pdu + rx_prefix_size) && !rx_scatter_enabled) {
		*error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
		return B_FALSE;
	}

	return B_TRUE;
}

/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *
sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter_priv *sap;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sap = sfc_adapter_priv_by_eth_dev(eth_dev);

	return sap->dp_rx;
}

struct sfc_rxq_info *
sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter_shared *sas;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sas = sfc_adapter_shared_by_eth_dev(eth_dev);

	SFC_ASSERT(dpq->queue_id < sas->rxq_count);
	return &sas->rxq_info[dpq->queue_id];
}

struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter *sa;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sa = sfc_adapter_by_eth_dev(eth_dev);

	SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->rxq_count);
	return &sa->rxq_ctrl[dpq->queue_id];
}

static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
			  __rte_unused struct sfc_dp_rx_hw_limits *limits,
			  __rte_unused struct rte_mempool *mb_pool,
			  unsigned int *rxq_entries,
			  unsigned int *evq_entries,
			  unsigned int *rxq_max_fill_level)
{
	*rxq_entries = nb_rx_desc;
	*evq_entries = nb_rx_desc;
	*rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
	return 0;
}

static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr, int socket_id,
		   const struct sfc_dp_rx_qcreate_info *info,
		   struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_efx_rxq *rxq;
	int rc;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
					 info->rxq_entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	/* efx datapath is bound to efx control path */
	rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->batch_max = info->batch_max;
	rxq->prefix_size = info->prefix_size;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
	return rc;
}

static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
static void
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_desc);
	rte_free(rxq);
}


/* Use qstop and qpurge functions in the case of qstart failure */
static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;


static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
		  __rte_unused unsigned int evq_read_ptr)
{
	/* libefx-based datapath is specific to libefx-based PMD */
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
	int rc;

	rxq->common = crxq->common;

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

	sfc_efx_rx_qrefill(rxq);

	rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

	if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN) {
		rc = sfc_efx_rx_qprime(rxq);
		if (rc != 0)
			goto fail_rx_qprime;
	}

	return 0;

fail_rx_qprime:
	sfc_efx_rx_qstop(dp_rxq, NULL);
	sfc_efx_rx_qpurge(dp_rxq);
	return rc;
}

static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
		 __rte_unused unsigned int *evq_read_ptr)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;

	/* libefx-based datapath is bound to libefx-based PMD and uses
	 * event queue structure directly. So, there is no necessity to
	 * return EvQ read pointer.
	 */
}

static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mbuf_raw_free(rxd->mbuf);
		rxd->mbuf = NULL;
		/* Packed stream relies on 0 in inactive SW desc.
		 * Rx queue stop is not performance critical, so
		 * there is no harm to do it always.
		 */
		rxd->flags = 0;
		rxd->size = 0;
	}

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}

static sfc_dp_rx_intr_enable_t sfc_efx_rx_intr_enable;
static int
sfc_efx_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	int rc = 0;

	rxq->flags |= SFC_EFX_RXQ_FLAG_INTR_EN;
	if (rxq->flags & SFC_EFX_RXQ_FLAG_STARTED) {
		rc = sfc_efx_rx_qprime(rxq);
		if (rc != 0)
			rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
	}
	return rc;
}

static sfc_dp_rx_intr_disable_t sfc_efx_rx_intr_disable;
static int
sfc_efx_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	/* Cannot disarm, just disable rearm */
	rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
	return 0;
}

struct sfc_dp_rx sfc_efx_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EFX,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= 0,
	},
	.features		= SFC_DP_RX_FEAT_INTR,
	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
				  DEV_RX_OFFLOAD_RSS_HASH,
	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
	.qcreate		= sfc_efx_rx_qcreate,
	.qdestroy		= sfc_efx_rx_qdestroy,
	.qstart			= sfc_efx_rx_qstart,
	.qstop			= sfc_efx_rx_qstop,
	.qpurge			= sfc_efx_rx_qpurge,
	.supported_ptypes_get	= sfc_efx_supported_ptypes_get,
	.qdesc_npending		= sfc_efx_rx_qdesc_npending,
	.qdesc_status		= sfc_efx_rx_qdesc_status,
	.intr_enable		= sfc_efx_rx_intr_enable,
	.intr_disable		= sfc_efx_rx_intr_disable,
	.pkt_burst		= sfc_efx_recv_pkts,
};

static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	unsigned int retry_count;
	unsigned int wait_count;
	int rc;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);

	rxq = &sa->rxq_ctrl[sw_index];

	/*
	 * Retry Rx queue flushing in the case of flush failure or
	 * timeout. In the worst case it can delay for 6 seconds.
	 */
	for (retry_count = 0;
	     ((rxq_info->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		rc = efx_rx_qflush(rxq->common);
		if (rc != 0) {
			rxq_info->state |= (rc == EALREADY) ?
				SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
			break;
		}
		rxq_info->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq_info->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait for Rx queue flush done or failed event at least
		 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq_info->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq_info->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %u flush timed out", sw_index);

		if (rxq_info->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %u flush failed", sw_index);

		if (rxq_info->state & SFC_RXQ_FLUSHED)
			sfc_notice(sa, "RxQ %u flushed", sw_index);
	}

	sa->priv.dp_rx->qpurge(rxq_info->dp);
}

static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
	struct sfc_port *port = &sa->port;
	int rc;

	/*
	 * If promiscuous or all-multicast mode has been requested, setting
	 * the filter for the default Rx queue might fail, in particular,
	 * while running over a PCI function which is not a member of the
	 * corresponding privilege groups; if this occurs, a few iterations
	 * are made to repeat this step without the promiscuous and
	 * all-multicast flags set
	 */
retry:
	rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
	if (rc == 0)
		return 0;
	else if (rc != EOPNOTSUPP)
		return rc;

	if (port->promisc) {
		sfc_warn(sa, "promiscuous mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "promiscuous mode will be disabled");

		port->promisc = B_FALSE;
		sa->eth_dev->data->promiscuous = 0;
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	if (port->allmulti) {
		sfc_warn(sa, "all-multicast mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "all-multicast mode will be disabled");

		port->allmulti = B_FALSE;
		sa->eth_dev->data->all_multicast = 0;
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	return rc;
}

int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);

	rxq = &sa->rxq_ctrl[sw_index];
	evq = rxq->evq;

	rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
	if (rc != 0)
		goto fail_ev_qstart;

	switch (rxq_info->type) {
	case EFX_RXQ_TYPE_DEFAULT:
		rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
			rxq->buf_size,
			&rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
			rxq_info->type_flags, evq->common, &rxq->common);
		break;
	case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
		struct rte_mempool *mp = rxq_info->refill_mb_pool;
		struct rte_mempool_info mp_info;

		rc = rte_mempool_ops_get_info(mp, &mp_info);
		if (rc != 0) {
			/* Positive errno is used in the driver */
			rc = -rc;
			goto fail_mp_get_info;
		}
		if (mp_info.contig_block_size <= 0) {
			rc = EINVAL;
			goto fail_bad_contig_block_size;
		}
		rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
			mp_info.contig_block_size, rxq->buf_size,
			mp->header_size + mp->elt_size + mp->trailer_size,
			sa->rxd_wait_timeout_ns,
			&rxq->mem, rxq_info->entries, rxq_info->type_flags,
			evq->common, &rxq->common);
		break;
	}
	default:
		rc = ENOTSUP;
	}
	if (rc != 0)
		goto fail_rx_qcreate;

	efx_rx_qenable(rxq->common);

	rc = sa->priv.dp_rx->qstart(rxq_info->dp, evq->read_ptr);
	if (rc != 0)
		goto fail_dp_qstart;

	rxq_info->state |= SFC_RXQ_STARTED;

	if (sw_index == 0 && !sfc_sa2shared(sa)->isolated) {
		rc = sfc_rx_default_rxq_set_filter(sa, rxq);
		if (rc != 0)
			goto fail_mac_filter_default_rxq_set;
	}

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_mac_filter_default_rxq_set:
	sfc_rx_qflush(sa, sw_index);
	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);
	rxq_info->state = SFC_RXQ_INITIALIZED;

fail_dp_qstart:
	efx_rx_qdestroy(rxq->common);

fail_rx_qcreate:
fail_bad_contig_block_size:
fail_mp_get_info:
	sfc_ev_qstop(evq);

fail_ev_qstart:
	return rc;
}

void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	if (rxq_info->state == SFC_RXQ_INITIALIZED)
		return;
	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	rxq = &sa->rxq_ctrl[sw_index];
	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);

	if (sw_index == 0)
		efx_mac_filter_default_rxq_clear(sa->nic);

	sfc_rx_qflush(sa, sw_index);

	rxq_info->state = SFC_RXQ_INITIALIZED;

	efx_rx_qdestroy(rxq->common);

	sfc_ev_qstop(rxq->evq);
}

static uint64_t
sfc_rx_get_offload_mask(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t no_caps = 0;

	if (encp->enc_tunnel_encapsulations_supported == 0)
		no_caps |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;

	return ~no_caps;
}

uint64_t
sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
{
	uint64_t caps = sa->priv.dp_rx->dev_offload_capa;

	caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;

	return caps & sfc_rx_get_offload_mask(sa);
}

uint64_t
sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
{
	return sa->priv.dp_rx->queue_offload_capa & sfc_rx_get_offload_mask(sa);
}

static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
		   const struct rte_eth_rxconf *rx_conf,
		   __rte_unused uint64_t offloads)
{
	int rc = 0;

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfc_warn(sa,
			"RxQ prefetch/host/writeback thresholds are not supported");
	}

	if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
		sfc_err(sa,
			"RxQ free threshold too large: %u vs maximum %u",
			rx_conf->rx_free_thresh, rxq_max_fill_level);
		rc = EINVAL;
	}

	if (rx_conf->rx_drop_en == 0) {
		sfc_err(sa, "RxQ drop disable is not supported");
		rc = EINVAL;
	}

	return rc;
}

static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
	uint32_t data_off;
	uint32_t order;

	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));

	return 1u << order;
}

static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	uint16_t buf_size;
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are power of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * mbuf is always cache line aligned, double-check
	 * that it meets rx buffer start alignment requirements.
	 */

	/* Start from mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
		sfc_err(sa,
			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
		return 0;
	}
	buf_size -= RTE_PKTMBUF_HEADROOM;

	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment);
			return 0;
		}
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;
	} else {
		start_alignment = 0;
	}

	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate space which can be lost. If guaranteed buffer
		 * size is odd, lost space is (nic_align_end - 1). More
		 * accurate formula is below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
		if (buf_size <= end_padding_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment,
				end_padding_alignment);
			return 0;
		}
		buf_size -= end_padding_alignment;
	} else {
		/*
		 * Start is aligned the same or better than end,
		 * just align length.
		 */
		buf_size = EFX_P2ALIGN(uint32_t, buf_size, nic_align_end);
	}

	return buf_size;
}

int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	int rc;
	unsigned int rxq_entries;
	unsigned int evq_entries;
	unsigned int rxq_max_fill_level;
	uint64_t offloads;
	uint16_t buf_size;
	struct sfc_rxq_info *rxq_info;
	struct sfc_evq *evq;
	struct sfc_rxq *rxq;
	struct sfc_dp_rx_qcreate_info info;
	struct sfc_dp_rx_hw_limits hw_limits;
	uint16_t rx_free_thresh;
	const char *error;

	memset(&hw_limits, 0, sizeof(hw_limits));
	hw_limits.rxq_max_entries = sa->rxq_max_entries;
	hw_limits.rxq_min_entries = sa->rxq_min_entries;
	hw_limits.evq_max_entries = sa->evq_max_entries;
	hw_limits.evq_min_entries = sa->evq_min_entries;

	rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, &hw_limits, mb_pool,
					    &rxq_entries, &evq_entries,
					    &rxq_max_fill_level);
	if (rc != 0)
		goto fail_size_up_rings;
	SFC_ASSERT(rxq_entries >= sa->rxq_min_entries);
	SFC_ASSERT(rxq_entries <= sa->rxq_max_entries);
	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);

	offloads = rx_conf->offloads |
		sa->eth_dev->data->dev_conf.rxmode.offloads;
	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
	if (rc != 0)
		goto fail_bad_conf;

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
	if (buf_size == 0) {
		sfc_err(sa, "RxQ %u mbuf pool object size is too small",
			sw_index);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
				  encp->enc_rx_prefix_size,
				  (offloads & DEV_RX_OFFLOAD_SCATTER),
				  &error)) {
		sfc_err(sa, "RxQ %u MTU check failed: %s", sw_index, error);
		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
			"PDU size %u plus Rx prefix %u bytes",
			sw_index, buf_size, (unsigned int)sa->port.pdu,
			encp->enc_rx_prefix_size);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
	rxq_info->entries = rxq_entries;

	if (sa->priv.dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
		rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
	else
		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;

	rxq_info->type_flags =
		(offloads & DEV_RX_OFFLOAD_SCATTER) ?
		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;

	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
	    (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
	     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
			  evq_entries, socket_id, &evq);
	if (rc != 0)
		goto fail_ev_qinit;

	rxq = &sa->rxq_ctrl[sw_index];
	rxq->evq = evq;
	rxq->hw_index = sw_index;
	/*
	 * If the Rx refill threshold is specified (non-zero) in the Rx
	 * configuration, use the specified value. Otherwise use 1/8 of
	 * the number of Rx descriptors as the default. This keeps
	 * the Rx ring full enough without refilling too aggressively
	 * when the packet rate is high.
	 *
	 * Since the PMD refills in bulks, refill may wait until a full
	 * bulk can be posted (effectively rounding the value down), so
	 * it is better to round up here to mitigate that a bit.
	 */
	rx_free_thresh = (rx_conf->rx_free_thresh != 0) ?
		rx_conf->rx_free_thresh : EFX_DIV_ROUND_UP(nb_rx_desc, 8);
	/* Rx refill threshold cannot be smaller than refill bulk */
	rxq_info->refill_threshold =
		RTE_MAX(rx_free_thresh, SFC_RX_REFILL_BULK);
	rxq_info->refill_mb_pool = mb_pool;
	rxq->buf_size = buf_size;

	rc = sfc_dma_alloc(sa, "rxq", sw_index,
			   efx_rxq_size(sa->nic, rxq_info->entries),
			   socket_id, &rxq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	memset(&info, 0, sizeof(info));
	info.refill_mb_pool = rxq_info->refill_mb_pool;
	info.max_fill_level = rxq_max_fill_level;
	info.refill_threshold = rxq_info->refill_threshold;
	info.buf_size = buf_size;
	info.batch_max = encp->enc_rx_batch_max;
	info.prefix_size = encp->enc_rx_prefix_size;

	if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0)
		info.flags |= SFC_RXQ_FLAG_RSS_HASH;

	info.rxq_entries = rxq_info->entries;
	info.rxq_hw_ring = rxq->mem.esm_base;
	info.evq_hw_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);
	info.evq_entries = evq_entries;
	info.evq_hw_ring = evq->mem.esm_base;
	info.hw_index = rxq->hw_index;
	info.mem_bar = sa->mem_bar.esb_base;
	info.vi_window_shift = encp->enc_vi_window_shift;

	rc = sa->priv.dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
				     &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
				     socket_id, &info, &rxq_info->dp);
	if (rc != 0)
		goto fail_dp_rx_qcreate;

	evq->dp_rxq = rxq_info->dp;

	rxq_info->state = SFC_RXQ_INITIALIZED;

	rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

	return 0;

fail_dp_rx_qcreate:
	sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
	sfc_ev_qfini(evq);

fail_ev_qinit:
	rxq_info->entries = 0;

fail_bad_conf:
fail_size_up_rings:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	sa->eth_dev->data->rx_queues[sw_index] = NULL;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);

	sa->priv.dp_rx->qdestroy(rxq_info->dp);
	rxq_info->dp = NULL;

	rxq_info->state &= ~SFC_RXQ_INITIALIZED;
	rxq_info->entries = 0;

	rxq = &sa->rxq_ctrl[sw_index];

	sfc_dma_free(sa, &rxq->mem);

	sfc_ev_qfini(rxq->evq);
	rxq->evq = NULL;
}

/*
 * Mapping between RTE RSS hash functions and their EFX counterparts.
 */
static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
	{ ETH_RSS_NONFRAG_IPV4_TCP,
	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
	{ ETH_RSS_NONFRAG_IPV4_UDP,
	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
	{ ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
	{ ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
	{ ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
	  EFX_RX_HASH(IPV4, 2TUPLE) },
	{ ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
	  ETH_RSS_IPV6_EX,
	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
	  EFX_RX_HASH(IPV6, 2TUPLE) }
};

static efx_rx_hash_type_t
sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
			    unsigned int *hash_type_flags_supported,
			    unsigned int nb_hash_type_flags_supported)
{
	efx_rx_hash_type_t hash_type_masked = 0;
	unsigned int i, j;

	for (i = 0; i < nb_hash_type_flags_supported; ++i) {
		unsigned int class_tuple_lbn[] = {
			EFX_RX_CLASS_IPV4_TCP_LBN,
			EFX_RX_CLASS_IPV4_UDP_LBN,
			EFX_RX_CLASS_IPV4_LBN,
			EFX_RX_CLASS_IPV6_TCP_LBN,
			EFX_RX_CLASS_IPV6_UDP_LBN,
			EFX_RX_CLASS_IPV6_LBN
		};

		for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
			unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;
			unsigned int flag;

			tuple_mask <<= class_tuple_lbn[j];
			flag = hash_type & tuple_mask;

			if (flag == hash_type_flags_supported[i])
				hash_type_masked |= flag;
		}
	}

	return hash_type_masked;
}

int
sfc_rx_hash_init(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
	efx_rx_hash_alg_t alg;
	unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
	unsigned int nb_flags_supp;
	struct sfc_rss_hf_rte_to_efx *hf_map;
	struct sfc_rss_hf_rte_to_efx *entry;
	efx_rx_hash_type_t efx_hash_types;
	unsigned int i;
	int rc;

	if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
		alg = EFX_RX_HASHALG_TOEPLITZ;
	else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
		alg = EFX_RX_HASHALG_PACKED_STREAM;
	else
		return EINVAL;

	rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
					 RTE_DIM(flags_supp), &nb_flags_supp);
	if (rc != 0)
		return rc;

	hf_map = rte_calloc_socket("sfc-rss-hf-map",
				   RTE_DIM(sfc_rss_hf_map),
				   sizeof(*hf_map), 0, sa->socket_id);
	if (hf_map == NULL)
		return ENOMEM;

	entry = hf_map;
	efx_hash_types = 0;
	for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
		efx_rx_hash_type_t ht;

		ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
						 flags_supp, nb_flags_supp);
		if (ht != 0) {
			entry->rte = sfc_rss_hf_map[i].rte;
			entry->efx = ht;
			efx_hash_types |= ht;
			++entry;
		}
	}

	rss->hash_alg = alg;
	rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
	rss->hf_map = hf_map;
	rss->hash_types = efx_hash_types;

	return 0;
}

void
sfc_rx_hash_fini(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;

	rte_free(rss->hf_map);
}

int
sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
		     efx_rx_hash_type_t *efx)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	efx_rx_hash_type_t hash_types = 0;
	unsigned int i;

	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
		uint64_t rte_mask = rss->hf_map[i].rte;

		if ((rte & rte_mask) != 0) {
			rte &= ~rte_mask;
			hash_types |= rss->hf_map[i].efx;
		}
	}

	if (rte != 0) {
		sfc_err(sa, "unsupported hash functions requested");
		return EINVAL;
	}

	*efx = hash_types;

	return 0;
}

uint64_t
sfc_rx_hf_efx_to_rte(struct sfc_rss *rss, efx_rx_hash_type_t efx)
{
	uint64_t rte = 0;
	unsigned int i;

	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
		efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;

		if ((efx & hash_type) == hash_type)
			rte |= rss->hf_map[i].rte;
	}

	return rte;
}

static int
sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
			    struct rte_eth_rss_conf *conf)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	efx_rx_hash_type_t efx_hash_types = rss->hash_types;
	uint64_t rss_hf = sfc_rx_hf_efx_to_rte(rss, efx_hash_types);
	int rc;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
		    conf->rss_key != NULL)
			return EINVAL;
	}

	if (conf->rss_hf != 0) {
		rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);
		if (rc != 0)
			return rc;
	}

	if (conf->rss_key != NULL) {
		if (conf->rss_key_len != sizeof(rss->key)) {
			sfc_err(sa, "RSS key size is wrong (should be %zu)",
				sizeof(rss->key));
			return EINVAL;
		}
		rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));
	}

	rss->hash_types = efx_hash_types;

	return 0;
}

static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	int rc = 0;

	if (rss->channels > 0) {
		rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					   rss->hash_alg, rss->hash_types,
					   B_TRUE);
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss->key, sizeof(rss->key));
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss->tbl, RTE_DIM(rss->tbl));
	}

finish:
	return rc;
}

int
sfc_rx_start(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	unsigned int sw_index;
	int rc;

	sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = sfc_rx_rss_config(sa);
	if (rc != 0)
		goto fail_rss_config;

	for (sw_index = 0; sw_index < sas->rxq_count; ++sw_index) {
		if (sas->rxq_info[sw_index].state == SFC_RXQ_INITIALIZED &&
		    (!sas->rxq_info[sw_index].deferred_start ||
		     sas->rxq_info[sw_index].deferred_started)) {
			rc = sfc_rx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_rx_qstart;
		}
	}

	return 0;

fail_rx_qstart:
	while (sw_index-- > 0)
		sfc_rx_qstop(sa, sw_index);

fail_rss_config:
	efx_rx_fini(sa->nic);

fail_rx_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_rx_stop(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);

	sw_index = sas->rxq_count;
	while (sw_index-- > 0) {
		if (sas->rxq_info[sw_index].state & SFC_RXQ_STARTED)
			sfc_rx_qstop(sa, sw_index);
	}

	efx_rx_fini(sa->nic);
}

static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	unsigned int max_entries;

	max_entries = encp->enc_rxq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;

	return 0;
}

static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
				      sfc_rx_get_queue_offload_caps(sa);
	struct sfc_rss *rss = &sas->rss;
	int rc = 0;

	switch (rxmode->mq_mode) {
	case ETH_MQ_RX_NONE:
		/* No special checks are required */
		break;
	case ETH_MQ_RX_RSS:
		if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
			sfc_err(sa, "RSS is not available");
			rc = EINVAL;
		}
		break;
	default:
		sfc_err(sa, "Rx multi-queue mode %u not supported",
			rxmode->mq_mode);
		rc = EINVAL;
	}

	/*
	 * Requested offloads are validated against those supported by
	 * ethdev, so unsupported offloads cannot be added as the result
	 * of the checks below.
	 */
	if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
	    (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
		rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
	}

	if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
	    (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
		rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	}

	if ((offloads_supported & DEV_RX_OFFLOAD_RSS_HASH) &&
	    (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG))
		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	return rc;
}

/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	int sw_index;

	SFC_ASSERT(nb_rx_queues <= sas->rxq_count);

	sw_index = sas->rxq_count;
	while (--sw_index >= (int)nb_rx_queues) {
		if (sas->rxq_info[sw_index].state & SFC_RXQ_INITIALIZED)
			sfc_rx_qfini(sa, sw_index);
	}

	sas->rxq_count = nb_rx_queues;
}

/**
 * Initialize Rx subsystem.
 *
 * Called at device (re)configuration stage when number of receive queues is
 * specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
1599 */ 1600 int 1601 sfc_rx_configure(struct sfc_adapter *sa) 1602 { 1603 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa); 1604 struct sfc_rss *rss = &sas->rss; 1605 struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf; 1606 const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues; 1607 int rc; 1608 1609 sfc_log_init(sa, "nb_rx_queues=%u (old %u)", 1610 nb_rx_queues, sas->rxq_count); 1611 1612 rc = sfc_rx_check_mode(sa, &dev_conf->rxmode); 1613 if (rc != 0) 1614 goto fail_check_mode; 1615 1616 if (nb_rx_queues == sas->rxq_count) 1617 goto configure_rss; 1618 1619 if (sas->rxq_info == NULL) { 1620 rc = ENOMEM; 1621 sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues, 1622 sizeof(sas->rxq_info[0]), 0, 1623 sa->socket_id); 1624 if (sas->rxq_info == NULL) 1625 goto fail_rxqs_alloc; 1626 1627 /* 1628 * Allocate primary process only RxQ control from heap 1629 * since it should not be shared. 1630 */ 1631 rc = ENOMEM; 1632 sa->rxq_ctrl = calloc(nb_rx_queues, sizeof(sa->rxq_ctrl[0])); 1633 if (sa->rxq_ctrl == NULL) 1634 goto fail_rxqs_ctrl_alloc; 1635 } else { 1636 struct sfc_rxq_info *new_rxq_info; 1637 struct sfc_rxq *new_rxq_ctrl; 1638 1639 if (nb_rx_queues < sas->rxq_count) 1640 sfc_rx_fini_queues(sa, nb_rx_queues); 1641 1642 rc = ENOMEM; 1643 new_rxq_info = 1644 rte_realloc(sas->rxq_info, 1645 nb_rx_queues * sizeof(sas->rxq_info[0]), 0); 1646 if (new_rxq_info == NULL && nb_rx_queues > 0) 1647 goto fail_rxqs_realloc; 1648 1649 rc = ENOMEM; 1650 new_rxq_ctrl = realloc(sa->rxq_ctrl, 1651 nb_rx_queues * sizeof(sa->rxq_ctrl[0])); 1652 if (new_rxq_ctrl == NULL && nb_rx_queues > 0) 1653 goto fail_rxqs_ctrl_realloc; 1654 1655 sas->rxq_info = new_rxq_info; 1656 sa->rxq_ctrl = new_rxq_ctrl; 1657 if (nb_rx_queues > sas->rxq_count) { 1658 memset(&sas->rxq_info[sas->rxq_count], 0, 1659 (nb_rx_queues - sas->rxq_count) * 1660 sizeof(sas->rxq_info[0])); 1661 memset(&sa->rxq_ctrl[sas->rxq_count], 0, 1662 (nb_rx_queues - sas->rxq_count) * 1663 sizeof(sa->rxq_ctrl[0])); 1664 } 1665 } 1666 1667 while (sas->rxq_count < nb_rx_queues) { 1668 rc = sfc_rx_qinit_info(sa, sas->rxq_count); 1669 if (rc != 0) 1670 goto fail_rx_qinit_info; 1671 1672 sas->rxq_count++; 1673 } 1674 1675 configure_rss: 1676 rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ? 1677 MIN(sas->rxq_count, EFX_MAXRSS) : 0; 1678 1679 if (rss->channels > 0) { 1680 struct rte_eth_rss_conf *adv_conf_rss; 1681 unsigned int sw_index; 1682 1683 for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index) 1684 rss->tbl[sw_index] = sw_index % rss->channels; 1685 1686 adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf; 1687 rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss); 1688 if (rc != 0) 1689 goto fail_rx_process_adv_conf_rss; 1690 } 1691 1692 return 0; 1693 1694 fail_rx_process_adv_conf_rss: 1695 fail_rx_qinit_info: 1696 fail_rxqs_ctrl_realloc: 1697 fail_rxqs_realloc: 1698 fail_rxqs_ctrl_alloc: 1699 fail_rxqs_alloc: 1700 sfc_rx_close(sa); 1701 1702 fail_check_mode: 1703 sfc_log_init(sa, "failed %d", rc); 1704 return rc; 1705 } 1706 1707 /** 1708 * Shutdown Rx subsystem. 1709 * 1710 * Called at device close stage, for example, before device shutdown. 1711 */ 1712 void 1713 sfc_rx_close(struct sfc_adapter *sa) 1714 { 1715 struct sfc_rss *rss = &sfc_sa2shared(sa)->rss; 1716 1717 sfc_rx_fini_queues(sa, 0); 1718 1719 rss->channels = 0; 1720 1721 free(sa->rxq_ctrl); 1722 sa->rxq_ctrl = NULL; 1723 1724 rte_free(sfc_sa2shared(sa)->rxq_info); 1725 sfc_sa2shared(sa)->rxq_info = NULL; 1726 } 1727