/*-
 * BSD LICENSE
 *
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)

void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
	rxq->state |= SFC_RXQ_FLUSHED;
	rxq->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
	rxq->state |= SFC_RXQ_FLUSH_FAILED;
	rxq->state &= ~SFC_RXQ_FLUSHING;
}

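/*
 * Refill the Rx ring with mbufs from the refill mempool.
 *
 * Buffers are allocated and posted in bulks of SFC_RX_REFILL_BULK and the
 * doorbell is rung once at the end; if a bulk allocation fails, whatever has
 * been posted so far is still pushed to the hardware. Refill is skipped
 * entirely while the free space in the ring is below refill_threshold.
 */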
static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	unsigned int id;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	uint16_t port_id = rxq->dp.dpq.port_id;

	free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
		(added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	id = added & rxq->ptr_mask;
	do {
		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			/*
			 * It is hardly a safe way to increment counter
			 * from different contexts, but all PMDs do it.
			 */
			rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
				RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push posted */
			break;
		}

		for (i = 0; i < RTE_DIM(objs);
		     ++i, id = (id + 1) & rxq->ptr_mask) {
			m = objs[i];

			rxd = &rxq->sw_desc[id];
			rxd->mbuf = m;

			rte_mbuf_refcnt_set(m, 1);
			m->data_off = RTE_PKTMBUF_HEADROOM;
			m->next = NULL;
			m->nb_segs = 1;
			m->port = port_id;

			addr[i] = rte_pktmbuf_mtophys(m);
		}

		efx_rx_qpost(rxq->common, addr, rxq->buf_size,
			     RTE_DIM(objs), rxq->completed, added);
		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(added != rxq->added);
	rxq->added = added;
	efx_rx_qpush(rxq->common, added, &rxq->pushed);
}

static uint64_t
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
	uint64_t mbuf_flags = 0;

	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
		mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
		break;
	case EFX_PKT_IPV4:
		mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
			   PKT_RX_IP_CKSUM_UNKNOWN);
		break;
	}

	switch ((desc_flags &
		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
		mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
		break;
	case EFX_PKT_TCP:
	case EFX_PKT_UDP:
		mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
			   PKT_RX_L4_CKSUM_UNKNOWN);
		break;
	}

	return mbuf_flags;
}

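/*
 * Map libefx Rx descriptor flags to an mbuf packet type. Only the L2/L3/L4
 * classes reported by the flags (Ethernet, IPv4/IPv6 with unknown extensions,
 * TCP/UDP) can be derived here; anything else is left unclassified.
 */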
static uint32_t
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
	return RTE_PTYPE_L2_ETHER |
		((desc_flags & EFX_PKT_IPV4) ?
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_IPV6) ?
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}

static const uint32_t *
sfc_efx_supported_ptypes_get(void)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
			struct rte_mbuf *m)
{
#if EFSYS_OPT_RX_SCALE
	uint8_t *mbuf_data;

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
		return;

	mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
						      EFX_RX_HASHALG_TOEPLITZ,
						      mbuf_data);

		m->ol_flags |= PKT_RX_RSS_HASH;
	}
#endif
}

static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_dp_rxq *dp_rxq = rx_queue;
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int completed;
	unsigned int prefix_size = rxq->prefix_size;
	unsigned int done_pkts = 0;
	boolean_t discard_next = B_FALSE;
	struct rte_mbuf *scatter_pkt = NULL;

	if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
		return 0;

	sfc_ev_qpoll(rxq->evq);

	completed = rxq->completed;
	while (completed != rxq->pending && done_pkts < nb_pkts) {
		unsigned int id;
		struct sfc_efx_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int seg_len;
		unsigned int desc_flags;

		id = completed++ & rxq->ptr_mask;
		rxd = &rxq->sw_desc[id];
		m = rxd->mbuf;
		desc_flags = rxd->flags;

		if (discard_next)
			goto discard;

		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
			goto discard;

		if (desc_flags & EFX_PKT_PREFIX_LEN) {
			uint16_t tmp_size;
			int rc __rte_unused;

			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
			SFC_ASSERT(rc == 0);
			seg_len = tmp_size;
		} else {
			seg_len = rxd->size - prefix_size;
		}

		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		if (scatter_pkt != NULL) {
			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
				rte_pktmbuf_free(scatter_pkt);
				goto discard;
			}
			/* The packet to deliver */
			m = scatter_pkt;
		}

		if (desc_flags & EFX_PKT_CONT) {
			/* The packet is scattered, more fragments to come */
			scatter_pkt = m;
			/* Further fragments have no prefix */
			prefix_size = 0;
			continue;
		}

		/* Scattered packet is done */
		scatter_pkt = NULL;
		/* The first fragment of the packet has prefix */
		prefix_size = rxq->prefix_size;

		m->ol_flags =
			sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
		m->packet_type =
			sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

		/*
		 * Extract RSS hash from the packet prefix and
		 * set the corresponding field (if needed and possible)
		 */
		sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

		m->data_off += prefix_size;

		*rx_pkts++ = m;
		done_pkts++;
		continue;

discard:
		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
		rte_mempool_put(rxq->refill_mb_pool, m);
		rxd->mbuf = NULL;
	}

	/* pending is only moved when entire packet is received */
	SFC_ASSERT(scatter_pkt == NULL);

	rxq->completed = completed;

	sfc_efx_rx_qrefill(rxq);

	return done_pkts;
}

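/*
 * Count Rx descriptors which have been completed by the hardware but not yet
 * retrieved by the application. The event queue is polled first, so the
 * result reflects the current hardware state; a queue which is not running
 * reports zero.
 */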
static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
static unsigned int
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
		return 0;

	sfc_ev_qpoll(rxq->evq);

	return rxq->pending - rxq->completed;
}

struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter *sa;
	struct sfc_rxq *rxq;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sa = eth_dev->data->dev_private;

	SFC_ASSERT(dpq->queue_id < sa->rxq_count);
	rxq = sa->rxq_info[dpq->queue_id].rxq;

	SFC_ASSERT(rxq != NULL);
	return rxq;
}

static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr, int socket_id,
		   const struct sfc_dp_rx_qcreate_info *info,
		   struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_efx_rxq *rxq;
	int rc;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
					 info->rxq_entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	/* efx datapath is bound to efx control path */
	rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->batch_max = info->batch_max;
	rxq->prefix_size = info->prefix_size;
	rxq->refill_threshold = info->refill_threshold;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
	return rc;
}

static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
static void
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_desc);
	rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
		  __rte_unused unsigned int evq_read_ptr)
{
	/* libefx-based datapath is specific to libefx-based PMD */
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);

	rxq->common = crxq->common;

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

	sfc_efx_rx_qrefill(rxq);

	rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

	return 0;
}

static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
		 __rte_unused unsigned int *evq_read_ptr)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;

	/* libefx-based datapath is bound to libefx-based PMD and uses
	 * event queue structure directly. So, there is no necessity to
	 * return EvQ read pointer.
	 */
}

static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
		rxd->mbuf = NULL;
		/* Packed stream relies on 0 in inactive SW desc.
		 * Rx queue stop is not performance critical, so
		 * there is no harm to do it always.
		 */
		rxd->flags = 0;
		rxd->size = 0;
	}

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}

struct sfc_dp_rx sfc_efx_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EFX,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= 0,
	},
	.features		= SFC_DP_RX_FEAT_SCATTER,
	.qcreate		= sfc_efx_rx_qcreate,
	.qdestroy		= sfc_efx_rx_qdestroy,
	.qstart			= sfc_efx_rx_qstart,
	.qstop			= sfc_efx_rx_qstop,
	.qpurge			= sfc_efx_rx_qpurge,
	.supported_ptypes_get	= sfc_efx_supported_ptypes_get,
	.qdesc_npending		= sfc_efx_rx_qdesc_npending,
	.pkt_burst		= sfc_efx_recv_pkts,
};

unsigned int
sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq = sa->rxq_info[sw_index].rxq;

	if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
		return 0;

	return sa->dp_rx->qdesc_npending(rxq->dp);
}

int
sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
{
	struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);

	return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
}

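/*
 * Flush the Rx queue and wait for the flush done or failed event.
 *
 * The worst-case wait is SFC_RX_QFLUSH_ATTEMPTS retries, each polling the
 * event queue up to SFC_RX_QFLUSH_POLL_ATTEMPTS times with a delay of
 * SFC_RX_QFLUSH_POLL_WAIT_MS between polls: 3 * 2000 * 1 ms = 6 seconds,
 * which is where the 6 seconds figure in the comment below comes from.
 */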
static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq *rxq;
	unsigned int retry_count;
	unsigned int wait_count;

	rxq = sa->rxq_info[sw_index].rxq;
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/*
	 * Retry Rx queue flushing in the case of flush failed or
	 * timeout. In the worst case it can delay for 6 seconds.
	 */
	for (retry_count = 0;
	     ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		if (efx_rx_qflush(rxq->common) != 0) {
			rxq->state |= SFC_RXQ_FLUSH_FAILED;
			break;
		}
		rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait for Rx queue flush done or failed event at least
		 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %u flush timed out", sw_index);

		if (rxq->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %u flush failed", sw_index);

		if (rxq->state & SFC_RXQ_FLUSHED)
			sfc_info(sa, "RxQ %u flushed", sw_index);
	}

	sa->dp_rx->qpurge(rxq->dp);
}

static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
	boolean_t rss = (sa->rss_channels > 1) ? B_TRUE : B_FALSE;
	struct sfc_port *port = &sa->port;
	int rc;

	/*
	 * If promiscuous or all-multicast mode has been requested, setting
	 * the filter for the default Rx queue might fail, in particular,
	 * while running over a PCI function which is not a member of the
	 * corresponding privilege groups; if this occurs, a few iterations
	 * will be made to repeat this step without the promiscuous and
	 * all-multicast flags set
	 */
retry:
	rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
	if (rc == 0)
		return 0;
	else if (rc != EOPNOTSUPP)
		return rc;

	if (port->promisc) {
		sfc_warn(sa, "promiscuous mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "promiscuous mode will be disabled");

		port->promisc = B_FALSE;
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	if (port->allmulti) {
		sfc_warn(sa, "all-multicast mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "all-multicast mode will be disabled");

		port->allmulti = B_FALSE;
		rc = sfc_set_rx_mode(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	return rc;
}

int
sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];
	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	evq = rxq->evq;

	rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
	if (rc != 0)
		goto fail_ev_qstart;

	rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
			    &rxq->mem, rxq_info->entries,
			    0 /* not used on EF10 */, evq->common,
			    &rxq->common);
	if (rc != 0)
		goto fail_rx_qcreate;

	efx_rx_qenable(rxq->common);

	rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
	if (rc != 0)
		goto fail_dp_qstart;

	rxq->state |= SFC_RXQ_STARTED;

	if (sw_index == 0) {
		rc = sfc_rx_default_rxq_set_filter(sa, rxq);
		if (rc != 0)
			goto fail_mac_filter_default_rxq_set;
	}

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_mac_filter_default_rxq_set:
	sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

fail_dp_qstart:
	sfc_rx_qflush(sa, sw_index);

fail_rx_qcreate:
	sfc_ev_qstop(evq);

fail_ev_qstart:
	return rc;
}

void
sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];
	rxq = rxq_info->rxq;

	if (rxq->state == SFC_RXQ_INITIALIZED)
		return;
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	sa->eth_dev->data->rx_queue_state[sw_index] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);

	if (sw_index == 0)
		efx_mac_filter_default_rxq_clear(sa->nic);

	sfc_rx_qflush(sa, sw_index);

	rxq->state = SFC_RXQ_INITIALIZED;

	efx_rx_qdestroy(rxq->common);

	sfc_ev_qstop(rxq->evq);
}

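/*
 * Validate the per-queue Rx configuration requested via queue setup.
 * Anything the datapath cannot honour (prefetch/host/writeback thresholds,
 * a free threshold above EFX_RXQ_LIMIT(nb_rx_desc), or disabling drop on
 * overflow) is reported and rejected with EINVAL.
 */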
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
		   const struct rte_eth_rxconf *rx_conf)
{
	const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);
	int rc = 0;

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfc_err(sa,
			"RxQ prefetch/host/writeback thresholds are not supported");
		rc = EINVAL;
	}

	if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
		sfc_err(sa,
			"RxQ free threshold too large: %u vs maximum %u",
			rx_conf->rx_free_thresh, rx_free_thresh_max);
		rc = EINVAL;
	}

	if (rx_conf->rx_drop_en == 0) {
		sfc_err(sa, "RxQ drop disable is not supported");
		rc = EINVAL;
	}

	return rc;
}

static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
	uint32_t data_off;
	uint32_t order;

	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));

	return 1u << (order - 1);
}

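/*
 * Work out how many bytes of a mempool object can safely be handed to the
 * hardware as an Rx buffer: take the data room, drop the headroom, then
 * account for the NIC start/end alignment constraints.
 *
 * Illustrative example (values are assumptions, not taken from this file):
 * with the common default data room of 2176 bytes and a 128-byte headroom,
 * 2048 bytes remain; if the guaranteed data alignment already satisfies a
 * hypothetical 64-byte start alignment, nothing is reserved at the start and
 * the length is simply aligned down to the end-padding requirement.
 */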
static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	uint16_t buf_size;
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are power of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * mbuf is always cache line aligned, double-check
	 * that it meets rx buffer start alignment requirements.
	 */

	/* Start from mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
		sfc_err(sa,
			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
		return 0;
	}
	buf_size -= RTE_PKTMBUF_HEADROOM;

	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment);
			return 0;
		}
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;
	} else {
		start_alignment = 0;
	}

	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate space which can be lost. If guaranteed buffer
		 * size is odd, lost space is (nic_align_end - 1). More
		 * accurate formula is below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
		if (buf_size <= end_padding_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment,
				end_padding_alignment);
			return 0;
		}
		buf_size -= end_padding_alignment;
	} else {
		/*
		 * Start is aligned the same or better than end,
		 * just align length.
		 */
		buf_size = P2ALIGN(buf_size, nic_align_end);
	}

	return buf_size;
}

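/*
 * Set up a single Rx queue: check the configuration, derive the Rx buffer
 * size from the mempool, create the event queue, allocate the control path
 * queue and its DMA ring, and finally let the selected datapath create its
 * own per-queue context. Everything is unwound in reverse order on failure.
 */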
int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	int rc;
	uint16_t buf_size;
	struct sfc_rxq_info *rxq_info;
	struct sfc_evq *evq;
	struct sfc_rxq *rxq;
	struct sfc_dp_rx_qcreate_info info;

	rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);
	if (rc != 0)
		goto fail_bad_conf;

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
	if (buf_size == 0) {
		sfc_err(sa, "RxQ %u mbuf pool object size is too small",
			sw_index);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
	    !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
			"object size is too small", sw_index);
		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
			"PDU size %u plus Rx prefix %u bytes",
			sw_index, buf_size, (unsigned int)sa->port.pdu,
			encp->enc_rx_prefix_size);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
	rxq_info->entries = nb_rx_desc;
	rxq_info->type =
		sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
		EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
			  rxq_info->entries, socket_id, &evq);
	if (rc != 0)
		goto fail_ev_qinit;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	rxq_info->rxq = rxq;

	rxq->evq = evq;
	rxq->hw_index = sw_index;
	rxq->refill_threshold =
		RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
	rxq->refill_mb_pool = mb_pool;

	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
			   socket_id, &rxq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	memset(&info, 0, sizeof(info));
	info.refill_mb_pool = rxq->refill_mb_pool;
	info.refill_threshold = rxq->refill_threshold;
	info.buf_size = buf_size;
	info.batch_max = encp->enc_rx_batch_max;
	info.prefix_size = encp->enc_rx_prefix_size;

#if EFSYS_OPT_RX_SCALE
	if (sa->hash_support == EFX_RX_HASH_AVAILABLE)
		info.flags |= SFC_RXQ_FLAG_RSS_HASH;
#endif

	info.rxq_entries = rxq_info->entries;
	info.rxq_hw_ring = rxq->mem.esm_base;
	info.evq_entries = rxq_info->entries;
	info.evq_hw_ring = evq->mem.esm_base;
	info.hw_index = rxq->hw_index;
	info.mem_bar = sa->mem_bar.esb_base;

	rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
				&SFC_DEV_TO_PCI(sa->eth_dev)->addr,
				socket_id, &info, &rxq->dp);
	if (rc != 0)
		goto fail_dp_rx_qcreate;

	evq->dp_rxq = rxq->dp;

	rxq->state = SFC_RXQ_INITIALIZED;

	rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

	return 0;

fail_dp_rx_qcreate:
	sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
	rxq_info->rxq = NULL;
	rte_free(rxq);

fail_rxq_alloc:
	sfc_ev_qfini(evq);

fail_ev_qinit:
	rxq_info->entries = 0;

fail_bad_conf:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];

	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	sa->dp_rx->qdestroy(rxq->dp);
	rxq->dp = NULL;

	rxq_info->rxq = NULL;
	rxq_info->entries = 0;

	sfc_dma_free(sa, &rxq->mem);

	sfc_ev_qfini(rxq->evq);
	rxq->evq = NULL;

	rte_free(rxq);
}

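/*
 * Conversions between DPDK ETH_RSS_* bits and libefx hash types. Note that
 * the mapping is many-to-one (e.g. ETH_RSS_IPV4, ETH_RSS_FRAG_IPV4 and
 * ETH_RSS_NONFRAG_IPV4_OTHER all map to EFX_RX_HASH_IPV4), so converting
 * back may report more rss_hf bits than were originally requested.
 */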
#if EFSYS_OPT_RX_SCALE
efx_rx_hash_type_t
sfc_rte_to_efx_hash_type(uint64_t rss_hf)
{
	efx_rx_hash_type_t efx_hash_types = 0;

	if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
		       ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
		efx_hash_types |= EFX_RX_HASH_IPV4;

	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
		efx_hash_types |= EFX_RX_HASH_TCPIPV4;

	if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
		       ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
		efx_hash_types |= EFX_RX_HASH_IPV6;

	if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
		efx_hash_types |= EFX_RX_HASH_TCPIPV6;

	return efx_hash_types;
}

uint64_t
sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
{
	uint64_t rss_hf = 0;

	if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
		rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			   ETH_RSS_NONFRAG_IPV4_OTHER);

	if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

	if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
		rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			   ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);

	if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
		rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);

	return rss_hf;
}
#endif

static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
	int rc = 0;

#if EFSYS_OPT_RX_SCALE
	if (sa->rss_channels > 1) {
		rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
					   sa->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_key_set(sa->nic, sa->rss_key,
					  sizeof(sa->rss_key));
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl,
					  sizeof(sa->rss_tbl));
	}

finish:
#endif
	return rc;
}

int
sfc_rx_start(struct sfc_adapter *sa)
{
	unsigned int sw_index;
	int rc;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = sfc_rx_rss_config(sa);
	if (rc != 0)
		goto fail_rss_config;

	for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
		if ((!sa->rxq_info[sw_index].deferred_start ||
		     sa->rxq_info[sw_index].deferred_started)) {
			rc = sfc_rx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_rx_qstart;
		}
	}

	return 0;

fail_rx_qstart:
	while (sw_index-- > 0)
		sfc_rx_qstop(sa, sw_index);

fail_rss_config:
	efx_rx_fini(sa->nic);

fail_rx_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_rx_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	sw_index = sa->rxq_count;
	while (sw_index-- > 0) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qstop(sa, sw_index);
	}

	efx_rx_fini(sa->nic);
}

static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
	unsigned int max_entries;

	max_entries = EFX_RXQ_MAXNDESCS;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;

	return 0;
}

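/*
 * Validate the device-level Rx mode requested at the configure stage.
 * Unsupported features (header split, HW VLAN filtering/stripping, Q-in-Q
 * stripping, LRO, and Rx scatter on a datapath without
 * SFC_DP_RX_FEAT_SCATTER) cause EINVAL; FCS stripping is always performed,
 * so that flag is only adjusted with a warning.
 */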
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
	int rc = 0;

	switch (rxmode->mq_mode) {
	case ETH_MQ_RX_NONE:
		/* No special checks are required */
		break;
#if EFSYS_OPT_RX_SCALE
	case ETH_MQ_RX_RSS:
		if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
			sfc_err(sa, "RSS is not available");
			rc = EINVAL;
		}
		break;
#endif
	default:
		sfc_err(sa, "Rx multi-queue mode %u not supported",
			rxmode->mq_mode);
		rc = EINVAL;
	}

	if (rxmode->header_split) {
		sfc_err(sa, "Header split on Rx not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		sfc_err(sa, "HW VLAN filtering not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_strip) {
		sfc_err(sa, "HW VLAN stripping not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		sfc_err(sa,
			"Q-in-Q HW VLAN stripping not supported");
		rc = EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		sfc_warn(sa,
			 "FCS stripping control not supported - always stripped");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->enable_scatter &&
	    (~sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)) {
		sfc_err(sa, "Rx scatter not supported by %s datapath",
			sa->dp_rx->dp.name);
		rc = EINVAL;
	}

	if (rxmode->enable_lro) {
		sfc_err(sa, "LRO not supported");
		rc = EINVAL;
	}

	return rc;
}

/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
	int sw_index;

	SFC_ASSERT(nb_rx_queues <= sa->rxq_count);

	sw_index = sa->rxq_count;
	while (--sw_index >= (int)nb_rx_queues) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qfini(sa, sw_index);
	}

	sa->rxq_count = nb_rx_queues;
}

/**
 * Initialize Rx subsystem.
 *
 * Called at device (re)configuration stage when number of receive queues is
 * specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_configure(struct sfc_adapter *sa)
{
	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
	unsigned int sw_index;
	int rc;

	sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
		     nb_rx_queues, sa->rxq_count);

	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
	if (rc != 0)
		goto fail_check_mode;

	if (nb_rx_queues == sa->rxq_count)
		goto done;

	if (sa->rxq_info == NULL) {
		rc = ENOMEM;
		sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
						 sizeof(sa->rxq_info[0]), 0,
						 sa->socket_id);
		if (sa->rxq_info == NULL)
			goto fail_rxqs_alloc;
	} else {
		struct sfc_rxq_info *new_rxq_info;

		if (nb_rx_queues < sa->rxq_count)
			sfc_rx_fini_queues(sa, nb_rx_queues);

		rc = ENOMEM;
		new_rxq_info =
			rte_realloc(sa->rxq_info,
				    nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
		if (new_rxq_info == NULL && nb_rx_queues > 0)
			goto fail_rxqs_realloc;

		sa->rxq_info = new_rxq_info;
		if (nb_rx_queues > sa->rxq_count)
			memset(&sa->rxq_info[sa->rxq_count], 0,
			       (nb_rx_queues - sa->rxq_count) *
			       sizeof(sa->rxq_info[0]));
	}

	while (sa->rxq_count < nb_rx_queues) {
		rc = sfc_rx_qinit_info(sa, sa->rxq_count);
		if (rc != 0)
			goto fail_rx_qinit_info;

		sa->rxq_count++;
	}

#if EFSYS_OPT_RX_SCALE
	sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
			   MIN(sa->rxq_count, EFX_MAXRSS) : 1;

	if (sa->rss_channels > 1) {
		for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
			sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
	}
#endif

done:
	return 0;

fail_rx_qinit_info:
fail_rxqs_realloc:
fail_rxqs_alloc:
	sfc_rx_close(sa);

fail_check_mode:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device shutdown.
 */
void
sfc_rx_close(struct sfc_adapter *sa)
{
	sfc_rx_fini_queues(sa, 0);

	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;
}