1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2016 - 2018 Cavium Inc. 3 * All rights reserved. 4 * www.cavium.com 5 */ 6 7 #include "qede_ethdev.h" 8 #include <rte_string_fns.h> 9 #include <rte_alarm.h> 10 #include <rte_kvargs.h> 11 12 static const struct qed_eth_ops *qed_ops; 13 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev); 14 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev); 15 16 #define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */ 17 18 struct rte_qede_xstats_name_off { 19 char name[RTE_ETH_XSTATS_NAME_SIZE]; 20 uint64_t offset; 21 }; 22 23 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = { 24 {"rx_unicast_bytes", 25 offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)}, 26 {"rx_multicast_bytes", 27 offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)}, 28 {"rx_broadcast_bytes", 29 offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)}, 30 {"rx_unicast_packets", 31 offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)}, 32 {"rx_multicast_packets", 33 offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)}, 34 {"rx_broadcast_packets", 35 offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)}, 36 37 {"tx_unicast_bytes", 38 offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)}, 39 {"tx_multicast_bytes", 40 offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)}, 41 {"tx_broadcast_bytes", 42 offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)}, 43 {"tx_unicast_packets", 44 offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)}, 45 {"tx_multicast_packets", 46 offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)}, 47 {"tx_broadcast_packets", 48 offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)}, 49 50 {"rx_64_byte_packets", 51 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)}, 52 {"rx_65_to_127_byte_packets", 53 offsetof(struct ecore_eth_stats_common, 54 rx_65_to_127_byte_packets)}, 55 {"rx_128_to_255_byte_packets", 56 offsetof(struct ecore_eth_stats_common, 57 rx_128_to_255_byte_packets)}, 58 {"rx_256_to_511_byte_packets", 59 offsetof(struct ecore_eth_stats_common, 60 rx_256_to_511_byte_packets)}, 61 {"rx_512_to_1023_byte_packets", 62 offsetof(struct ecore_eth_stats_common, 63 rx_512_to_1023_byte_packets)}, 64 {"rx_1024_to_1518_byte_packets", 65 offsetof(struct ecore_eth_stats_common, 66 rx_1024_to_1518_byte_packets)}, 67 {"tx_64_byte_packets", 68 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)}, 69 {"tx_65_to_127_byte_packets", 70 offsetof(struct ecore_eth_stats_common, 71 tx_65_to_127_byte_packets)}, 72 {"tx_128_to_255_byte_packets", 73 offsetof(struct ecore_eth_stats_common, 74 tx_128_to_255_byte_packets)}, 75 {"tx_256_to_511_byte_packets", 76 offsetof(struct ecore_eth_stats_common, 77 tx_256_to_511_byte_packets)}, 78 {"tx_512_to_1023_byte_packets", 79 offsetof(struct ecore_eth_stats_common, 80 tx_512_to_1023_byte_packets)}, 81 {"tx_1024_to_1518_byte_packets", 82 offsetof(struct ecore_eth_stats_common, 83 tx_1024_to_1518_byte_packets)}, 84 85 {"rx_mac_crtl_frames", 86 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)}, 87 {"tx_mac_control_frames", 88 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)}, 89 {"rx_pause_frames", 90 offsetof(struct ecore_eth_stats_common, rx_pause_frames)}, 91 {"tx_pause_frames", 92 offsetof(struct ecore_eth_stats_common, tx_pause_frames)}, 93 {"rx_priority_flow_control_frames", 94 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)}, 95 {"tx_priority_flow_control_frames", 96 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)}, 97 98 {"rx_crc_errors", 99 offsetof(struct ecore_eth_stats_common, rx_crc_errors)}, 100 {"rx_align_errors", 101 offsetof(struct ecore_eth_stats_common, rx_align_errors)}, 102 {"rx_carrier_errors", 103 offsetof(struct ecore_eth_stats_common, rx_carrier_errors)}, 104 {"rx_oversize_packet_errors", 105 offsetof(struct ecore_eth_stats_common, rx_oversize_packets)}, 106 {"rx_jabber_errors", 107 offsetof(struct ecore_eth_stats_common, rx_jabbers)}, 108 {"rx_undersize_packet_errors", 109 offsetof(struct ecore_eth_stats_common, rx_undersize_packets)}, 110 {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)}, 111 {"rx_host_buffer_not_available", 112 offsetof(struct ecore_eth_stats_common, no_buff_discards)}, 113 /* Number of packets discarded because they are bigger than MTU */ 114 {"rx_packet_too_big_discards", 115 offsetof(struct ecore_eth_stats_common, 116 packet_too_big_discard)}, 117 {"rx_ttl_zero_discards", 118 offsetof(struct ecore_eth_stats_common, ttl0_discard)}, 119 {"rx_multi_function_tag_filter_discards", 120 offsetof(struct ecore_eth_stats_common, mftag_filter_discards)}, 121 {"rx_mac_filter_discards", 122 offsetof(struct ecore_eth_stats_common, mac_filter_discards)}, 123 {"rx_gft_filter_drop", 124 offsetof(struct ecore_eth_stats_common, gft_filter_drop)}, 125 {"rx_hw_buffer_truncates", 126 offsetof(struct ecore_eth_stats_common, brb_truncates)}, 127 {"rx_hw_buffer_discards", 128 offsetof(struct ecore_eth_stats_common, brb_discards)}, 129 {"tx_error_drop_packets", 130 offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)}, 131 132 {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)}, 133 {"rx_mac_unicast_packets", 134 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)}, 135 {"rx_mac_multicast_packets", 136 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)}, 137 {"rx_mac_broadcast_packets", 138 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)}, 139 {"rx_mac_frames_ok", 140 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)}, 141 {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)}, 142 {"tx_mac_unicast_packets", 143 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)}, 144 {"tx_mac_multicast_packets", 145 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)}, 146 {"tx_mac_broadcast_packets", 147 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)}, 148 149 {"lro_coalesced_packets", 150 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)}, 151 {"lro_coalesced_events", 152 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)}, 153 {"lro_aborts_num", 154 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)}, 155 {"lro_not_coalesced_packets", 156 offsetof(struct ecore_eth_stats_common, 157 tpa_not_coalesced_pkts)}, 158 {"lro_coalesced_bytes", 159 offsetof(struct ecore_eth_stats_common, 160 tpa_coalesced_bytes)}, 161 }; 162 163 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = { 164 {"rx_1519_to_1522_byte_packets", 165 offsetof(struct ecore_eth_stats, bb) + 166 offsetof(struct ecore_eth_stats_bb, 167 rx_1519_to_1522_byte_packets)}, 168 {"rx_1519_to_2047_byte_packets", 169 offsetof(struct ecore_eth_stats, bb) + 170 offsetof(struct ecore_eth_stats_bb, 171 rx_1519_to_2047_byte_packets)}, 172 {"rx_2048_to_4095_byte_packets", 173 offsetof(struct ecore_eth_stats, bb) + 174 offsetof(struct ecore_eth_stats_bb, 175 rx_2048_to_4095_byte_packets)}, 176 {"rx_4096_to_9216_byte_packets", 177 offsetof(struct ecore_eth_stats, bb) + 178 offsetof(struct ecore_eth_stats_bb, 179 rx_4096_to_9216_byte_packets)}, 180 {"rx_9217_to_16383_byte_packets", 181 offsetof(struct ecore_eth_stats, bb) + 182 offsetof(struct ecore_eth_stats_bb, 183 rx_9217_to_16383_byte_packets)}, 184 185 {"tx_1519_to_2047_byte_packets", 186 offsetof(struct ecore_eth_stats, bb) + 187 offsetof(struct ecore_eth_stats_bb, 188 tx_1519_to_2047_byte_packets)}, 189 {"tx_2048_to_4095_byte_packets", 190 offsetof(struct ecore_eth_stats, bb) + 191 offsetof(struct ecore_eth_stats_bb, 192 tx_2048_to_4095_byte_packets)}, 193 {"tx_4096_to_9216_byte_packets", 194 offsetof(struct ecore_eth_stats, bb) + 195 offsetof(struct ecore_eth_stats_bb, 196 tx_4096_to_9216_byte_packets)}, 197 {"tx_9217_to_16383_byte_packets", 198 offsetof(struct ecore_eth_stats, bb) + 199 offsetof(struct ecore_eth_stats_bb, 200 tx_9217_to_16383_byte_packets)}, 201 202 {"tx_lpi_entry_count", 203 offsetof(struct ecore_eth_stats, bb) + 204 offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)}, 205 {"tx_total_collisions", 206 offsetof(struct ecore_eth_stats, bb) + 207 offsetof(struct ecore_eth_stats_bb, tx_total_collisions)}, 208 }; 209 210 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = { 211 {"rx_1519_to_max_byte_packets", 212 offsetof(struct ecore_eth_stats, ah) + 213 offsetof(struct ecore_eth_stats_ah, 214 rx_1519_to_max_byte_packets)}, 215 {"tx_1519_to_max_byte_packets", 216 offsetof(struct ecore_eth_stats, ah) + 217 offsetof(struct ecore_eth_stats_ah, 218 tx_1519_to_max_byte_packets)}, 219 }; 220 221 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = { 222 {"rx_q_segments", 223 offsetof(struct qede_rx_queue, rx_segs)}, 224 {"rx_q_hw_errors", 225 offsetof(struct qede_rx_queue, rx_hw_errors)}, 226 {"rx_q_allocation_errors", 227 offsetof(struct qede_rx_queue, rx_alloc_errors)} 228 }; 229 230 /* Get FW version string based on fw_size */ 231 static int 232 qede_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size) 233 { 234 struct qede_dev *qdev = dev->data->dev_private; 235 struct ecore_dev *edev = &qdev->edev; 236 struct qed_dev_info *info = &qdev->dev_info.common; 237 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; 238 size_t size; 239 240 if (fw_ver == NULL) 241 return 0; 242 243 if (IS_PF(edev)) 244 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", 245 QEDE_PMD_FW_VERSION); 246 else 247 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d", 248 info->fw_major, info->fw_minor, 249 info->fw_rev, info->fw_eng); 250 size = strlen(ver_str); 251 if (size + 1 <= fw_size) /* Add 1 byte for "\0" */ 252 strlcpy(fw_ver, ver_str, fw_size); 253 else 254 return (size + 1); 255 256 snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size), 257 " MFW: %d.%d.%d.%d", 258 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_3), 259 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_2), 260 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_1), 261 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_0)); 262 size = strlen(ver_str); 263 if (size + 1 <= fw_size) 264 strlcpy(fw_ver, ver_str, fw_size); 265 266 if (fw_size <= 32) 267 goto out; 268 269 snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size), 270 " MBI: %d.%d.%d", 271 GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_2), 272 GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_1), 273 GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_0)); 274 size = strlen(ver_str); 275 if (size + 1 <= fw_size) 276 strlcpy(fw_ver, ver_str, fw_size); 277 278 out: 279 return 0; 280 } 281 282 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) 283 { 284 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); 285 } 286 287 static void 288 qede_interrupt_handler_intx(void *param) 289 { 290 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 291 struct qede_dev *qdev = eth_dev->data->dev_private; 292 struct ecore_dev *edev = &qdev->edev; 293 u64 status; 294 295 /* Check if our device actually raised an interrupt */ 296 status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev)); 297 if (status & 0x1) { 298 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 299 300 if (rte_intr_ack(eth_dev->intr_handle)) 301 DP_ERR(edev, "rte_intr_ack failed\n"); 302 } 303 } 304 305 static void 306 qede_interrupt_handler(void *param) 307 { 308 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 309 struct qede_dev *qdev = eth_dev->data->dev_private; 310 struct ecore_dev *edev = &qdev->edev; 311 312 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 313 if (rte_intr_ack(eth_dev->intr_handle)) 314 DP_ERR(edev, "rte_intr_ack failed\n"); 315 } 316 317 static void 318 qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy) 319 { 320 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; 321 struct qede_dev *qdev = dev->data->dev_private; 322 struct ecore_dev *edev = &qdev->edev; 323 bool use_tx_offload = false; 324 325 if (is_dummy) { 326 dev->rx_pkt_burst = qede_rxtx_pkts_dummy; 327 dev->tx_pkt_burst = qede_rxtx_pkts_dummy; 328 return; 329 } 330 331 if (ECORE_IS_CMT(edev)) { 332 dev->rx_pkt_burst = qede_recv_pkts_cmt; 333 dev->tx_pkt_burst = qede_xmit_pkts_cmt; 334 return; 335 } 336 337 if (dev->data->lro || dev->data->scattered_rx) { 338 DP_INFO(edev, "Assigning qede_recv_pkts\n"); 339 dev->rx_pkt_burst = qede_recv_pkts; 340 } else { 341 DP_INFO(edev, "Assigning qede_recv_pkts_regular\n"); 342 dev->rx_pkt_burst = qede_recv_pkts_regular; 343 } 344 345 use_tx_offload = !!(tx_offloads & 346 (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */ 347 DEV_TX_OFFLOAD_TCP_TSO | /* tso */ 348 DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */ 349 350 if (use_tx_offload) { 351 DP_INFO(edev, "Assigning qede_xmit_pkts\n"); 352 dev->tx_pkt_burst = qede_xmit_pkts; 353 } else { 354 DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n"); 355 dev->tx_pkt_burst = qede_xmit_pkts_regular; 356 } 357 } 358 359 static void 360 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info) 361 { 362 rte_memcpy(&qdev->dev_info, info, sizeof(*info)); 363 qdev->ops = qed_ops; 364 } 365 366 static void qede_print_adapter_info(struct rte_eth_dev *dev) 367 { 368 struct qede_dev *qdev = dev->data->dev_private; 369 struct ecore_dev *edev = &qdev->edev; 370 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; 371 372 DP_INFO(edev, "**************************************************\n"); 373 DP_INFO(edev, " %-20s: %s\n", "DPDK version", rte_version()); 374 DP_INFO(edev, " %-20s: %s %c%d\n", "Chip details", 375 ECORE_IS_BB(edev) ? "BB" : "AH", 376 'A' + edev->chip_rev, 377 (int)edev->chip_metal); 378 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", 379 QEDE_PMD_DRV_VERSION); 380 DP_INFO(edev, " %-20s: %s\n", "Driver version", ver_str); 381 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", 382 QEDE_PMD_BASE_VERSION); 383 DP_INFO(edev, " %-20s: %s\n", "Base version", ver_str); 384 qede_fw_version_get(dev, ver_str, sizeof(ver_str)); 385 DP_INFO(edev, " %-20s: %s\n", "Firmware version", ver_str); 386 DP_INFO(edev, " %-20s: %s\n", "Firmware file", qede_fw_file); 387 DP_INFO(edev, "**************************************************\n"); 388 } 389 390 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats) 391 { 392 struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; 393 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 394 unsigned int i = 0, j = 0, qid; 395 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 396 struct qede_tx_queue *txq; 397 398 DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n"); 399 400 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev), 401 RTE_ETHDEV_QUEUE_STAT_CNTRS); 402 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev), 403 RTE_ETHDEV_QUEUE_STAT_CNTRS); 404 405 for (qid = 0; qid < qdev->num_rx_queues; qid++) { 406 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 407 offsetof(struct qede_rx_queue, rcv_pkts), 0, 408 sizeof(uint64_t)); 409 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 410 offsetof(struct qede_rx_queue, rx_hw_errors), 0, 411 sizeof(uint64_t)); 412 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 413 offsetof(struct qede_rx_queue, rx_alloc_errors), 0, 414 sizeof(uint64_t)); 415 416 if (xstats) 417 for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++) 418 OSAL_MEMSET((((char *) 419 (qdev->fp_array[qid].rxq)) + 420 qede_rxq_xstats_strings[j].offset), 421 0, 422 sizeof(uint64_t)); 423 424 i++; 425 if (i == rxq_stat_cntrs) 426 break; 427 } 428 429 i = 0; 430 431 for (qid = 0; qid < qdev->num_tx_queues; qid++) { 432 txq = qdev->fp_array[qid].txq; 433 434 OSAL_MEMSET((uint64_t *)(uintptr_t) 435 (((uint64_t)(uintptr_t)(txq)) + 436 offsetof(struct qede_tx_queue, xmit_pkts)), 0, 437 sizeof(uint64_t)); 438 439 i++; 440 if (i == txq_stat_cntrs) 441 break; 442 } 443 } 444 445 static int 446 qede_stop_vport(struct ecore_dev *edev) 447 { 448 struct ecore_hwfn *p_hwfn; 449 uint8_t vport_id; 450 int rc; 451 int i; 452 453 vport_id = 0; 454 for_each_hwfn(edev, i) { 455 p_hwfn = &edev->hwfns[i]; 456 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 457 vport_id); 458 if (rc != ECORE_SUCCESS) { 459 DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc); 460 return rc; 461 } 462 } 463 464 DP_INFO(edev, "vport stopped\n"); 465 466 return 0; 467 } 468 469 static int 470 qede_start_vport(struct qede_dev *qdev, uint16_t mtu) 471 { 472 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 473 struct ecore_sp_vport_start_params params; 474 struct ecore_hwfn *p_hwfn; 475 int rc; 476 int i; 477 478 if (qdev->vport_started) 479 qede_stop_vport(edev); 480 481 memset(¶ms, 0, sizeof(params)); 482 params.vport_id = 0; 483 params.mtu = mtu; 484 /* @DPDK - Disable FW placement */ 485 params.zero_placement_offset = 1; 486 for_each_hwfn(edev, i) { 487 p_hwfn = &edev->hwfns[i]; 488 params.concrete_fid = p_hwfn->hw_info.concrete_fid; 489 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 490 rc = ecore_sp_vport_start(p_hwfn, ¶ms); 491 if (rc != ECORE_SUCCESS) { 492 DP_ERR(edev, "Start V-PORT failed %d\n", rc); 493 return rc; 494 } 495 } 496 ecore_reset_vport_stats(edev); 497 qdev->vport_started = true; 498 DP_INFO(edev, "VPORT started with MTU = %u\n", mtu); 499 500 return 0; 501 } 502 503 #define QEDE_NPAR_TX_SWITCHING "npar_tx_switching" 504 #define QEDE_VF_TX_SWITCHING "vf_tx_switching" 505 506 /* Activate or deactivate vport via vport-update */ 507 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg) 508 { 509 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 510 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 511 struct ecore_sp_vport_update_params params; 512 struct ecore_hwfn *p_hwfn; 513 uint8_t i; 514 int rc = -1; 515 516 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 517 params.vport_id = 0; 518 params.update_vport_active_rx_flg = 1; 519 params.update_vport_active_tx_flg = 1; 520 params.vport_active_rx_flg = flg; 521 params.vport_active_tx_flg = flg; 522 if ((qdev->enable_tx_switching == false) && (flg == true)) { 523 params.update_tx_switching_flg = 1; 524 params.tx_switching_flg = !flg; 525 } 526 for_each_hwfn(edev, i) { 527 p_hwfn = &edev->hwfns[i]; 528 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 529 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 530 ECORE_SPQ_MODE_EBLOCK, NULL); 531 if (rc != ECORE_SUCCESS) { 532 DP_ERR(edev, "Failed to update vport\n"); 533 break; 534 } 535 } 536 DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated"); 537 538 return rc; 539 } 540 541 static void 542 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params, 543 uint16_t mtu, bool enable) 544 { 545 /* Enable LRO in split mode */ 546 sge_tpa_params->tpa_ipv4_en_flg = enable; 547 sge_tpa_params->tpa_ipv6_en_flg = enable; 548 sge_tpa_params->tpa_ipv4_tunn_en_flg = enable; 549 sge_tpa_params->tpa_ipv6_tunn_en_flg = enable; 550 /* set if tpa enable changes */ 551 sge_tpa_params->update_tpa_en_flg = 1; 552 /* set if tpa parameters should be handled */ 553 sge_tpa_params->update_tpa_param_flg = enable; 554 555 sge_tpa_params->max_buffers_per_cqe = 20; 556 /* Enable TPA in split mode. In this mode each TPA segment 557 * starts on the new BD, so there is one BD per segment. 558 */ 559 sge_tpa_params->tpa_pkt_split_flg = 1; 560 sge_tpa_params->tpa_hdr_data_split_flg = 0; 561 sge_tpa_params->tpa_gro_consistent_flg = 0; 562 sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 563 sge_tpa_params->tpa_max_size = 0x7FFF; 564 sge_tpa_params->tpa_min_size_to_start = mtu / 2; 565 sge_tpa_params->tpa_min_size_to_cont = mtu / 2; 566 } 567 568 /* Enable/disable LRO via vport-update */ 569 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg) 570 { 571 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 572 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 573 struct ecore_sp_vport_update_params params; 574 struct ecore_sge_tpa_params tpa_params; 575 struct ecore_hwfn *p_hwfn; 576 int rc; 577 int i; 578 579 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 580 memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params)); 581 qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg); 582 params.vport_id = 0; 583 params.sge_tpa_params = &tpa_params; 584 for_each_hwfn(edev, i) { 585 p_hwfn = &edev->hwfns[i]; 586 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 587 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 588 ECORE_SPQ_MODE_EBLOCK, NULL); 589 if (rc != ECORE_SUCCESS) { 590 DP_ERR(edev, "Failed to update LRO\n"); 591 return -1; 592 } 593 } 594 qdev->enable_lro = flg; 595 eth_dev->data->lro = flg; 596 597 DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled"); 598 599 return 0; 600 } 601 602 static int 603 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev, 604 enum qed_filter_rx_mode_type type) 605 { 606 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 607 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 608 struct ecore_filter_accept_flags flags; 609 610 memset(&flags, 0, sizeof(flags)); 611 612 flags.update_rx_mode_config = 1; 613 flags.update_tx_mode_config = 1; 614 flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 615 ECORE_ACCEPT_MCAST_MATCHED | 616 ECORE_ACCEPT_BCAST; 617 618 flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 619 ECORE_ACCEPT_MCAST_MATCHED | 620 ECORE_ACCEPT_BCAST; 621 622 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { 623 flags.rx_accept_filter |= (ECORE_ACCEPT_UCAST_UNMATCHED | 624 ECORE_ACCEPT_MCAST_UNMATCHED); 625 if (IS_VF(edev)) { 626 flags.tx_accept_filter |= 627 (ECORE_ACCEPT_UCAST_UNMATCHED | 628 ECORE_ACCEPT_MCAST_UNMATCHED); 629 DP_INFO(edev, "Enabling Tx unmatched flags for VF\n"); 630 } 631 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { 632 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 633 } 634 635 return ecore_filter_accept_cmd(edev, 0, flags, false, false, 636 ECORE_SPQ_MODE_CB, NULL); 637 } 638 639 int 640 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 641 bool add) 642 { 643 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 644 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 645 struct qede_ucast_entry *tmp = NULL; 646 struct qede_ucast_entry *u; 647 struct rte_ether_addr *mac_addr; 648 649 mac_addr = (struct rte_ether_addr *)ucast->mac; 650 if (add) { 651 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 652 if ((memcmp(mac_addr, &tmp->mac, 653 RTE_ETHER_ADDR_LEN) == 0) && 654 ucast->vni == tmp->vni && 655 ucast->vlan == tmp->vlan) { 656 DP_INFO(edev, "Unicast MAC is already added" 657 " with vlan = %u, vni = %u\n", 658 ucast->vlan, ucast->vni); 659 return 0; 660 } 661 } 662 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), 663 RTE_CACHE_LINE_SIZE); 664 if (!u) { 665 DP_ERR(edev, "Did not allocate memory for ucast\n"); 666 return -ENOMEM; 667 } 668 rte_ether_addr_copy(mac_addr, &u->mac); 669 u->vlan = ucast->vlan; 670 u->vni = ucast->vni; 671 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list); 672 qdev->num_uc_addr++; 673 } else { 674 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 675 if ((memcmp(mac_addr, &tmp->mac, 676 RTE_ETHER_ADDR_LEN) == 0) && 677 ucast->vlan == tmp->vlan && 678 ucast->vni == tmp->vni) 679 break; 680 } 681 if (tmp == NULL) { 682 DP_INFO(edev, "Unicast MAC is not found\n"); 683 return -EINVAL; 684 } 685 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list); 686 qdev->num_uc_addr--; 687 } 688 689 return 0; 690 } 691 692 static int 693 qede_add_mcast_filters(struct rte_eth_dev *eth_dev, 694 struct rte_ether_addr *mc_addrs, 695 uint32_t mc_addrs_num) 696 { 697 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 698 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 699 struct ecore_filter_mcast mcast; 700 struct qede_mcast_entry *m = NULL; 701 uint8_t i; 702 int rc; 703 704 for (i = 0; i < mc_addrs_num; i++) { 705 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), 706 RTE_CACHE_LINE_SIZE); 707 if (!m) { 708 DP_ERR(edev, "Did not allocate memory for mcast\n"); 709 return -ENOMEM; 710 } 711 rte_ether_addr_copy(&mc_addrs[i], &m->mac); 712 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list); 713 } 714 memset(&mcast, 0, sizeof(mcast)); 715 mcast.num_mc_addrs = mc_addrs_num; 716 mcast.opcode = ECORE_FILTER_ADD; 717 for (i = 0; i < mc_addrs_num; i++) 718 rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *) 719 &mcast.mac[i]); 720 rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); 721 if (rc != ECORE_SUCCESS) { 722 DP_ERR(edev, "Failed to add multicast filter (rc = %d\n)", rc); 723 return -1; 724 } 725 726 return 0; 727 } 728 729 static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev) 730 { 731 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 732 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 733 struct qede_mcast_entry *tmp = NULL; 734 struct ecore_filter_mcast mcast; 735 int j; 736 int rc; 737 738 memset(&mcast, 0, sizeof(mcast)); 739 mcast.num_mc_addrs = qdev->num_mc_addr; 740 mcast.opcode = ECORE_FILTER_REMOVE; 741 j = 0; 742 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 743 rte_ether_addr_copy(&tmp->mac, 744 (struct rte_ether_addr *)&mcast.mac[j]); 745 j++; 746 } 747 rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); 748 if (rc != ECORE_SUCCESS) { 749 DP_ERR(edev, "Failed to delete multicast filter\n"); 750 return -1; 751 } 752 /* Init the list */ 753 while (!SLIST_EMPTY(&qdev->mc_list_head)) { 754 tmp = SLIST_FIRST(&qdev->mc_list_head); 755 SLIST_REMOVE_HEAD(&qdev->mc_list_head, list); 756 } 757 SLIST_INIT(&qdev->mc_list_head); 758 759 return 0; 760 } 761 762 enum _ecore_status_t 763 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 764 bool add) 765 { 766 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 767 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 768 enum _ecore_status_t rc = ECORE_INVAL; 769 770 if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) { 771 DP_ERR(edev, "Ucast filter table limit exceeded," 772 " Please enable promisc mode\n"); 773 return ECORE_INVAL; 774 } 775 776 rc = qede_ucast_filter(eth_dev, ucast, add); 777 if (rc == 0) 778 rc = ecore_filter_ucast_cmd(edev, ucast, 779 ECORE_SPQ_MODE_CB, NULL); 780 /* Indicate error only for add filter operation. 781 * Delete filter operations are not severe. 782 */ 783 if ((rc != ECORE_SUCCESS) && add) 784 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", 785 rc, add); 786 787 return rc; 788 } 789 790 static int 791 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr, 792 __rte_unused uint32_t index, __rte_unused uint32_t pool) 793 { 794 struct ecore_filter_ucast ucast; 795 int re; 796 797 if (!rte_is_valid_assigned_ether_addr(mac_addr)) 798 return -EINVAL; 799 800 qede_set_ucast_cmn_params(&ucast); 801 ucast.opcode = ECORE_FILTER_ADD; 802 ucast.type = ECORE_FILTER_MAC; 803 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac); 804 re = (int)qede_mac_int_ops(eth_dev, &ucast, 1); 805 return re; 806 } 807 808 static void 809 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) 810 { 811 struct qede_dev *qdev = eth_dev->data->dev_private; 812 struct ecore_dev *edev = &qdev->edev; 813 struct ecore_filter_ucast ucast; 814 815 PMD_INIT_FUNC_TRACE(edev); 816 817 if (index >= qdev->dev_info.num_mac_filters) { 818 DP_ERR(edev, "Index %u is above MAC filter limit %u\n", 819 index, qdev->dev_info.num_mac_filters); 820 return; 821 } 822 823 if (!rte_is_valid_assigned_ether_addr(ð_dev->data->mac_addrs[index])) 824 return; 825 826 qede_set_ucast_cmn_params(&ucast); 827 ucast.opcode = ECORE_FILTER_REMOVE; 828 ucast.type = ECORE_FILTER_MAC; 829 830 /* Use the index maintained by rte */ 831 rte_ether_addr_copy(ð_dev->data->mac_addrs[index], 832 (struct rte_ether_addr *)&ucast.mac); 833 834 qede_mac_int_ops(eth_dev, &ucast, false); 835 } 836 837 static int 838 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr) 839 { 840 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 841 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 842 843 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), 844 mac_addr->addr_bytes)) { 845 DP_ERR(edev, "Setting MAC address is not allowed\n"); 846 return -EPERM; 847 } 848 849 qede_mac_addr_remove(eth_dev, 0); 850 851 return qede_mac_addr_add(eth_dev, mac_addr, 0, 0); 852 } 853 854 void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg) 855 { 856 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 857 struct ecore_sp_vport_update_params params; 858 struct ecore_hwfn *p_hwfn; 859 uint8_t i; 860 int rc; 861 862 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 863 params.vport_id = 0; 864 params.update_accept_any_vlan_flg = 1; 865 params.accept_any_vlan = flg; 866 for_each_hwfn(edev, i) { 867 p_hwfn = &edev->hwfns[i]; 868 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 869 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 870 ECORE_SPQ_MODE_EBLOCK, NULL); 871 if (rc != ECORE_SUCCESS) { 872 DP_ERR(edev, "Failed to configure accept-any-vlan\n"); 873 return; 874 } 875 } 876 877 DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled"); 878 } 879 880 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg) 881 { 882 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 883 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 884 struct ecore_sp_vport_update_params params; 885 struct ecore_hwfn *p_hwfn; 886 uint8_t i; 887 int rc; 888 889 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 890 params.vport_id = 0; 891 params.update_inner_vlan_removal_flg = 1; 892 params.inner_vlan_removal_flg = flg; 893 for_each_hwfn(edev, i) { 894 p_hwfn = &edev->hwfns[i]; 895 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 896 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 897 ECORE_SPQ_MODE_EBLOCK, NULL); 898 if (rc != ECORE_SUCCESS) { 899 DP_ERR(edev, "Failed to update vport\n"); 900 return -1; 901 } 902 } 903 904 qdev->vlan_strip_flg = flg; 905 906 DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled"); 907 return 0; 908 } 909 910 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, 911 uint16_t vlan_id, int on) 912 { 913 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 914 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 915 struct qed_dev_eth_info *dev_info = &qdev->dev_info; 916 struct qede_vlan_entry *tmp = NULL; 917 struct qede_vlan_entry *vlan; 918 struct ecore_filter_ucast ucast; 919 int rc; 920 921 if (on) { 922 if (qdev->configured_vlans == dev_info->num_vlan_filters) { 923 DP_ERR(edev, "Reached max VLAN filter limit" 924 " enabling accept_any_vlan\n"); 925 qede_config_accept_any_vlan(qdev, true); 926 return 0; 927 } 928 929 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 930 if (tmp->vid == vlan_id) { 931 DP_INFO(edev, "VLAN %u already configured\n", 932 vlan_id); 933 return 0; 934 } 935 } 936 937 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry), 938 RTE_CACHE_LINE_SIZE); 939 940 if (!vlan) { 941 DP_ERR(edev, "Did not allocate memory for VLAN\n"); 942 return -ENOMEM; 943 } 944 945 qede_set_ucast_cmn_params(&ucast); 946 ucast.opcode = ECORE_FILTER_ADD; 947 ucast.type = ECORE_FILTER_VLAN; 948 ucast.vlan = vlan_id; 949 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 950 NULL); 951 if (rc != 0) { 952 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id, 953 rc); 954 rte_free(vlan); 955 } else { 956 vlan->vid = vlan_id; 957 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list); 958 qdev->configured_vlans++; 959 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n", 960 vlan_id, qdev->configured_vlans); 961 } 962 } else { 963 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 964 if (tmp->vid == vlan_id) 965 break; 966 } 967 968 if (!tmp) { 969 if (qdev->configured_vlans == 0) { 970 DP_INFO(edev, 971 "No VLAN filters configured yet\n"); 972 return 0; 973 } 974 975 DP_ERR(edev, "VLAN %u not configured\n", vlan_id); 976 return -EINVAL; 977 } 978 979 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list); 980 981 qede_set_ucast_cmn_params(&ucast); 982 ucast.opcode = ECORE_FILTER_REMOVE; 983 ucast.type = ECORE_FILTER_VLAN; 984 ucast.vlan = vlan_id; 985 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 986 NULL); 987 if (rc != 0) { 988 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", 989 vlan_id, rc); 990 } else { 991 qdev->configured_vlans--; 992 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n", 993 vlan_id, qdev->configured_vlans); 994 } 995 } 996 997 return rc; 998 } 999 1000 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 1001 { 1002 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1003 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1004 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1005 1006 if (mask & ETH_VLAN_STRIP_MASK) { 1007 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 1008 (void)qede_vlan_stripping(eth_dev, 1); 1009 else 1010 (void)qede_vlan_stripping(eth_dev, 0); 1011 } 1012 1013 if (mask & ETH_VLAN_FILTER_MASK) { 1014 /* VLAN filtering kicks in when a VLAN is added */ 1015 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 1016 qede_vlan_filter_set(eth_dev, 0, 1); 1017 } else { 1018 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ 1019 DP_ERR(edev, 1020 " Please remove existing VLAN filters" 1021 " before disabling VLAN filtering\n"); 1022 /* Signal app that VLAN filtering is still 1023 * enabled 1024 */ 1025 eth_dev->data->dev_conf.rxmode.offloads |= 1026 DEV_RX_OFFLOAD_VLAN_FILTER; 1027 } else { 1028 qede_vlan_filter_set(eth_dev, 0, 0); 1029 } 1030 } 1031 } 1032 1033 qdev->vlan_offload_mask = mask; 1034 1035 DP_INFO(edev, "VLAN offload mask %d\n", mask); 1036 1037 return 0; 1038 } 1039 1040 static void qede_prandom_bytes(uint32_t *buff) 1041 { 1042 uint8_t i; 1043 1044 srand((unsigned int)time(NULL)); 1045 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 1046 buff[i] = rand(); 1047 } 1048 1049 int qede_config_rss(struct rte_eth_dev *eth_dev) 1050 { 1051 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1052 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1053 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE]; 1054 struct rte_eth_rss_reta_entry64 reta_conf[2]; 1055 struct rte_eth_rss_conf rss_conf; 1056 uint32_t i, id, pos, q; 1057 1058 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 1059 if (!rss_conf.rss_key) { 1060 DP_INFO(edev, "Applying driver default key\n"); 1061 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 1062 qede_prandom_bytes(&def_rss_key[0]); 1063 rss_conf.rss_key = (uint8_t *)&def_rss_key[0]; 1064 } 1065 1066 /* Configure RSS hash */ 1067 if (qede_rss_hash_update(eth_dev, &rss_conf)) 1068 return -EINVAL; 1069 1070 /* Configure default RETA */ 1071 memset(reta_conf, 0, sizeof(reta_conf)); 1072 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) 1073 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX; 1074 1075 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 1076 id = i / RTE_RETA_GROUP_SIZE; 1077 pos = i % RTE_RETA_GROUP_SIZE; 1078 q = i % QEDE_RSS_COUNT(eth_dev); 1079 reta_conf[id].reta[pos] = q; 1080 } 1081 if (qede_rss_reta_update(eth_dev, &reta_conf[0], 1082 ECORE_RSS_IND_TABLE_SIZE)) 1083 return -EINVAL; 1084 1085 return 0; 1086 } 1087 1088 static void qede_fastpath_start(struct ecore_dev *edev) 1089 { 1090 struct ecore_hwfn *p_hwfn; 1091 int i; 1092 1093 for_each_hwfn(edev, i) { 1094 p_hwfn = &edev->hwfns[i]; 1095 ecore_hw_start_fastpath(p_hwfn); 1096 } 1097 } 1098 1099 static int qede_dev_start(struct rte_eth_dev *eth_dev) 1100 { 1101 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1102 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1103 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 1104 1105 PMD_INIT_FUNC_TRACE(edev); 1106 1107 /* Update MTU only if it has changed */ 1108 if (qdev->new_mtu && qdev->new_mtu != qdev->mtu) { 1109 if (qede_update_mtu(eth_dev, qdev->new_mtu)) 1110 goto err; 1111 qdev->mtu = qdev->new_mtu; 1112 qdev->new_mtu = 0; 1113 } 1114 1115 /* Configure TPA parameters */ 1116 if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) { 1117 if (qede_enable_tpa(eth_dev, true)) 1118 return -EINVAL; 1119 /* Enable scatter mode for LRO */ 1120 if (!eth_dev->data->scattered_rx) 1121 rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER; 1122 } 1123 1124 /* Start queues */ 1125 if (qede_start_queues(eth_dev)) 1126 goto err; 1127 1128 if (IS_PF(edev)) 1129 qede_reset_queue_stats(qdev, true); 1130 1131 /* Newer SR-IOV PF driver expects RX/TX queues to be started before 1132 * enabling RSS. Hence RSS configuration is deferred up to this point. 1133 * Also, we would like to retain similar behavior in PF case, so we 1134 * don't do PF/VF specific check here. 1135 */ 1136 if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) 1137 if (qede_config_rss(eth_dev)) 1138 goto err; 1139 1140 /* Enable vport*/ 1141 if (qede_activate_vport(eth_dev, true)) 1142 goto err; 1143 1144 /* Bring-up the link */ 1145 qede_dev_set_link_state(eth_dev, true); 1146 1147 /* Update link status */ 1148 qede_link_update(eth_dev, 0); 1149 1150 /* Start/resume traffic */ 1151 qede_fastpath_start(edev); 1152 1153 /* Assign I/O handlers */ 1154 qede_assign_rxtx_handlers(eth_dev, false); 1155 1156 DP_INFO(edev, "Device started\n"); 1157 1158 return 0; 1159 err: 1160 DP_ERR(edev, "Device start fails\n"); 1161 return -1; /* common error code is < 0 */ 1162 } 1163 1164 static void qede_dev_stop(struct rte_eth_dev *eth_dev) 1165 { 1166 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1167 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1168 1169 PMD_INIT_FUNC_TRACE(edev); 1170 1171 /* Bring the link down */ 1172 qede_dev_set_link_state(eth_dev, false); 1173 1174 /* Update link status */ 1175 qede_link_update(eth_dev, 0); 1176 1177 /* Replace I/O functions with dummy ones. It cannot 1178 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. 1179 */ 1180 qede_assign_rxtx_handlers(eth_dev, true); 1181 1182 /* Disable vport */ 1183 if (qede_activate_vport(eth_dev, false)) 1184 return; 1185 1186 if (qdev->enable_lro) 1187 qede_enable_tpa(eth_dev, false); 1188 1189 /* Stop queues */ 1190 qede_stop_queues(eth_dev); 1191 1192 /* Disable traffic */ 1193 ecore_hw_stop_fastpath(edev); /* TBD - loop */ 1194 1195 DP_INFO(edev, "Device is stopped\n"); 1196 } 1197 1198 static const char * const valid_args[] = { 1199 QEDE_NPAR_TX_SWITCHING, 1200 QEDE_VF_TX_SWITCHING, 1201 NULL, 1202 }; 1203 1204 static int qede_args_check(const char *key, const char *val, void *opaque) 1205 { 1206 unsigned long tmp; 1207 int ret = 0; 1208 struct rte_eth_dev *eth_dev = opaque; 1209 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1210 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1211 1212 errno = 0; 1213 tmp = strtoul(val, NULL, 0); 1214 if (errno) { 1215 DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val); 1216 return errno; 1217 } 1218 1219 if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) || 1220 ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) { 1221 qdev->enable_tx_switching = !!tmp; 1222 DP_INFO(edev, "Disabling %s tx-switching\n", 1223 strcmp(QEDE_NPAR_TX_SWITCHING, key) ? 1224 "VF" : "NPAR"); 1225 } 1226 1227 return ret; 1228 } 1229 1230 static int qede_args(struct rte_eth_dev *eth_dev) 1231 { 1232 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 1233 struct rte_kvargs *kvlist; 1234 struct rte_devargs *devargs; 1235 int ret; 1236 int i; 1237 1238 devargs = pci_dev->device.devargs; 1239 if (!devargs) 1240 return 0; /* return success */ 1241 1242 kvlist = rte_kvargs_parse(devargs->args, valid_args); 1243 if (kvlist == NULL) 1244 return -EINVAL; 1245 1246 /* Process parameters. */ 1247 for (i = 0; (valid_args[i] != NULL); ++i) { 1248 if (rte_kvargs_count(kvlist, valid_args[i])) { 1249 ret = rte_kvargs_process(kvlist, valid_args[i], 1250 qede_args_check, eth_dev); 1251 if (ret != ECORE_SUCCESS) { 1252 rte_kvargs_free(kvlist); 1253 return ret; 1254 } 1255 } 1256 } 1257 rte_kvargs_free(kvlist); 1258 1259 return 0; 1260 } 1261 1262 static int qede_dev_configure(struct rte_eth_dev *eth_dev) 1263 { 1264 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1265 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1266 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 1267 uint8_t num_rxqs; 1268 uint8_t num_txqs; 1269 int ret; 1270 1271 PMD_INIT_FUNC_TRACE(edev); 1272 1273 if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG) 1274 rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH; 1275 1276 /* We need to have min 1 RX queue.There is no min check in 1277 * rte_eth_dev_configure(), so we are checking it here. 1278 */ 1279 if (eth_dev->data->nb_rx_queues == 0) { 1280 DP_ERR(edev, "Minimum one RX queue is required\n"); 1281 return -EINVAL; 1282 } 1283 1284 /* Enable Tx switching by default */ 1285 qdev->enable_tx_switching = 1; 1286 1287 /* Parse devargs and fix up rxmode */ 1288 if (qede_args(eth_dev)) 1289 DP_NOTICE(edev, false, 1290 "Invalid devargs supplied, requested change will not take effect\n"); 1291 1292 if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || 1293 rxmode->mq_mode == ETH_MQ_RX_RSS)) { 1294 DP_ERR(edev, "Unsupported multi-queue mode\n"); 1295 return -ENOTSUP; 1296 } 1297 /* Flow director mode check */ 1298 if (qede_check_fdir_support(eth_dev)) 1299 return -ENOTSUP; 1300 1301 /* Allocate/reallocate fastpath resources only for new queue config */ 1302 num_txqs = eth_dev->data->nb_tx_queues * edev->num_hwfns; 1303 num_rxqs = eth_dev->data->nb_rx_queues * edev->num_hwfns; 1304 if (qdev->num_tx_queues != num_txqs || 1305 qdev->num_rx_queues != num_rxqs) { 1306 qede_dealloc_fp_resc(eth_dev); 1307 qdev->num_tx_queues = num_txqs; 1308 qdev->num_rx_queues = num_rxqs; 1309 if (qede_alloc_fp_resc(qdev)) 1310 return -ENOMEM; 1311 } 1312 1313 /* If jumbo enabled adjust MTU */ 1314 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 1315 eth_dev->data->mtu = 1316 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - 1317 RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD; 1318 1319 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) 1320 eth_dev->data->scattered_rx = 1; 1321 1322 if (qede_start_vport(qdev, eth_dev->data->mtu)) 1323 return -1; 1324 1325 qdev->mtu = eth_dev->data->mtu; 1326 1327 /* Enable VLAN offloads by default */ 1328 ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | 1329 ETH_VLAN_FILTER_MASK); 1330 if (ret) 1331 return ret; 1332 1333 DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n", 1334 QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev)); 1335 1336 if (ECORE_IS_CMT(edev)) 1337 DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n", 1338 qdev->num_rx_queues, qdev->num_tx_queues); 1339 1340 1341 return 0; 1342 } 1343 1344 /* Info about HW descriptor ring limitations */ 1345 static const struct rte_eth_desc_lim qede_rx_desc_lim = { 1346 .nb_max = 0x8000, /* 32K */ 1347 .nb_min = 128, 1348 .nb_align = 128 /* lowest common multiple */ 1349 }; 1350 1351 static const struct rte_eth_desc_lim qede_tx_desc_lim = { 1352 .nb_max = 0x8000, /* 32K */ 1353 .nb_min = 256, 1354 .nb_align = 256, 1355 .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET, 1356 .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 1357 }; 1358 1359 static int 1360 qede_dev_info_get(struct rte_eth_dev *eth_dev, 1361 struct rte_eth_dev_info *dev_info) 1362 { 1363 struct qede_dev *qdev = eth_dev->data->dev_private; 1364 struct ecore_dev *edev = &qdev->edev; 1365 struct qed_link_output link; 1366 uint32_t speed_cap = 0; 1367 1368 PMD_INIT_FUNC_TRACE(edev); 1369 1370 dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; 1371 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; 1372 dev_info->rx_desc_lim = qede_rx_desc_lim; 1373 dev_info->tx_desc_lim = qede_tx_desc_lim; 1374 1375 if (IS_PF(edev)) 1376 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1377 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2); 1378 else 1379 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1380 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF); 1381 /* Since CMT mode internally doubles the number of queues */ 1382 if (ECORE_IS_CMT(edev)) 1383 dev_info->max_rx_queues = dev_info->max_rx_queues / 2; 1384 1385 dev_info->max_tx_queues = dev_info->max_rx_queues; 1386 1387 dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters; 1388 dev_info->max_vfs = 0; 1389 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; 1390 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 1391 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; 1392 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 1393 DEV_RX_OFFLOAD_UDP_CKSUM | 1394 DEV_RX_OFFLOAD_TCP_CKSUM | 1395 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1396 DEV_RX_OFFLOAD_TCP_LRO | 1397 DEV_RX_OFFLOAD_KEEP_CRC | 1398 DEV_RX_OFFLOAD_SCATTER | 1399 DEV_RX_OFFLOAD_JUMBO_FRAME | 1400 DEV_RX_OFFLOAD_VLAN_FILTER | 1401 DEV_RX_OFFLOAD_VLAN_STRIP | 1402 DEV_RX_OFFLOAD_RSS_HASH); 1403 dev_info->rx_queue_offload_capa = 0; 1404 1405 /* TX offloads are on a per-packet basis, so it is applicable 1406 * to both at port and queue levels. 1407 */ 1408 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | 1409 DEV_TX_OFFLOAD_IPV4_CKSUM | 1410 DEV_TX_OFFLOAD_UDP_CKSUM | 1411 DEV_TX_OFFLOAD_TCP_CKSUM | 1412 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 1413 DEV_TX_OFFLOAD_MULTI_SEGS | 1414 DEV_TX_OFFLOAD_TCP_TSO | 1415 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 1416 DEV_TX_OFFLOAD_GENEVE_TNL_TSO); 1417 dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa; 1418 1419 dev_info->default_txconf = (struct rte_eth_txconf) { 1420 .offloads = DEV_TX_OFFLOAD_MULTI_SEGS, 1421 }; 1422 1423 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1424 /* Packets are always dropped if no descriptors are available */ 1425 .rx_drop_en = 1, 1426 .offloads = 0, 1427 }; 1428 1429 memset(&link, 0, sizeof(struct qed_link_output)); 1430 qdev->ops->common->get_link(edev, &link); 1431 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1432 speed_cap |= ETH_LINK_SPEED_1G; 1433 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1434 speed_cap |= ETH_LINK_SPEED_10G; 1435 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1436 speed_cap |= ETH_LINK_SPEED_25G; 1437 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1438 speed_cap |= ETH_LINK_SPEED_40G; 1439 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1440 speed_cap |= ETH_LINK_SPEED_50G; 1441 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1442 speed_cap |= ETH_LINK_SPEED_100G; 1443 dev_info->speed_capa = speed_cap; 1444 1445 return 0; 1446 } 1447 1448 /* return 0 means link status changed, -1 means not changed */ 1449 int 1450 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) 1451 { 1452 struct qede_dev *qdev = eth_dev->data->dev_private; 1453 struct ecore_dev *edev = &qdev->edev; 1454 struct qed_link_output q_link; 1455 struct rte_eth_link link; 1456 uint16_t link_duplex; 1457 1458 memset(&q_link, 0, sizeof(q_link)); 1459 memset(&link, 0, sizeof(link)); 1460 1461 qdev->ops->common->get_link(edev, &q_link); 1462 1463 /* Link Speed */ 1464 link.link_speed = q_link.speed; 1465 1466 /* Link Mode */ 1467 switch (q_link.duplex) { 1468 case QEDE_DUPLEX_HALF: 1469 link_duplex = ETH_LINK_HALF_DUPLEX; 1470 break; 1471 case QEDE_DUPLEX_FULL: 1472 link_duplex = ETH_LINK_FULL_DUPLEX; 1473 break; 1474 case QEDE_DUPLEX_UNKNOWN: 1475 default: 1476 link_duplex = -1; 1477 } 1478 link.link_duplex = link_duplex; 1479 1480 /* Link Status */ 1481 link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN; 1482 1483 /* AN */ 1484 link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? 1485 ETH_LINK_AUTONEG : ETH_LINK_FIXED; 1486 1487 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", 1488 link.link_speed, link.link_duplex, 1489 link.link_autoneg, link.link_status); 1490 1491 return rte_eth_linkstatus_set(eth_dev, &link); 1492 } 1493 1494 static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev) 1495 { 1496 enum _ecore_status_t ecore_status; 1497 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1498 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1499 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; 1500 1501 PMD_INIT_FUNC_TRACE(edev); 1502 1503 ecore_status = qed_configure_filter_rx_mode(eth_dev, type); 1504 1505 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1506 } 1507 1508 static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev) 1509 { 1510 struct qede_dev *qdev = eth_dev->data->dev_private; 1511 struct ecore_dev *edev = &qdev->edev; 1512 enum _ecore_status_t ecore_status; 1513 1514 PMD_INIT_FUNC_TRACE(edev); 1515 1516 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1517 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1518 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC); 1519 else 1520 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1521 QED_FILTER_RX_MODE_TYPE_REGULAR); 1522 1523 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1524 } 1525 1526 static void qede_poll_sp_sb_cb(void *param) 1527 { 1528 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 1529 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1530 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1531 int rc; 1532 1533 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 1534 qede_interrupt_action(&edev->hwfns[1]); 1535 1536 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, 1537 qede_poll_sp_sb_cb, 1538 (void *)eth_dev); 1539 if (rc != 0) { 1540 DP_ERR(edev, "Unable to start periodic" 1541 " timer rc %d\n", rc); 1542 } 1543 } 1544 1545 static void qede_dev_close(struct rte_eth_dev *eth_dev) 1546 { 1547 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1548 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1549 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1550 1551 PMD_INIT_FUNC_TRACE(edev); 1552 1553 /* dev_stop() shall cleanup fp resources in hw but without releasing 1554 * dma memories and sw structures so that dev_start() can be called 1555 * by the app without reconfiguration. However, in dev_close() we 1556 * can release all the resources and device can be brought up newly 1557 */ 1558 if (eth_dev->data->dev_started) 1559 qede_dev_stop(eth_dev); 1560 1561 if (qdev->vport_started) 1562 qede_stop_vport(edev); 1563 qdev->vport_started = false; 1564 qede_fdir_dealloc_resc(eth_dev); 1565 qede_dealloc_fp_resc(eth_dev); 1566 1567 eth_dev->data->nb_rx_queues = 0; 1568 eth_dev->data->nb_tx_queues = 0; 1569 1570 qdev->ops->common->slowpath_stop(edev); 1571 qdev->ops->common->remove(edev); 1572 rte_intr_disable(&pci_dev->intr_handle); 1573 1574 switch (pci_dev->intr_handle.type) { 1575 case RTE_INTR_HANDLE_UIO_INTX: 1576 case RTE_INTR_HANDLE_VFIO_LEGACY: 1577 rte_intr_callback_unregister(&pci_dev->intr_handle, 1578 qede_interrupt_handler_intx, 1579 (void *)eth_dev); 1580 break; 1581 default: 1582 rte_intr_callback_unregister(&pci_dev->intr_handle, 1583 qede_interrupt_handler, 1584 (void *)eth_dev); 1585 } 1586 1587 if (ECORE_IS_CMT(edev)) 1588 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); 1589 } 1590 1591 static int 1592 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) 1593 { 1594 struct qede_dev *qdev = eth_dev->data->dev_private; 1595 struct ecore_dev *edev = &qdev->edev; 1596 struct ecore_eth_stats stats; 1597 unsigned int i = 0, j = 0, qid, idx, hw_fn; 1598 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 1599 struct qede_tx_queue *txq; 1600 1601 ecore_get_vport_stats(edev, &stats); 1602 1603 /* RX Stats */ 1604 eth_stats->ipackets = stats.common.rx_ucast_pkts + 1605 stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts; 1606 1607 eth_stats->ibytes = stats.common.rx_ucast_bytes + 1608 stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes; 1609 1610 eth_stats->ierrors = stats.common.rx_crc_errors + 1611 stats.common.rx_align_errors + 1612 stats.common.rx_carrier_errors + 1613 stats.common.rx_oversize_packets + 1614 stats.common.rx_jabbers + stats.common.rx_undersize_packets; 1615 1616 eth_stats->rx_nombuf = stats.common.no_buff_discards; 1617 1618 eth_stats->imissed = stats.common.mftag_filter_discards + 1619 stats.common.mac_filter_discards + 1620 stats.common.no_buff_discards + 1621 stats.common.brb_truncates + stats.common.brb_discards; 1622 1623 /* TX stats */ 1624 eth_stats->opackets = stats.common.tx_ucast_pkts + 1625 stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts; 1626 1627 eth_stats->obytes = stats.common.tx_ucast_bytes + 1628 stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes; 1629 1630 eth_stats->oerrors = stats.common.tx_err_drop_pkts; 1631 1632 /* Queue stats */ 1633 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev), 1634 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1635 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev), 1636 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1637 if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) || 1638 txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev)) 1639 DP_VERBOSE(edev, ECORE_MSG_DEBUG, 1640 "Not all the queue stats will be displayed. Set" 1641 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" 1642 " appropriately and retry.\n"); 1643 1644 for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) { 1645 eth_stats->q_ipackets[i] = 0; 1646 eth_stats->q_errors[i] = 0; 1647 1648 for_each_hwfn(edev, hw_fn) { 1649 idx = qid * edev->num_hwfns + hw_fn; 1650 1651 eth_stats->q_ipackets[i] += 1652 *(uint64_t *) 1653 (((char *)(qdev->fp_array[idx].rxq)) + 1654 offsetof(struct qede_rx_queue, 1655 rcv_pkts)); 1656 eth_stats->q_errors[i] += 1657 *(uint64_t *) 1658 (((char *)(qdev->fp_array[idx].rxq)) + 1659 offsetof(struct qede_rx_queue, 1660 rx_hw_errors)) + 1661 *(uint64_t *) 1662 (((char *)(qdev->fp_array[idx].rxq)) + 1663 offsetof(struct qede_rx_queue, 1664 rx_alloc_errors)); 1665 } 1666 1667 i++; 1668 if (i == rxq_stat_cntrs) 1669 break; 1670 } 1671 1672 for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) { 1673 eth_stats->q_opackets[j] = 0; 1674 1675 for_each_hwfn(edev, hw_fn) { 1676 idx = qid * edev->num_hwfns + hw_fn; 1677 1678 txq = qdev->fp_array[idx].txq; 1679 eth_stats->q_opackets[j] += 1680 *((uint64_t *)(uintptr_t) 1681 (((uint64_t)(uintptr_t)(txq)) + 1682 offsetof(struct qede_tx_queue, 1683 xmit_pkts))); 1684 } 1685 1686 j++; 1687 if (j == txq_stat_cntrs) 1688 break; 1689 } 1690 1691 return 0; 1692 } 1693 1694 static unsigned 1695 qede_get_xstats_count(struct qede_dev *qdev) { 1696 struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; 1697 1698 if (ECORE_IS_BB(&qdev->edev)) 1699 return RTE_DIM(qede_xstats_strings) + 1700 RTE_DIM(qede_bb_xstats_strings) + 1701 (RTE_DIM(qede_rxq_xstats_strings) * 1702 QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns); 1703 else 1704 return RTE_DIM(qede_xstats_strings) + 1705 RTE_DIM(qede_ah_xstats_strings) + 1706 (RTE_DIM(qede_rxq_xstats_strings) * 1707 QEDE_RSS_COUNT(dev)); 1708 } 1709 1710 static int 1711 qede_get_xstats_names(struct rte_eth_dev *dev, 1712 struct rte_eth_xstat_name *xstats_names, 1713 __rte_unused unsigned int limit) 1714 { 1715 struct qede_dev *qdev = dev->data->dev_private; 1716 struct ecore_dev *edev = &qdev->edev; 1717 const unsigned int stat_cnt = qede_get_xstats_count(qdev); 1718 unsigned int i, qid, hw_fn, stat_idx = 0; 1719 1720 if (xstats_names == NULL) 1721 return stat_cnt; 1722 1723 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1724 strlcpy(xstats_names[stat_idx].name, 1725 qede_xstats_strings[i].name, 1726 sizeof(xstats_names[stat_idx].name)); 1727 stat_idx++; 1728 } 1729 1730 if (ECORE_IS_BB(edev)) { 1731 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { 1732 strlcpy(xstats_names[stat_idx].name, 1733 qede_bb_xstats_strings[i].name, 1734 sizeof(xstats_names[stat_idx].name)); 1735 stat_idx++; 1736 } 1737 } else { 1738 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { 1739 strlcpy(xstats_names[stat_idx].name, 1740 qede_ah_xstats_strings[i].name, 1741 sizeof(xstats_names[stat_idx].name)); 1742 stat_idx++; 1743 } 1744 } 1745 1746 for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) { 1747 for_each_hwfn(edev, hw_fn) { 1748 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1749 snprintf(xstats_names[stat_idx].name, 1750 RTE_ETH_XSTATS_NAME_SIZE, 1751 "%.4s%d.%d%s", 1752 qede_rxq_xstats_strings[i].name, 1753 hw_fn, qid, 1754 qede_rxq_xstats_strings[i].name + 4); 1755 stat_idx++; 1756 } 1757 } 1758 } 1759 1760 return stat_cnt; 1761 } 1762 1763 static int 1764 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1765 unsigned int n) 1766 { 1767 struct qede_dev *qdev = dev->data->dev_private; 1768 struct ecore_dev *edev = &qdev->edev; 1769 struct ecore_eth_stats stats; 1770 const unsigned int num = qede_get_xstats_count(qdev); 1771 unsigned int i, qid, hw_fn, fpidx, stat_idx = 0; 1772 1773 if (n < num) 1774 return num; 1775 1776 ecore_get_vport_stats(edev, &stats); 1777 1778 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1779 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) + 1780 qede_xstats_strings[i].offset); 1781 xstats[stat_idx].id = stat_idx; 1782 stat_idx++; 1783 } 1784 1785 if (ECORE_IS_BB(edev)) { 1786 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { 1787 xstats[stat_idx].value = 1788 *(uint64_t *)(((char *)&stats) + 1789 qede_bb_xstats_strings[i].offset); 1790 xstats[stat_idx].id = stat_idx; 1791 stat_idx++; 1792 } 1793 } else { 1794 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { 1795 xstats[stat_idx].value = 1796 *(uint64_t *)(((char *)&stats) + 1797 qede_ah_xstats_strings[i].offset); 1798 xstats[stat_idx].id = stat_idx; 1799 stat_idx++; 1800 } 1801 } 1802 1803 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 1804 for_each_hwfn(edev, hw_fn) { 1805 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1806 fpidx = qid * edev->num_hwfns + hw_fn; 1807 xstats[stat_idx].value = *(uint64_t *) 1808 (((char *)(qdev->fp_array[fpidx].rxq)) + 1809 qede_rxq_xstats_strings[i].offset); 1810 xstats[stat_idx].id = stat_idx; 1811 stat_idx++; 1812 } 1813 1814 } 1815 } 1816 1817 return stat_idx; 1818 } 1819 1820 static int 1821 qede_reset_xstats(struct rte_eth_dev *dev) 1822 { 1823 struct qede_dev *qdev = dev->data->dev_private; 1824 struct ecore_dev *edev = &qdev->edev; 1825 1826 ecore_reset_vport_stats(edev); 1827 qede_reset_queue_stats(qdev, true); 1828 1829 return 0; 1830 } 1831 1832 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) 1833 { 1834 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1835 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1836 struct qed_link_params link_params; 1837 int rc; 1838 1839 DP_INFO(edev, "setting link state %d\n", link_up); 1840 memset(&link_params, 0, sizeof(link_params)); 1841 link_params.link_up = link_up; 1842 rc = qdev->ops->common->set_link(edev, &link_params); 1843 if (rc != ECORE_SUCCESS) 1844 DP_ERR(edev, "Unable to set link state %d\n", link_up); 1845 1846 return rc; 1847 } 1848 1849 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev) 1850 { 1851 return qede_dev_set_link_state(eth_dev, true); 1852 } 1853 1854 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev) 1855 { 1856 return qede_dev_set_link_state(eth_dev, false); 1857 } 1858 1859 static int qede_reset_stats(struct rte_eth_dev *eth_dev) 1860 { 1861 struct qede_dev *qdev = eth_dev->data->dev_private; 1862 struct ecore_dev *edev = &qdev->edev; 1863 1864 ecore_reset_vport_stats(edev); 1865 qede_reset_queue_stats(qdev, false); 1866 1867 return 0; 1868 } 1869 1870 static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev) 1871 { 1872 enum qed_filter_rx_mode_type type = 1873 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1874 enum _ecore_status_t ecore_status; 1875 1876 ecore_status = qed_configure_filter_rx_mode(eth_dev, type); 1877 1878 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1879 } 1880 1881 static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev) 1882 { 1883 enum _ecore_status_t ecore_status; 1884 1885 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1886 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1887 QED_FILTER_RX_MODE_TYPE_PROMISC); 1888 else 1889 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1890 QED_FILTER_RX_MODE_TYPE_REGULAR); 1891 1892 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1893 } 1894 1895 static int 1896 qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, 1897 struct rte_ether_addr *mc_addrs, 1898 uint32_t mc_addrs_num) 1899 { 1900 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1901 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1902 uint8_t i; 1903 1904 if (mc_addrs_num > ECORE_MAX_MC_ADDRS) { 1905 DP_ERR(edev, "Reached max multicast filters limit," 1906 "Please enable multicast promisc mode\n"); 1907 return -ENOSPC; 1908 } 1909 1910 for (i = 0; i < mc_addrs_num; i++) { 1911 if (!rte_is_multicast_ether_addr(&mc_addrs[i])) { 1912 DP_ERR(edev, "Not a valid multicast MAC\n"); 1913 return -EINVAL; 1914 } 1915 } 1916 1917 /* Flush all existing entries */ 1918 if (qede_del_mcast_filters(eth_dev)) 1919 return -1; 1920 1921 /* Set new mcast list */ 1922 return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num); 1923 } 1924 1925 /* Update MTU via vport-update without doing port restart. 1926 * The vport must be deactivated before calling this API. 1927 */ 1928 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu) 1929 { 1930 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1931 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1932 struct ecore_hwfn *p_hwfn; 1933 int rc; 1934 int i; 1935 1936 if (IS_PF(edev)) { 1937 struct ecore_sp_vport_update_params params; 1938 1939 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 1940 params.vport_id = 0; 1941 params.mtu = mtu; 1942 params.vport_id = 0; 1943 for_each_hwfn(edev, i) { 1944 p_hwfn = &edev->hwfns[i]; 1945 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 1946 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 1947 ECORE_SPQ_MODE_EBLOCK, NULL); 1948 if (rc != ECORE_SUCCESS) 1949 goto err; 1950 } 1951 } else { 1952 for_each_hwfn(edev, i) { 1953 p_hwfn = &edev->hwfns[i]; 1954 rc = ecore_vf_pf_update_mtu(p_hwfn, mtu); 1955 if (rc == ECORE_INVAL) { 1956 DP_INFO(edev, "VF MTU Update TLV not supported\n"); 1957 /* Recreate vport */ 1958 rc = qede_start_vport(qdev, mtu); 1959 if (rc != ECORE_SUCCESS) 1960 goto err; 1961 1962 /* Restore config lost due to vport stop */ 1963 if (eth_dev->data->promiscuous) 1964 qede_promiscuous_enable(eth_dev); 1965 else 1966 qede_promiscuous_disable(eth_dev); 1967 1968 if (eth_dev->data->all_multicast) 1969 qede_allmulticast_enable(eth_dev); 1970 else 1971 qede_allmulticast_disable(eth_dev); 1972 1973 qede_vlan_offload_set(eth_dev, 1974 qdev->vlan_offload_mask); 1975 } else if (rc != ECORE_SUCCESS) { 1976 goto err; 1977 } 1978 } 1979 } 1980 DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu); 1981 1982 return 0; 1983 1984 err: 1985 DP_ERR(edev, "Failed to update MTU\n"); 1986 return -1; 1987 } 1988 1989 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, 1990 struct rte_eth_fc_conf *fc_conf) 1991 { 1992 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1993 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1994 struct qed_link_output current_link; 1995 struct qed_link_params params; 1996 1997 memset(¤t_link, 0, sizeof(current_link)); 1998 qdev->ops->common->get_link(edev, ¤t_link); 1999 2000 memset(¶ms, 0, sizeof(params)); 2001 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; 2002 if (fc_conf->autoneg) { 2003 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) { 2004 DP_ERR(edev, "Autoneg not supported\n"); 2005 return -EINVAL; 2006 } 2007 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 2008 } 2009 2010 /* Pause is assumed to be supported (SUPPORTED_Pause) */ 2011 if (fc_conf->mode == RTE_FC_FULL) 2012 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE | 2013 QED_LINK_PAUSE_RX_ENABLE); 2014 if (fc_conf->mode == RTE_FC_TX_PAUSE) 2015 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; 2016 if (fc_conf->mode == RTE_FC_RX_PAUSE) 2017 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; 2018 2019 params.link_up = true; 2020 (void)qdev->ops->common->set_link(edev, ¶ms); 2021 2022 return 0; 2023 } 2024 2025 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev, 2026 struct rte_eth_fc_conf *fc_conf) 2027 { 2028 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2029 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2030 struct qed_link_output current_link; 2031 2032 memset(¤t_link, 0, sizeof(current_link)); 2033 qdev->ops->common->get_link(edev, ¤t_link); 2034 2035 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 2036 fc_conf->autoneg = true; 2037 2038 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE | 2039 QED_LINK_PAUSE_TX_ENABLE)) 2040 fc_conf->mode = RTE_FC_FULL; 2041 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) 2042 fc_conf->mode = RTE_FC_RX_PAUSE; 2043 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) 2044 fc_conf->mode = RTE_FC_TX_PAUSE; 2045 else 2046 fc_conf->mode = RTE_FC_NONE; 2047 2048 return 0; 2049 } 2050 2051 static const uint32_t * 2052 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) 2053 { 2054 static const uint32_t ptypes[] = { 2055 RTE_PTYPE_L2_ETHER, 2056 RTE_PTYPE_L2_ETHER_VLAN, 2057 RTE_PTYPE_L3_IPV4, 2058 RTE_PTYPE_L3_IPV6, 2059 RTE_PTYPE_L4_TCP, 2060 RTE_PTYPE_L4_UDP, 2061 RTE_PTYPE_TUNNEL_VXLAN, 2062 RTE_PTYPE_L4_FRAG, 2063 RTE_PTYPE_TUNNEL_GENEVE, 2064 RTE_PTYPE_TUNNEL_GRE, 2065 /* Inner */ 2066 RTE_PTYPE_INNER_L2_ETHER, 2067 RTE_PTYPE_INNER_L2_ETHER_VLAN, 2068 RTE_PTYPE_INNER_L3_IPV4, 2069 RTE_PTYPE_INNER_L3_IPV6, 2070 RTE_PTYPE_INNER_L4_TCP, 2071 RTE_PTYPE_INNER_L4_UDP, 2072 RTE_PTYPE_INNER_L4_FRAG, 2073 RTE_PTYPE_UNKNOWN 2074 }; 2075 2076 if (eth_dev->rx_pkt_burst == qede_recv_pkts || 2077 eth_dev->rx_pkt_burst == qede_recv_pkts_regular || 2078 eth_dev->rx_pkt_burst == qede_recv_pkts_cmt) 2079 return ptypes; 2080 2081 return NULL; 2082 } 2083 2084 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf) 2085 { 2086 *rss_caps = 0; 2087 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0; 2088 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0; 2089 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0; 2090 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; 2091 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0; 2092 *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0; 2093 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0; 2094 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0; 2095 } 2096 2097 int qede_rss_hash_update(struct rte_eth_dev *eth_dev, 2098 struct rte_eth_rss_conf *rss_conf) 2099 { 2100 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2101 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2102 struct ecore_sp_vport_update_params vport_update_params; 2103 struct ecore_rss_params rss_params; 2104 struct ecore_hwfn *p_hwfn; 2105 uint32_t *key = (uint32_t *)rss_conf->rss_key; 2106 uint64_t hf = rss_conf->rss_hf; 2107 uint8_t len = rss_conf->rss_key_len; 2108 uint8_t idx, i, j, fpidx; 2109 int rc; 2110 2111 memset(&vport_update_params, 0, sizeof(vport_update_params)); 2112 memset(&rss_params, 0, sizeof(rss_params)); 2113 2114 DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n", 2115 (unsigned long)hf, len, key); 2116 2117 if (hf != 0) { 2118 /* Enabling RSS */ 2119 DP_INFO(edev, "Enabling rss\n"); 2120 2121 /* RSS caps */ 2122 qede_init_rss_caps(&rss_params.rss_caps, hf); 2123 rss_params.update_rss_capabilities = 1; 2124 2125 /* RSS hash key */ 2126 if (key) { 2127 if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) { 2128 DP_ERR(edev, "RSS key length exceeds limit\n"); 2129 return -EINVAL; 2130 } 2131 DP_INFO(edev, "Applying user supplied hash key\n"); 2132 rss_params.update_rss_key = 1; 2133 memcpy(&rss_params.rss_key, key, len); 2134 } 2135 rss_params.rss_enable = 1; 2136 } 2137 2138 rss_params.update_rss_config = 1; 2139 /* tbl_size has to be set with capabilities */ 2140 rss_params.rss_table_size_log = 7; 2141 vport_update_params.vport_id = 0; 2142 2143 for_each_hwfn(edev, i) { 2144 /* pass the L2 handles instead of qids */ 2145 for (j = 0 ; j < ECORE_RSS_IND_TABLE_SIZE ; j++) { 2146 idx = j % QEDE_RSS_COUNT(eth_dev); 2147 fpidx = idx * edev->num_hwfns + i; 2148 rss_params.rss_ind_table[j] = 2149 qdev->fp_array[fpidx].rxq->handle; 2150 } 2151 2152 vport_update_params.rss_params = &rss_params; 2153 2154 p_hwfn = &edev->hwfns[i]; 2155 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 2156 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 2157 ECORE_SPQ_MODE_EBLOCK, NULL); 2158 if (rc) { 2159 DP_ERR(edev, "vport-update for RSS failed\n"); 2160 return rc; 2161 } 2162 } 2163 qdev->rss_enable = rss_params.rss_enable; 2164 2165 /* Update local structure for hash query */ 2166 qdev->rss_conf.rss_hf = hf; 2167 qdev->rss_conf.rss_key_len = len; 2168 if (qdev->rss_enable) { 2169 if (qdev->rss_conf.rss_key == NULL) { 2170 qdev->rss_conf.rss_key = (uint8_t *)malloc(len); 2171 if (qdev->rss_conf.rss_key == NULL) { 2172 DP_ERR(edev, "No memory to store RSS key\n"); 2173 return -ENOMEM; 2174 } 2175 } 2176 if (key && len) { 2177 DP_INFO(edev, "Storing RSS key\n"); 2178 memcpy(qdev->rss_conf.rss_key, key, len); 2179 } 2180 } else if (!qdev->rss_enable && len == 0) { 2181 if (qdev->rss_conf.rss_key) { 2182 free(qdev->rss_conf.rss_key); 2183 qdev->rss_conf.rss_key = NULL; 2184 DP_INFO(edev, "Free RSS key\n"); 2185 } 2186 } 2187 2188 return 0; 2189 } 2190 2191 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev, 2192 struct rte_eth_rss_conf *rss_conf) 2193 { 2194 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2195 2196 rss_conf->rss_hf = qdev->rss_conf.rss_hf; 2197 rss_conf->rss_key_len = qdev->rss_conf.rss_key_len; 2198 2199 if (rss_conf->rss_key && qdev->rss_conf.rss_key) 2200 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key, 2201 rss_conf->rss_key_len); 2202 return 0; 2203 } 2204 2205 int qede_rss_reta_update(struct rte_eth_dev *eth_dev, 2206 struct rte_eth_rss_reta_entry64 *reta_conf, 2207 uint16_t reta_size) 2208 { 2209 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2210 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2211 struct ecore_sp_vport_update_params vport_update_params; 2212 struct ecore_rss_params *params; 2213 uint16_t i, j, idx, fid, shift; 2214 struct ecore_hwfn *p_hwfn; 2215 uint8_t entry; 2216 int rc = 0; 2217 2218 if (reta_size > ETH_RSS_RETA_SIZE_128) { 2219 DP_ERR(edev, "reta_size %d is not supported by hardware\n", 2220 reta_size); 2221 return -EINVAL; 2222 } 2223 2224 memset(&vport_update_params, 0, sizeof(vport_update_params)); 2225 params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE); 2226 if (params == NULL) { 2227 DP_ERR(edev, "failed to allocate memory\n"); 2228 return -ENOMEM; 2229 } 2230 2231 params->update_rss_ind_table = 1; 2232 params->rss_table_size_log = 7; 2233 params->update_rss_config = 1; 2234 2235 vport_update_params.vport_id = 0; 2236 /* Use the current value of rss_enable */ 2237 params->rss_enable = qdev->rss_enable; 2238 vport_update_params.rss_params = params; 2239 2240 for_each_hwfn(edev, i) { 2241 for (j = 0; j < reta_size; j++) { 2242 idx = j / RTE_RETA_GROUP_SIZE; 2243 shift = j % RTE_RETA_GROUP_SIZE; 2244 if (reta_conf[idx].mask & (1ULL << shift)) { 2245 entry = reta_conf[idx].reta[shift]; 2246 fid = entry * edev->num_hwfns + i; 2247 /* Pass rxq handles to ecore */ 2248 params->rss_ind_table[j] = 2249 qdev->fp_array[fid].rxq->handle; 2250 /* Update the local copy for RETA query cmd */ 2251 qdev->rss_ind_table[j] = entry; 2252 } 2253 } 2254 2255 p_hwfn = &edev->hwfns[i]; 2256 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 2257 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 2258 ECORE_SPQ_MODE_EBLOCK, NULL); 2259 if (rc) { 2260 DP_ERR(edev, "vport-update for RSS failed\n"); 2261 goto out; 2262 } 2263 } 2264 2265 out: 2266 rte_free(params); 2267 return rc; 2268 } 2269 2270 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev, 2271 struct rte_eth_rss_reta_entry64 *reta_conf, 2272 uint16_t reta_size) 2273 { 2274 struct qede_dev *qdev = eth_dev->data->dev_private; 2275 struct ecore_dev *edev = &qdev->edev; 2276 uint16_t i, idx, shift; 2277 uint8_t entry; 2278 2279 if (reta_size > ETH_RSS_RETA_SIZE_128) { 2280 DP_ERR(edev, "reta_size %d is not supported\n", 2281 reta_size); 2282 return -EINVAL; 2283 } 2284 2285 for (i = 0; i < reta_size; i++) { 2286 idx = i / RTE_RETA_GROUP_SIZE; 2287 shift = i % RTE_RETA_GROUP_SIZE; 2288 if (reta_conf[idx].mask & (1ULL << shift)) { 2289 entry = qdev->rss_ind_table[i]; 2290 reta_conf[idx].reta[shift] = entry; 2291 } 2292 } 2293 2294 return 0; 2295 } 2296 2297 2298 2299 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 2300 { 2301 struct qede_dev *qdev = QEDE_INIT_QDEV(dev); 2302 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2303 struct rte_eth_dev_info dev_info = {0}; 2304 struct qede_fastpath *fp; 2305 uint32_t max_rx_pkt_len; 2306 uint32_t frame_size; 2307 uint16_t bufsz; 2308 bool restart = false; 2309 int i, rc; 2310 2311 PMD_INIT_FUNC_TRACE(edev); 2312 rc = qede_dev_info_get(dev, &dev_info); 2313 if (rc != 0) { 2314 DP_ERR(edev, "Error during getting ethernet device info\n"); 2315 return rc; 2316 } 2317 max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN; 2318 frame_size = max_rx_pkt_len; 2319 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) { 2320 DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n", 2321 mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN - 2322 QEDE_ETH_OVERHEAD); 2323 return -EINVAL; 2324 } 2325 if (!dev->data->scattered_rx && 2326 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 2327 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n", 2328 dev->data->min_rx_buf_size); 2329 return -EINVAL; 2330 } 2331 if (dev->data->dev_started) { 2332 dev->data->dev_started = 0; 2333 qede_dev_stop(dev); 2334 restart = true; 2335 } 2336 rte_delay_ms(1000); 2337 qdev->new_mtu = mtu; 2338 2339 /* Fix up RX buf size for all queues of the port */ 2340 for (i = 0; i < qdev->num_rx_queues; i++) { 2341 fp = &qdev->fp_array[i]; 2342 if (fp->rxq != NULL) { 2343 bufsz = (uint16_t)rte_pktmbuf_data_room_size( 2344 fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; 2345 /* cache align the mbuf size to simplfy rx_buf_size 2346 * calculation 2347 */ 2348 bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz); 2349 rc = qede_calc_rx_buf_size(dev, bufsz, frame_size); 2350 if (rc < 0) 2351 return rc; 2352 2353 fp->rxq->rx_buf_size = rc; 2354 } 2355 } 2356 if (max_rx_pkt_len > RTE_ETHER_MAX_LEN) 2357 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; 2358 else 2359 dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; 2360 2361 if (!dev->data->dev_started && restart) { 2362 qede_dev_start(dev); 2363 dev->data->dev_started = 1; 2364 } 2365 2366 /* update max frame size */ 2367 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len; 2368 2369 return 0; 2370 } 2371 2372 static int 2373 qede_dev_reset(struct rte_eth_dev *dev) 2374 { 2375 int ret; 2376 2377 ret = qede_eth_dev_uninit(dev); 2378 if (ret) 2379 return ret; 2380 2381 return qede_eth_dev_init(dev); 2382 } 2383 2384 static const struct eth_dev_ops qede_eth_dev_ops = { 2385 .dev_configure = qede_dev_configure, 2386 .dev_infos_get = qede_dev_info_get, 2387 .rx_queue_setup = qede_rx_queue_setup, 2388 .rx_queue_release = qede_rx_queue_release, 2389 .tx_queue_setup = qede_tx_queue_setup, 2390 .tx_queue_release = qede_tx_queue_release, 2391 .dev_start = qede_dev_start, 2392 .dev_reset = qede_dev_reset, 2393 .dev_set_link_up = qede_dev_set_link_up, 2394 .dev_set_link_down = qede_dev_set_link_down, 2395 .link_update = qede_link_update, 2396 .promiscuous_enable = qede_promiscuous_enable, 2397 .promiscuous_disable = qede_promiscuous_disable, 2398 .allmulticast_enable = qede_allmulticast_enable, 2399 .allmulticast_disable = qede_allmulticast_disable, 2400 .set_mc_addr_list = qede_set_mc_addr_list, 2401 .dev_stop = qede_dev_stop, 2402 .dev_close = qede_dev_close, 2403 .stats_get = qede_get_stats, 2404 .stats_reset = qede_reset_stats, 2405 .xstats_get = qede_get_xstats, 2406 .xstats_reset = qede_reset_xstats, 2407 .xstats_get_names = qede_get_xstats_names, 2408 .mac_addr_add = qede_mac_addr_add, 2409 .mac_addr_remove = qede_mac_addr_remove, 2410 .mac_addr_set = qede_mac_addr_set, 2411 .vlan_offload_set = qede_vlan_offload_set, 2412 .vlan_filter_set = qede_vlan_filter_set, 2413 .flow_ctrl_set = qede_flow_ctrl_set, 2414 .flow_ctrl_get = qede_flow_ctrl_get, 2415 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, 2416 .rss_hash_update = qede_rss_hash_update, 2417 .rss_hash_conf_get = qede_rss_hash_conf_get, 2418 .reta_update = qede_rss_reta_update, 2419 .reta_query = qede_rss_reta_query, 2420 .mtu_set = qede_set_mtu, 2421 .filter_ctrl = qede_dev_filter_ctrl, 2422 .udp_tunnel_port_add = qede_udp_dst_port_add, 2423 .udp_tunnel_port_del = qede_udp_dst_port_del, 2424 .fw_version_get = qede_fw_version_get, 2425 .get_reg = qede_get_regs, 2426 }; 2427 2428 static const struct eth_dev_ops qede_eth_vf_dev_ops = { 2429 .dev_configure = qede_dev_configure, 2430 .dev_infos_get = qede_dev_info_get, 2431 .rx_queue_setup = qede_rx_queue_setup, 2432 .rx_queue_release = qede_rx_queue_release, 2433 .tx_queue_setup = qede_tx_queue_setup, 2434 .tx_queue_release = qede_tx_queue_release, 2435 .dev_start = qede_dev_start, 2436 .dev_reset = qede_dev_reset, 2437 .dev_set_link_up = qede_dev_set_link_up, 2438 .dev_set_link_down = qede_dev_set_link_down, 2439 .link_update = qede_link_update, 2440 .promiscuous_enable = qede_promiscuous_enable, 2441 .promiscuous_disable = qede_promiscuous_disable, 2442 .allmulticast_enable = qede_allmulticast_enable, 2443 .allmulticast_disable = qede_allmulticast_disable, 2444 .set_mc_addr_list = qede_set_mc_addr_list, 2445 .dev_stop = qede_dev_stop, 2446 .dev_close = qede_dev_close, 2447 .stats_get = qede_get_stats, 2448 .stats_reset = qede_reset_stats, 2449 .xstats_get = qede_get_xstats, 2450 .xstats_reset = qede_reset_xstats, 2451 .xstats_get_names = qede_get_xstats_names, 2452 .vlan_offload_set = qede_vlan_offload_set, 2453 .vlan_filter_set = qede_vlan_filter_set, 2454 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, 2455 .rss_hash_update = qede_rss_hash_update, 2456 .rss_hash_conf_get = qede_rss_hash_conf_get, 2457 .reta_update = qede_rss_reta_update, 2458 .reta_query = qede_rss_reta_query, 2459 .mtu_set = qede_set_mtu, 2460 .udp_tunnel_port_add = qede_udp_dst_port_add, 2461 .udp_tunnel_port_del = qede_udp_dst_port_del, 2462 .mac_addr_add = qede_mac_addr_add, 2463 .mac_addr_remove = qede_mac_addr_remove, 2464 .mac_addr_set = qede_mac_addr_set, 2465 .fw_version_get = qede_fw_version_get, 2466 }; 2467 2468 static void qede_update_pf_params(struct ecore_dev *edev) 2469 { 2470 struct ecore_pf_params pf_params; 2471 2472 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 2473 pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS; 2474 pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; 2475 qed_ops->common->update_pf_params(edev, &pf_params); 2476 } 2477 2478 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) 2479 { 2480 struct rte_pci_device *pci_dev; 2481 struct rte_pci_addr pci_addr; 2482 struct qede_dev *adapter; 2483 struct ecore_dev *edev; 2484 struct qed_dev_eth_info dev_info; 2485 struct qed_slowpath_params params; 2486 static bool do_once = true; 2487 uint8_t bulletin_change; 2488 uint8_t vf_mac[RTE_ETHER_ADDR_LEN]; 2489 uint8_t is_mac_forced; 2490 bool is_mac_exist; 2491 /* Fix up ecore debug level */ 2492 uint32_t dp_module = ~0 & ~ECORE_MSG_HW; 2493 uint8_t dp_level = ECORE_LEVEL_VERBOSE; 2494 uint32_t int_mode; 2495 int rc; 2496 2497 /* Extract key data structures */ 2498 adapter = eth_dev->data->dev_private; 2499 adapter->ethdev = eth_dev; 2500 edev = &adapter->edev; 2501 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 2502 pci_addr = pci_dev->addr; 2503 2504 PMD_INIT_FUNC_TRACE(edev); 2505 2506 snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u", 2507 pci_addr.bus, pci_addr.devid, pci_addr.function, 2508 eth_dev->data->port_id); 2509 2510 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2511 DP_ERR(edev, "Skipping device init from secondary process\n"); 2512 return 0; 2513 } 2514 2515 rte_eth_copy_pci_info(eth_dev, pci_dev); 2516 2517 /* @DPDK */ 2518 edev->vendor_id = pci_dev->id.vendor_id; 2519 edev->device_id = pci_dev->id.device_id; 2520 2521 qed_ops = qed_get_eth_ops(); 2522 if (!qed_ops) { 2523 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n"); 2524 rc = -EINVAL; 2525 goto err; 2526 } 2527 2528 DP_INFO(edev, "Starting qede probe\n"); 2529 rc = qed_ops->common->probe(edev, pci_dev, dp_module, 2530 dp_level, is_vf); 2531 if (rc != 0) { 2532 DP_ERR(edev, "qede probe failed rc %d\n", rc); 2533 rc = -ENODEV; 2534 goto err; 2535 } 2536 qede_update_pf_params(edev); 2537 2538 switch (pci_dev->intr_handle.type) { 2539 case RTE_INTR_HANDLE_UIO_INTX: 2540 case RTE_INTR_HANDLE_VFIO_LEGACY: 2541 int_mode = ECORE_INT_MODE_INTA; 2542 rte_intr_callback_register(&pci_dev->intr_handle, 2543 qede_interrupt_handler_intx, 2544 (void *)eth_dev); 2545 break; 2546 default: 2547 int_mode = ECORE_INT_MODE_MSIX; 2548 rte_intr_callback_register(&pci_dev->intr_handle, 2549 qede_interrupt_handler, 2550 (void *)eth_dev); 2551 } 2552 2553 if (rte_intr_enable(&pci_dev->intr_handle)) { 2554 DP_ERR(edev, "rte_intr_enable() failed\n"); 2555 rc = -ENODEV; 2556 goto err; 2557 } 2558 2559 /* Start the Slowpath-process */ 2560 memset(¶ms, 0, sizeof(struct qed_slowpath_params)); 2561 2562 params.int_mode = int_mode; 2563 params.drv_major = QEDE_PMD_VERSION_MAJOR; 2564 params.drv_minor = QEDE_PMD_VERSION_MINOR; 2565 params.drv_rev = QEDE_PMD_VERSION_REVISION; 2566 params.drv_eng = QEDE_PMD_VERSION_PATCH; 2567 strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, 2568 QEDE_PMD_DRV_VER_STR_SIZE); 2569 2570 qede_assign_rxtx_handlers(eth_dev, true); 2571 eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; 2572 2573 /* For CMT mode device do periodic polling for slowpath events. 2574 * This is required since uio device uses only one MSI-x 2575 * interrupt vector but we need one for each engine. 2576 */ 2577 if (ECORE_IS_CMT(edev) && IS_PF(edev)) { 2578 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, 2579 qede_poll_sp_sb_cb, 2580 (void *)eth_dev); 2581 if (rc != 0) { 2582 DP_ERR(edev, "Unable to start periodic" 2583 " timer rc %d\n", rc); 2584 rc = -EINVAL; 2585 goto err; 2586 } 2587 } 2588 2589 rc = qed_ops->common->slowpath_start(edev, ¶ms); 2590 if (rc) { 2591 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc); 2592 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2593 (void *)eth_dev); 2594 rc = -ENODEV; 2595 goto err; 2596 } 2597 2598 rc = qed_ops->fill_dev_info(edev, &dev_info); 2599 if (rc) { 2600 DP_ERR(edev, "Cannot get device_info rc %d\n", rc); 2601 qed_ops->common->slowpath_stop(edev); 2602 qed_ops->common->remove(edev); 2603 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2604 (void *)eth_dev); 2605 rc = -ENODEV; 2606 goto err; 2607 } 2608 2609 qede_alloc_etherdev(adapter, &dev_info); 2610 2611 if (do_once) { 2612 qede_print_adapter_info(eth_dev); 2613 do_once = false; 2614 } 2615 2616 adapter->ops->common->set_name(edev, edev->name); 2617 2618 if (!is_vf) 2619 adapter->dev_info.num_mac_filters = 2620 (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev), 2621 ECORE_MAC); 2622 else 2623 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev), 2624 (uint32_t *)&adapter->dev_info.num_mac_filters); 2625 2626 /* Allocate memory for storing MAC addr */ 2627 eth_dev->data->mac_addrs = rte_zmalloc(edev->name, 2628 (RTE_ETHER_ADDR_LEN * 2629 adapter->dev_info.num_mac_filters), 2630 RTE_CACHE_LINE_SIZE); 2631 2632 if (eth_dev->data->mac_addrs == NULL) { 2633 DP_ERR(edev, "Failed to allocate MAC address\n"); 2634 qed_ops->common->slowpath_stop(edev); 2635 qed_ops->common->remove(edev); 2636 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2637 (void *)eth_dev); 2638 return -ENOMEM; 2639 } 2640 2641 if (!is_vf) { 2642 rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0]. 2643 hw_info.hw_mac_addr, 2644 ð_dev->data->mac_addrs[0]); 2645 rte_ether_addr_copy(ð_dev->data->mac_addrs[0], 2646 &adapter->primary_mac); 2647 } else { 2648 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev), 2649 &bulletin_change); 2650 if (bulletin_change) { 2651 is_mac_exist = 2652 ecore_vf_bulletin_get_forced_mac( 2653 ECORE_LEADING_HWFN(edev), 2654 vf_mac, 2655 &is_mac_forced); 2656 if (is_mac_exist) { 2657 DP_INFO(edev, "VF macaddr received from PF\n"); 2658 rte_ether_addr_copy( 2659 (struct rte_ether_addr *)&vf_mac, 2660 ð_dev->data->mac_addrs[0]); 2661 rte_ether_addr_copy( 2662 ð_dev->data->mac_addrs[0], 2663 &adapter->primary_mac); 2664 } else { 2665 DP_ERR(edev, "No VF macaddr assigned\n"); 2666 } 2667 } 2668 } 2669 2670 eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops; 2671 eth_dev->rx_descriptor_status = qede_rx_descriptor_status; 2672 2673 adapter->num_tx_queues = 0; 2674 adapter->num_rx_queues = 0; 2675 SLIST_INIT(&adapter->arfs_info.arfs_list_head); 2676 SLIST_INIT(&adapter->vlan_list_head); 2677 SLIST_INIT(&adapter->uc_list_head); 2678 SLIST_INIT(&adapter->mc_list_head); 2679 adapter->mtu = RTE_ETHER_MTU; 2680 adapter->vport_started = false; 2681 2682 /* VF tunnel offloads is enabled by default in PF driver */ 2683 adapter->vxlan.num_filters = 0; 2684 adapter->geneve.num_filters = 0; 2685 adapter->ipgre.num_filters = 0; 2686 if (is_vf) { 2687 adapter->vxlan.enable = true; 2688 adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC | 2689 ETH_TUNNEL_FILTER_IVLAN; 2690 adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT; 2691 adapter->geneve.enable = true; 2692 adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC | 2693 ETH_TUNNEL_FILTER_IVLAN; 2694 adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT; 2695 adapter->ipgre.enable = true; 2696 adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC | 2697 ETH_TUNNEL_FILTER_IVLAN; 2698 } else { 2699 adapter->vxlan.enable = false; 2700 adapter->geneve.enable = false; 2701 adapter->ipgre.enable = false; 2702 } 2703 2704 DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", 2705 adapter->primary_mac.addr_bytes[0], 2706 adapter->primary_mac.addr_bytes[1], 2707 adapter->primary_mac.addr_bytes[2], 2708 adapter->primary_mac.addr_bytes[3], 2709 adapter->primary_mac.addr_bytes[4], 2710 adapter->primary_mac.addr_bytes[5]); 2711 2712 DP_INFO(edev, "Device initialized\n"); 2713 2714 return 0; 2715 2716 err: 2717 if (do_once) { 2718 qede_print_adapter_info(eth_dev); 2719 do_once = false; 2720 } 2721 return rc; 2722 } 2723 2724 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev) 2725 { 2726 return qede_common_dev_init(eth_dev, 1); 2727 } 2728 2729 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev) 2730 { 2731 return qede_common_dev_init(eth_dev, 0); 2732 } 2733 2734 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev) 2735 { 2736 struct qede_dev *qdev = eth_dev->data->dev_private; 2737 struct ecore_dev *edev = &qdev->edev; 2738 2739 PMD_INIT_FUNC_TRACE(edev); 2740 2741 /* only uninitialize in the primary process */ 2742 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2743 return 0; 2744 2745 /* safe to close dev here */ 2746 qede_dev_close(eth_dev); 2747 2748 eth_dev->dev_ops = NULL; 2749 eth_dev->rx_pkt_burst = NULL; 2750 eth_dev->tx_pkt_burst = NULL; 2751 2752 return 0; 2753 } 2754 2755 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2756 { 2757 return qede_dev_common_uninit(eth_dev); 2758 } 2759 2760 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2761 { 2762 return qede_dev_common_uninit(eth_dev); 2763 } 2764 2765 static const struct rte_pci_id pci_id_qedevf_map[] = { 2766 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2767 { 2768 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF) 2769 }, 2770 { 2771 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV) 2772 }, 2773 { 2774 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV) 2775 }, 2776 {.vendor_id = 0,} 2777 }; 2778 2779 static const struct rte_pci_id pci_id_qede_map[] = { 2780 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2781 { 2782 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E) 2783 }, 2784 { 2785 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S) 2786 }, 2787 { 2788 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40) 2789 }, 2790 { 2791 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25) 2792 }, 2793 { 2794 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100) 2795 }, 2796 { 2797 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50) 2798 }, 2799 { 2800 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G) 2801 }, 2802 { 2803 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G) 2804 }, 2805 { 2806 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G) 2807 }, 2808 { 2809 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G) 2810 }, 2811 {.vendor_id = 0,} 2812 }; 2813 2814 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2815 struct rte_pci_device *pci_dev) 2816 { 2817 return rte_eth_dev_pci_generic_probe(pci_dev, 2818 sizeof(struct qede_dev), qedevf_eth_dev_init); 2819 } 2820 2821 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2822 { 2823 return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit); 2824 } 2825 2826 static struct rte_pci_driver rte_qedevf_pmd = { 2827 .id_table = pci_id_qedevf_map, 2828 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2829 .probe = qedevf_eth_dev_pci_probe, 2830 .remove = qedevf_eth_dev_pci_remove, 2831 }; 2832 2833 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2834 struct rte_pci_device *pci_dev) 2835 { 2836 return rte_eth_dev_pci_generic_probe(pci_dev, 2837 sizeof(struct qede_dev), qede_eth_dev_init); 2838 } 2839 2840 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2841 { 2842 return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit); 2843 } 2844 2845 static struct rte_pci_driver rte_qede_pmd = { 2846 .id_table = pci_id_qede_map, 2847 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2848 .probe = qede_eth_dev_pci_probe, 2849 .remove = qede_eth_dev_pci_remove, 2850 }; 2851 2852 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd); 2853 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map); 2854 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci"); 2855 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd); 2856 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map); 2857 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci"); 2858 RTE_LOG_REGISTER(qede_logtype_init, pmd.net.qede.init, NOTICE); 2859 RTE_LOG_REGISTER(qede_logtype_driver, pmd.net.qede.driver, NOTICE); 2860