/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "qede_ethdev.h"
#include <rte_string_fns.h>
#include <rte_alarm.h>
#include <rte_version.h>
#include <rte_kvargs.h>

/* Globals */
int qede_logtype_init;
int qede_logtype_driver;

static const struct qed_eth_ops *qed_ops;
static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);

#define QEDE_SP_TIMER_PERIOD	10000 /* 100ms */

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
	{"rx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},

	{"tx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
	{"tx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_1024_to_1518_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_1024_to_1518_byte_packets)},

	{"rx_mac_crtl_frames",
		offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
	{"tx_mac_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
	{"rx_pause_frames",
		offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
	{"tx_pause_frames",
		offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},

	{"rx_crc_errors",
		offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
	{"rx_align_errors",
		offsetof(struct ecore_eth_stats_common, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
	{"rx_jabber_errors",
		offsetof(struct ecore_eth_stats_common, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats_common, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats_common,
			 packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats_common, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
	{"rx_gft_filter_drop",
		offsetof(struct ecore_eth_stats_common, gft_filter_drop)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats_common, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats_common, brb_discards)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats_common,
			 tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats_common,
			 tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_9217_to_16383_byte_packets)},

	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_9217_to_16383_byte_packets)},

	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
};

static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
	{"rx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 rx_1519_to_max_byte_packets)},
	{"tx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 tx_1519_to_max_byte_packets)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
};

/* Get FW version string based on fw_size */
static int
qede_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
	size_t size;

	if (fw_ver == NULL)
		return 0;

	if (IS_PF(edev))
		snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
			 QEDE_PMD_FW_VERSION);
	else
		snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
			 info->fw_major, info->fw_minor,
			 info->fw_rev, info->fw_eng);
	size = strlen(ver_str);
	if (size + 1 <= fw_size) /* Add 1 byte for "\0" */
		strlcpy(fw_ver, ver_str, fw_size);
	else
		return (size + 1);

	snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size),
		 " MFW: %d.%d.%d.%d",
		 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_3),
		 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_2),
		 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_1),
		 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_0));
	size = strlen(ver_str);
	if (size + 1 <= fw_size)
		strlcpy(fw_ver, ver_str, fw_size);

	if (fw_size <= 32)
		goto out;

	snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size),
		 " MBI: %d.%d.%d",
		 GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_2),
		 GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_1),
		 GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_0));
	size = strlen(ver_str);
	if (size + 1 <= fw_size)
		strlcpy(fw_ver, ver_str, fw_size);

out:
	return 0;
}

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler_intx(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	u64 status;

	/* Check if our device actually raised an interrupt */
	status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
	if (status & 0x1) {
		qede_interrupt_action(ECORE_LEADING_HWFN(edev));

		if (rte_intr_ack(eth_dev->intr_handle))
			DP_ERR(edev, "rte_intr_ack failed\n");
	}
}

static void
qede_interrupt_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_ack(eth_dev->intr_handle))
		DP_ERR(edev, "rte_intr_ack failed\n");
}

static void
qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
{
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	bool use_tx_offload = false;

	if (ECORE_IS_CMT(edev)) {
		dev->rx_pkt_burst = qede_recv_pkts_cmt;
		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
		return;
	}

	if (dev->data->lro || dev->data->scattered_rx) {
		DP_INFO(edev, "Assigning qede_recv_pkts\n");
		dev->rx_pkt_burst = qede_recv_pkts;
	} else {
		DP_INFO(edev, "Assigning qede_recv_pkts_regular\n");
		dev->rx_pkt_burst = qede_recv_pkts_regular;
	}

	use_tx_offload = !!(tx_offloads &
			    (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
			     DEV_TX_OFFLOAD_TCP_TSO | /* tso */
			     DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */

	if (use_tx_offload) {
		DP_INFO(edev, "Assigning qede_xmit_pkts\n");
		dev->tx_pkt_burst = qede_xmit_pkts;
	} else {
		DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n");
		dev->tx_pkt_burst = qede_xmit_pkts_regular;
	}
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->ops = qed_ops;
}

static void qede_print_adapter_info(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "**************************************************\n");
	DP_INFO(edev, " %-20s: %s\n", "DPDK version", rte_version());
	DP_INFO(edev, " %-20s: %s %c%d\n", "Chip details",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		'A' + edev->chip_rev,
		(int)edev->chip_metal);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
		 QEDE_PMD_DRV_VERSION);
	DP_INFO(edev, " %-20s: %s\n", "Driver version", ver_str);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
		 QEDE_PMD_BASE_VERSION);
	DP_INFO(edev, " %-20s: %s\n", "Base version", ver_str);
	qede_fw_version_get(dev, ver_str, sizeof(ver_str));
	DP_INFO(edev, " %-20s: %s\n", "Firmware version", ver_str);
	DP_INFO(edev, " %-20s: %s\n", "Firmware file", qede_fw_file);
	DP_INFO(edev, "**************************************************\n");
}

static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for (qid = 0; qid < qdev->num_rx_queues; qid++) {
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rcv_pkts), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rx_hw_errors), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
			    sizeof(uint64_t));

		if (xstats)
			for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
				OSAL_MEMSET((((char *)
					      (qdev->fp_array[qid].rxq)) +
					     qede_rxq_xstats_strings[j].offset),
					    0,
					    sizeof(uint64_t));

		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	i = 0;

	for (qid = 0; qid < qdev->num_tx_queues; qid++) {
		txq = qdev->fp_array[qid].txq;

		OSAL_MEMSET((uint64_t *)(uintptr_t)
			    (((uint64_t)(uintptr_t)(txq)) +
			     offsetof(struct qede_tx_queue, xmit_pkts)), 0,
			    sizeof(uint64_t));

		i++;
		if (i == txq_stat_cntrs)
			break;
	}
}

static int
qede_stop_vport(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	uint8_t vport_id;
	int rc;
	int i;

	vport_id = 0;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
					 vport_id);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
			return rc;
		}
	}

	DP_INFO(edev, "vport stopped\n");

	return 0;
}

static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_start_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (qdev->vport_started)
		qede_stop_vport(edev);

	memset(&params, 0, sizeof(params));
	params.vport_id = 0;
	params.mtu = mtu;
	/* @DPDK - Disable FW placement */
	params.zero_placement_offset = 1;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_start(p_hwfn, &params);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Start V-PORT failed %d\n", rc);
			return rc;
		}
	}
	ecore_reset_vport_stats(edev);
	qdev->vport_started = true;
	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

	return 0;
}

#define QEDE_NPAR_TX_SWITCHING		"npar_tx_switching"
#define QEDE_VF_TX_SWITCHING		"vf_tx_switching"

/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc = -1;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_rx_flg = flg;
	params.vport_active_tx_flg = flg;
	if ((qdev->enable_tx_switching == false) && (flg == true)) {
		params.update_tx_switching_flg = 1;
		params.tx_switching_flg = !flg;
	}
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			break;
		}
	}
	DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");

	return rc;
}

static void
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
			   uint16_t mtu, bool enable)
{
	/* Enable LRO in split mode */
	sge_tpa_params->tpa_ipv4_en_flg = enable;
	sge_tpa_params->tpa_ipv6_en_flg = enable;
	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
	/* set if tpa enable changes */
	sge_tpa_params->update_tpa_en_flg = 1;
	/* set if tpa parameters should be handled */
	sge_tpa_params->update_tpa_param_flg = enable;

	sge_tpa_params->max_buffers_per_cqe = 20;
	/* Enable TPA in split mode. In this mode each TPA segment
	 * starts on the new BD, so there is one BD per segment.
	 */
	sge_tpa_params->tpa_pkt_split_flg = 1;
	sge_tpa_params->tpa_hdr_data_split_flg = 0;
	sge_tpa_params->tpa_gro_consistent_flg = 0;
	sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
	sge_tpa_params->tpa_max_size = 0x7FFF;
	sge_tpa_params->tpa_min_size_to_start = mtu / 2;
	sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}

/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_sge_tpa_params tpa_params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
	qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
	params.vport_id = 0;
	params.sge_tpa_params = &tpa_params;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update LRO\n");
			return -1;
		}
	}
	qdev->enable_lro = flg;
	eth_dev->data->lro = flg;

	DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");

	return 0;
}

static int
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
			     enum qed_filter_rx_mode_type type)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_accept_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.update_rx_mode_config = 1;
	flags.update_tx_mode_config = 1;
	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
				 ECORE_ACCEPT_MCAST_MATCHED |
				 ECORE_ACCEPT_BCAST;

	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
				 ECORE_ACCEPT_MCAST_MATCHED |
				 ECORE_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
		if (IS_VF(edev)) {
			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
		}
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
			    QED_FILTER_RX_MODE_TYPE_PROMISC)) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
					  ECORE_ACCEPT_MCAST_UNMATCHED;
	}

	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
				       ECORE_SPQ_MODE_CB, NULL);
}

int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_ucast_entry *tmp = NULL;
	struct qede_ucast_entry *u;
	struct rte_ether_addr *mac_addr;

	mac_addr = (struct rte_ether_addr *)ucast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    RTE_ETHER_ADDR_LEN) == 0) &&
			    ucast->vni == tmp->vni &&
			    ucast->vlan == tmp->vlan) {
				DP_INFO(edev, "Unicast MAC is already added"
					" with vlan = %u, vni = %u\n",
					ucast->vlan, ucast->vni);
				return 0;
			}
		}
		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!u) {
			DP_ERR(edev, "Did not allocate memory for ucast\n");
			return -ENOMEM;
		}
		rte_ether_addr_copy(mac_addr, &u->mac);
		u->vlan = ucast->vlan;
		u->vni = ucast->vni;
		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
		qdev->num_uc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    RTE_ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan &&
			    ucast->vni == tmp->vni)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Unicast MAC is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
		qdev->num_uc_addr--;
	}

	return 0;
}

static int
qede_add_mcast_filters(struct rte_eth_dev *eth_dev,
		       struct rte_ether_addr *mc_addrs,
		       uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_mcast mcast;
	struct qede_mcast_entry *m = NULL;
	uint8_t i;
	int rc;

	for (i = 0; i < mc_addrs_num; i++) {
		m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!m) {
			DP_ERR(edev, "Did not allocate memory for mcast\n");
			return -ENOMEM;
		}
		rte_ether_addr_copy(&mc_addrs[i], &m->mac);
		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
	}
	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = mc_addrs_num;
	mcast.opcode = ECORE_FILTER_ADD;
	for (i = 0; i < mc_addrs_num; i++)
		rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *)
						  &mcast.mac[i]);
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);
		return -1;
	}

	return 0;
}

static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_mcast_entry *tmp = NULL;
	struct ecore_filter_mcast mcast;
	int j;
	int rc;

	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = qdev->num_mc_addr;
	mcast.opcode = ECORE_FILTER_REMOVE;
	j = 0;
	SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
		rte_ether_addr_copy(&tmp->mac,
				    (struct rte_ether_addr *)&mcast.mac[j]);
		j++;
	}
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to delete multicast filter\n");
		return -1;
	}
	/* Init the list */
	while (!SLIST_EMPTY(&qdev->mc_list_head)) {
		tmp = SLIST_FIRST(&qdev->mc_list_head);
		SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
	}
	SLIST_INIT(&qdev->mc_list_head);

	return 0;
}

enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;

	if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
		DP_ERR(edev, "Ucast filter table limit exceeded,"
			     " Please enable promisc mode\n");
		return ECORE_INVAL;
	}

	rc = qede_ucast_filter(eth_dev, ucast, add);
	if (rc == 0)
		rc = ecore_filter_ucast_cmd(edev, ucast,
					    ECORE_SPQ_MODE_CB, NULL);
	/* Indicate error only for add filter operation.
	 * Delete filter operations are not severe.
	 */
	if ((rc != ECORE_SUCCESS) && add)
		DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
		       rc, add);

	return rc;
}

static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct ecore_filter_ucast ucast;
	int re;

	if (!rte_is_valid_assigned_ether_addr(mac_addr))
		return -EINVAL;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_ADD;
	ucast.type = ECORE_FILTER_MAC;
	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac);
	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
	return re;
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_filter_ucast ucast;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_filters) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_filters);
		return;
	}

	if (!rte_is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
		return;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;

	/* Use the index maintained by rte */
	rte_ether_addr_copy(&eth_dev->data->mac_addrs[index],
			    (struct rte_ether_addr *)&ucast.mac);

	qede_mac_int_ops(eth_dev, &ucast, false);
}

static int
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		return -EPERM;
	}

	qede_mac_addr_remove(eth_dev, 0);

	return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_accept_any_vlan_flg = 1;
	params.accept_any_vlan = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to configure accept-any-vlan\n");
			return;
		}
	}

	DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_inner_vlan_removal_flg = 1;
	params.inner_vlan_removal_flg = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			return -1;
		}
	}

	qdev->vlan_strip_flg = flg;

	DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
	return 0;
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	struct qede_vlan_entry *tmp = NULL;
	struct qede_vlan_entry *vlan;
	struct ecore_filter_ucast ucast;
	int rc;

	if (on) {
		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
			DP_ERR(edev, "Reached max VLAN filter limit"
				     " enabling accept_any_vlan\n");
			qede_config_accept_any_vlan(qdev, true);
			return 0;
		}

		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id) {
				DP_INFO(edev, "VLAN %u already configured\n",
					vlan_id);
				return 0;
			}
		}

		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
				  RTE_CACHE_LINE_SIZE);

		if (!vlan) {
			DP_ERR(edev, "Did not allocate memory for VLAN\n");
			return -ENOMEM;
		}

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_ADD;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
			rte_free(vlan);
		} else {
			vlan->vid = vlan_id;
			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
			qdev->configured_vlans++;
			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id)
				break;
		}

		if (!tmp) {
			if (qdev->configured_vlans == 0) {
				DP_INFO(edev,
					"No VLAN filters configured yet\n");
				return 0;
			}

			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
			return -EINVAL;
		}

		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_REMOVE;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		} else {
			qdev->configured_vlans--;
			DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	}

	return rc;
}

static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filtering kicks in when a VLAN is added */
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
			qede_vlan_filter_set(eth_dev, 0, 1);
		} else {
			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
				DP_ERR(edev,
				       " Please remove existing VLAN filters"
				       " before disabling VLAN filtering\n");
				/* Signal app that VLAN filtering is still
				 * enabled
				 */
				eth_dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_VLAN_FILTER;
			} else {
				qede_vlan_filter_set(eth_dev, 0, 0);
			}
		}
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		DP_ERR(edev, "Extend VLAN not supported\n");

	qdev->vlan_offload_mask = mask;

	DP_INFO(edev, "VLAN offload mask %d\n", mask);

	return 0;
}

static void qede_prandom_bytes(uint32_t *buff)
{
	uint8_t i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = rand();
}

int qede_config_rss(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, id, pos, q;

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
	}

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))
		return -EINVAL;

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(eth_dev);
		reta_conf[id].reta[pos] = q;
	}
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))
		return -EINVAL;

	return 0;
}

static void qede_fastpath_start(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		ecore_hw_start_fastpath(p_hwfn);
	}
}

static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE(edev);

	/* Update MTU only if it has changed */
	if (qdev->new_mtu && qdev->new_mtu != qdev->mtu) {
		if (qede_update_mtu(eth_dev, qdev->new_mtu))
			goto err;
		qdev->mtu = qdev->new_mtu;
		qdev->new_mtu = 0;
	}

	/* Configure TPA parameters */
	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (qede_enable_tpa(eth_dev, true))
			return -EINVAL;
		/* Enable scatter mode for LRO */
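		/* LRO-aggregated frames can exceed a single mbuf, so
		 * scattered Rx is enabled along with it.
		 */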
		if (!eth_dev->data->scattered_rx)
			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
	}

	/* Start queues */
	if (qede_start_queues(eth_dev))
		goto err;

	if (IS_PF(edev))
		qede_reset_queue_stats(qdev, true);

	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
	 * enabling RSS. Hence RSS configuration is deferred up to this point.
	 * Also, we would like to retain similar behavior in PF case, so we
	 * don't do PF/VF specific check here.
	 */
	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		if (qede_config_rss(eth_dev))
			goto err;

	/* Enable vport */
	if (qede_activate_vport(eth_dev, true))
		goto err;

	/* Update link status */
	qede_link_update(eth_dev, 0);

	/* Start/resume traffic */
	qede_fastpath_start(edev);

	qede_assign_rxtx_handlers(eth_dev);
	DP_INFO(edev, "Device started\n");

	return 0;
err:
	DP_ERR(edev, "Device start failed\n");
	return -1; /* common error code is < 0 */
}

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Disable vport */
	if (qede_activate_vport(eth_dev, false))
		return;

	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false);

	/* Stop queues */
	qede_stop_queues(eth_dev);

	/* Disable traffic */
	ecore_hw_stop_fastpath(edev); /* TBD - loop */

	DP_INFO(edev, "Device is stopped\n");
}

static const char * const valid_args[] = {
	QEDE_NPAR_TX_SWITCHING,
	QEDE_VF_TX_SWITCHING,
	NULL,
};

static int qede_args_check(const char *key, const char *val, void *opaque)
{
	unsigned long tmp;
	int ret = 0;
	struct rte_eth_dev *eth_dev = opaque;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
		return errno;
	}

	if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
	    ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
		qdev->enable_tx_switching = !!tmp;
		DP_INFO(edev, "Disabling %s tx-switching\n",
			strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
			"VF" : "NPAR");
	}

	return ret;
}

static int qede_args(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	struct rte_kvargs *kvlist;
	struct rte_devargs *devargs;
	int ret;
	int i;

	devargs = pci_dev->device.devargs;
	if (!devargs)
		return 0; /* return success */

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Process parameters. */
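	/* rte_kvargs_parse() above validated the keys against valid_args,
	 * so only known keys are iterated here.
	 */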
	for (i = 0; (valid_args[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, valid_args[i])) {
			ret = rte_kvargs_process(kvlist, valid_args[i],
						 qede_args_check, eth_dev);
			if (ret != ECORE_SUCCESS) {
				rte_kvargs_free(kvlist);
				return ret;
			}
		}
	}
	rte_kvargs_free(kvlist);

	return 0;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	int ret;

	PMD_INIT_FUNC_TRACE(edev);

	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* We need to have min 1 RX queue. There is no min check in
	 * rte_eth_dev_configure(), so we are checking it here.
	 */
	if (eth_dev->data->nb_rx_queues == 0) {
		DP_ERR(edev, "Minimum one RX queue is required\n");
		return -EINVAL;
	}

	/* Enable Tx switching by default */
	qdev->enable_tx_switching = 1;

	/* Parse devargs and fix up rxmode */
	if (qede_args(eth_dev))
		DP_NOTICE(edev, false,
			  "Invalid devargs supplied, requested change will not take effect\n");

	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
		DP_ERR(edev, "Unsupported multi-queue mode\n");
		return -ENOTSUP;
	}
	/* Flow director mode check */
	if (qede_check_fdir_support(eth_dev))
		return -ENOTSUP;

	qede_dealloc_fp_resc(eth_dev);
	qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns;
	qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns;

	if (qede_alloc_fp_resc(qdev))
		return -ENOMEM;

	/* If jumbo enabled adjust MTU */
	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;

	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;

	if (qede_start_vport(qdev, eth_dev->data->mtu))
		return -1;

	qdev->mtu = eth_dev->data->mtu;

	/* Enable VLAN offloads by default */
	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
					     ETH_VLAN_FILTER_MASK);
	if (ret)
		return ret;

	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
		QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev));

	if (ECORE_IS_CMT(edev))
		DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n",
			qdev->num_rx_queues, qdev->num_tx_queues);


	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 256,
	.nb_align = 256,
	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};

static int
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link;
	uint32_t speed_cap = 0;

	PMD_INIT_FUNC_TRACE(edev);

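	/* Fixed buffer/descriptor limits first; queue counts and offload
	 * capabilities filled in below depend on PF/VF role and CMT mode.
	 */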
	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;

	if (IS_PF(edev))
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
	else
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
	/* Since CMT mode internally doubles the number of queues */
	if (ECORE_IS_CMT(edev))
		dev_info->max_rx_queues = dev_info->max_rx_queues / 2;

	dev_info->max_tx_queues = dev_info->max_rx_queues;

	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
	dev_info->max_vfs = 0;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM |
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_TCP_LRO |
				     DEV_RX_OFFLOAD_KEEP_CRC |
				     DEV_RX_OFFLOAD_SCATTER |
				     DEV_RX_OFFLOAD_JUMBO_FRAME |
				     DEV_RX_OFFLOAD_VLAN_FILTER |
				     DEV_RX_OFFLOAD_VLAN_STRIP |
				     DEV_RX_OFFLOAD_RSS_HASH);
	dev_info->rx_queue_offload_capa = 0;

	/* TX offloads are on a per-packet basis, so it is applicable
	 * to both at port and queue levels.
	 */
	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM |
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_MULTI_SEGS |
				     DEV_TX_OFFLOAD_TCP_TSO |
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
	};

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		/* Packets are always dropped if no descriptors are available */
		.rx_drop_en = 1,
		.offloads = 0,
	};

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		speed_cap |= ETH_LINK_SPEED_1G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		speed_cap |= ETH_LINK_SPEED_10G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		speed_cap |= ETH_LINK_SPEED_25G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		speed_cap |= ETH_LINK_SPEED_40G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		speed_cap |= ETH_LINK_SPEED_50G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		speed_cap |= ETH_LINK_SPEED_100G;
	dev_info->speed_capa = speed_cap;

	return 0;
}

/* return 0 means link status changed, -1 means not changed */
int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output q_link;
	struct rte_eth_link link;
	uint16_t link_duplex;

	memset(&q_link, 0, sizeof(q_link));
	memset(&link, 0, sizeof(link));

	qdev->ops->common->get_link(edev, &q_link);

	/* Link Speed */
	link.link_speed = q_link.speed;

	/* Link Mode */
	switch (q_link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	link.link_duplex = link_duplex;

	/* Link Status */
	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			    ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		link.link_speed, link.link_duplex,
		link.link_autoneg, link.link_status);

	return rte_eth_linkstatus_set(eth_dev, &link);
}

static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
	enum _ecore_status_t ecore_status;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	enum _ecore_status_t ecore_status;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
	}
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources and the device can be brought up anew.
	 */
	if (eth_dev->data->dev_started)
		qede_dev_stop(eth_dev);

	if (qdev->vport_started)
		qede_stop_vport(edev);
	qdev->vport_started = false;
	qede_fdir_dealloc_resc(eth_dev);
	qede_dealloc_fp_resc(eth_dev);

	eth_dev->data->nb_rx_queues = 0;
	eth_dev->data->nb_tx_queues = 0;

	/* Bring the link down */
	qede_dev_set_link_state(eth_dev, false);
	qdev->ops->common->slowpath_stop(edev);
	qdev->ops->common->remove(edev);
	rte_intr_disable(&pci_dev->intr_handle);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     qede_interrupt_handler_intx,
					     (void *)eth_dev);
		break;
	default:
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     qede_interrupt_handler,
					     (void *)eth_dev);
	}

	if (ECORE_IS_CMT(edev))
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}

static int
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int i = 0, j = 0, qid, idx, hw_fn;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	ecore_get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.common.rx_ucast_pkts +
	    stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;

	eth_stats->ibytes = stats.common.rx_ucast_bytes +
	    stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;

	eth_stats->ierrors = stats.common.rx_crc_errors +
	    stats.common.rx_align_errors +
	    stats.common.rx_carrier_errors +
	    stats.common.rx_oversize_packets +
	    stats.common.rx_jabbers + stats.common.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.common.no_buff_discards;

	eth_stats->imissed = stats.common.mftag_filter_discards +
	    stats.common.mac_filter_discards +
	    stats.common.no_buff_discards +
	    stats.common.brb_truncates + stats.common.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.common.tx_ucast_pkts +
	    stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;

	eth_stats->obytes = stats.common.tx_ucast_bytes +
	    stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;

	eth_stats->oerrors = stats.common.tx_err_drop_pkts;

	/* Queue stats */
	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) ||
	    txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev))
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
			   "Not all the queue stats will be displayed. Set"
			   " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
			   " appropriately and retry.\n");

	for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) {
		eth_stats->q_ipackets[i] = 0;
		eth_stats->q_errors[i] = 0;

		for_each_hwfn(edev, hw_fn) {
			idx = qid * edev->num_hwfns + hw_fn;

			eth_stats->q_ipackets[i] +=
				*(uint64_t *)
				(((char *)(qdev->fp_array[idx].rxq)) +
				 offsetof(struct qede_rx_queue,
					  rcv_pkts));
			eth_stats->q_errors[i] +=
				*(uint64_t *)
				(((char *)(qdev->fp_array[idx].rxq)) +
				 offsetof(struct qede_rx_queue,
					  rx_hw_errors)) +
				*(uint64_t *)
				(((char *)(qdev->fp_array[idx].rxq)) +
				 offsetof(struct qede_rx_queue,
					  rx_alloc_errors));
		}

		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) {
		eth_stats->q_opackets[j] = 0;

		for_each_hwfn(edev, hw_fn) {
			idx = qid * edev->num_hwfns + hw_fn;

			txq = qdev->fp_array[idx].txq;
			eth_stats->q_opackets[j] +=
				*((uint64_t *)(uintptr_t)
				  (((uint64_t)(uintptr_t)(txq)) +
				   offsetof(struct qede_tx_queue,
					    xmit_pkts)));
		}

		j++;
		if (j == txq_stat_cntrs)
			break;
	}

	return 0;
}

static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
	struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;

	if (ECORE_IS_BB(&qdev->edev))
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_bb_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns);
	else
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_ah_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			QEDE_RSS_COUNT(dev));
}

static int
qede_get_xstats_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
	unsigned int i, qid, hw_fn, stat_idx = 0;

	if (xstats_names == NULL)
		return stat_cnt;

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		strlcpy(xstats_names[stat_idx].name,
			qede_xstats_strings[i].name,
			sizeof(xstats_names[stat_idx].name));
		stat_idx++;
	}

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			strlcpy(xstats_names[stat_idx].name,
				qede_bb_xstats_strings[i].name,
				sizeof(xstats_names[stat_idx].name));
			stat_idx++;
		}
	} else {
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			strlcpy(xstats_names[stat_idx].name,
				qede_ah_xstats_strings[i].name,
				sizeof(xstats_names[stat_idx].name));
			stat_idx++;
		}
	}

	for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) {
		for_each_hwfn(edev, hw_fn) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 RTE_ETH_XSTATS_NAME_SIZE,
					 "%.4s%d.%d%s",
					 qede_rxq_xstats_strings[i].name,
					 hw_fn, qid,
					 qede_rxq_xstats_strings[i].name + 4);
				stat_idx++;
			}
		}
	}

	return stat_cnt;
}

static int
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	const unsigned int num = qede_get_xstats_count(qdev);
	unsigned int i, qid, hw_fn, fpidx, stat_idx = 0;

	if (n < num)
		return num;

	ecore_get_vport_stats(edev, &stats);

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
					   qede_xstats_strings[i].offset);
		xstats[stat_idx].id = stat_idx;
		stat_idx++;
	}

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			xstats[stat_idx].value =
				*(uint64_t *)(((char *)&stats) +
					      qede_bb_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	} else {
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			xstats[stat_idx].value =
				*(uint64_t *)(((char *)&stats) +
					      qede_ah_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		for_each_hwfn(edev, hw_fn) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				fpidx = qid * edev->num_hwfns + hw_fn;
				xstats[stat_idx].value = *(uint64_t *)
					(((char *)(qdev->fp_array[fpidx].rxq)) +
					 qede_rxq_xstats_strings[i].offset);
				xstats[stat_idx].id = stat_idx;
				stat_idx++;
			}

		}
	}

	return stat_idx;
}

static int
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, true);

	return 0;
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static int qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, false);

	return 0;
}

static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	enum _ecore_status_t ecore_status;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	enum _ecore_status_t ecore_status;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static int
qede_set_mc_addr_list(struct rte_eth_dev *eth_dev,
		      struct rte_ether_addr *mc_addrs,
		      uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint8_t i;

	if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
		DP_ERR(edev, "Reached max multicast filters limit, "
			     "please enable multicast promisc mode\n");
		return -ENOSPC;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
			DP_ERR(edev, "Not a valid multicast MAC\n");
			return -EINVAL;
		}
	}

	/* Flush all existing entries */
	if (qede_del_mcast_filters(eth_dev))
		return -1;

	/* Set new mcast list */
	return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
}

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (IS_PF(edev)) {
		struct ecore_sp_vport_update_params params;

		memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
		params.vport_id = 0;
		params.mtu = mtu;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			params.opaque_fid = p_hwfn->hw_info.opaque_fid;
			rc = ecore_sp_vport_update(p_hwfn, &params,
						   ECORE_SPQ_MODE_EBLOCK, NULL);
			if (rc != ECORE_SUCCESS)
				goto err;
		}
	} else {
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
			if (rc == ECORE_INVAL) {
				DP_INFO(edev, "VF MTU Update TLV not supported\n");
				/* Recreate vport */
				rc = qede_start_vport(qdev, mtu);
				if (rc != ECORE_SUCCESS)
					goto err;

				/* Restore config lost due to vport stop */
				if (eth_dev->data->promiscuous)
					qede_promiscuous_enable(eth_dev);
				else
					qede_promiscuous_disable(eth_dev);

				if (eth_dev->data->all_multicast)
					qede_allmulticast_enable(eth_dev);
				else
					qede_allmulticast_disable(eth_dev);

				qede_vlan_offload_set(eth_dev,
						      qdev->vlan_offload_mask);
			} else if (rc != ECORE_SUCCESS) {
				goto err;
			}
		}
	}
	DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);
"PF" : "VF", mtu); 1969 1970 return 0; 1971 1972 err: 1973 DP_ERR(edev, "Failed to update MTU\n"); 1974 return -1; 1975 } 1976 1977 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, 1978 struct rte_eth_fc_conf *fc_conf) 1979 { 1980 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1981 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1982 struct qed_link_output current_link; 1983 struct qed_link_params params; 1984 1985 memset(¤t_link, 0, sizeof(current_link)); 1986 qdev->ops->common->get_link(edev, ¤t_link); 1987 1988 memset(¶ms, 0, sizeof(params)); 1989 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; 1990 if (fc_conf->autoneg) { 1991 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) { 1992 DP_ERR(edev, "Autoneg not supported\n"); 1993 return -EINVAL; 1994 } 1995 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 1996 } 1997 1998 /* Pause is assumed to be supported (SUPPORTED_Pause) */ 1999 if (fc_conf->mode == RTE_FC_FULL) 2000 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE | 2001 QED_LINK_PAUSE_RX_ENABLE); 2002 if (fc_conf->mode == RTE_FC_TX_PAUSE) 2003 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; 2004 if (fc_conf->mode == RTE_FC_RX_PAUSE) 2005 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; 2006 2007 params.link_up = true; 2008 (void)qdev->ops->common->set_link(edev, ¶ms); 2009 2010 return 0; 2011 } 2012 2013 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev, 2014 struct rte_eth_fc_conf *fc_conf) 2015 { 2016 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2017 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2018 struct qed_link_output current_link; 2019 2020 memset(¤t_link, 0, sizeof(current_link)); 2021 qdev->ops->common->get_link(edev, ¤t_link); 2022 2023 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 2024 fc_conf->autoneg = true; 2025 2026 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE | 2027 QED_LINK_PAUSE_TX_ENABLE)) 2028 fc_conf->mode = RTE_FC_FULL; 2029 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) 2030 fc_conf->mode = RTE_FC_RX_PAUSE; 2031 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) 2032 fc_conf->mode = RTE_FC_TX_PAUSE; 2033 else 2034 fc_conf->mode = RTE_FC_NONE; 2035 2036 return 0; 2037 } 2038 2039 static const uint32_t * 2040 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) 2041 { 2042 static const uint32_t ptypes[] = { 2043 RTE_PTYPE_L2_ETHER, 2044 RTE_PTYPE_L2_ETHER_VLAN, 2045 RTE_PTYPE_L3_IPV4, 2046 RTE_PTYPE_L3_IPV6, 2047 RTE_PTYPE_L4_TCP, 2048 RTE_PTYPE_L4_UDP, 2049 RTE_PTYPE_TUNNEL_VXLAN, 2050 RTE_PTYPE_L4_FRAG, 2051 RTE_PTYPE_TUNNEL_GENEVE, 2052 RTE_PTYPE_TUNNEL_GRE, 2053 /* Inner */ 2054 RTE_PTYPE_INNER_L2_ETHER, 2055 RTE_PTYPE_INNER_L2_ETHER_VLAN, 2056 RTE_PTYPE_INNER_L3_IPV4, 2057 RTE_PTYPE_INNER_L3_IPV6, 2058 RTE_PTYPE_INNER_L4_TCP, 2059 RTE_PTYPE_INNER_L4_UDP, 2060 RTE_PTYPE_INNER_L4_FRAG, 2061 RTE_PTYPE_UNKNOWN 2062 }; 2063 2064 if (eth_dev->rx_pkt_burst == qede_recv_pkts || 2065 eth_dev->rx_pkt_burst == qede_recv_pkts_regular || 2066 eth_dev->rx_pkt_burst == qede_recv_pkts_cmt) 2067 return ptypes; 2068 2069 return NULL; 2070 } 2071 2072 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf) 2073 { 2074 *rss_caps = 0; 2075 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0; 2076 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0; 2077 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0; 2078 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; 2079 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 
static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
	*rss_caps = 0;
	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}

int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params rss_params;
	struct ecore_hwfn *p_hwfn;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	uint8_t len = rss_conf->rss_key_len;
	uint8_t idx, i, j, fpidx;
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&rss_params, 0, sizeof(rss_params));

	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
		(unsigned long)hf, len, key);

	if (hf != 0) {
		/* Enabling RSS */
		DP_INFO(edev, "Enabling rss\n");

		/* RSS caps */
		qede_init_rss_caps(&rss_params.rss_caps, hf);
		rss_params.update_rss_capabilities = 1;

		/* RSS hash key */
		if (key) {
			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
				DP_ERR(edev, "RSS key length exceeds limit\n");
				return -EINVAL;
			}
			DP_INFO(edev, "Applying user supplied hash key\n");
			rss_params.update_rss_key = 1;
			memcpy(&rss_params.rss_key, key, len);
		}
		rss_params.rss_enable = 1;
	}

	rss_params.update_rss_config = 1;
	/* tbl_size has to be set with capabilities */
	rss_params.rss_table_size_log = 7;
	vport_update_params.vport_id = 0;

	for_each_hwfn(edev, i) {
		/* pass the L2 handles instead of qids */
		for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
			idx = j % QEDE_RSS_COUNT(eth_dev);
			fpidx = idx * edev->num_hwfns + i;
			rss_params.rss_ind_table[j] =
				qdev->fp_array[fpidx].rxq->handle;
		}

		vport_update_params.rss_params = &rss_params;

		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			return rc;
		}
	}
	qdev->rss_enable = rss_params.rss_enable;

	/* Update local structure for hash query */
	qdev->rss_conf.rss_hf = hf;
	qdev->rss_conf.rss_key_len = len;
	if (qdev->rss_enable) {
		if (qdev->rss_conf.rss_key == NULL) {
			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
			if (qdev->rss_conf.rss_key == NULL) {
				DP_ERR(edev, "No memory to store RSS key\n");
				return -ENOMEM;
			}
		}
		if (key && len) {
			DP_INFO(edev, "Storing RSS key\n");
			memcpy(qdev->rss_conf.rss_key, key, len);
		}
	} else if (!qdev->rss_enable && len == 0) {
		if (qdev->rss_conf.rss_key) {
			free(qdev->rss_conf.rss_key);
			qdev->rss_conf.rss_key = NULL;
			DP_INFO(edev, "Free RSS key\n");
		}
	}

	return 0;
}

static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);
	return 0;
}

int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params *params;
	uint16_t i, j, idx, fid, shift;
	struct ecore_hwfn *p_hwfn;
	uint8_t entry;
	int rc = 0;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE);
	if (params == NULL) {
		DP_ERR(edev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	params->update_rss_ind_table = 1;
	params->rss_table_size_log = 7;
	params->update_rss_config = 1;

	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params->rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = params;

	for_each_hwfn(edev, i) {
		for (j = 0; j < reta_size; j++) {
			idx = j / RTE_RETA_GROUP_SIZE;
			shift = j % RTE_RETA_GROUP_SIZE;
			if (reta_conf[idx].mask & (1ULL << shift)) {
				entry = reta_conf[idx].reta[shift];
				fid = entry * edev->num_hwfns + i;
				/* Pass rxq handles to ecore */
				params->rss_ind_table[j] =
					qdev->fp_array[fid].rxq->handle;
				/* Update the local copy for RETA query cmd */
				qdev->rss_ind_table[j] = entry;
			}
		}

		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			goto out;
		}
	}

out:
	rte_free(params);
	return rc;
}

static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}
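
/* Changing the MTU requires quiescing the port: the Rx/Tx burst handlers are
 * swapped for dummies, the device is stopped if it was running, every Rx
 * queue's buffer size is recalculated for the new frame size, and the port is
 * then restarted with the updated max_rx_pkt_len.
 */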
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_dev_info dev_info = {0};
	struct qede_fastpath *fp;
	uint32_t max_rx_pkt_len;
	uint32_t frame_size;
	uint16_t bufsz;
	bool restart = false;
	int i, rc;

	PMD_INIT_FUNC_TRACE(edev);
	rc = qede_dev_info_get(dev, &dev_info);
	if (rc != 0) {
		DP_ERR(edev, "Error during getting ethernet device info\n");
		return rc;
	}
	max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
	frame_size = max_rx_pkt_len;
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
		       mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
		       QEDE_ETH_OVERHEAD);
		return -EINVAL;
	}
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
			dev->data->min_rx_buf_size);
		return -EINVAL;
	}
	/* Temporarily replace I/O functions with dummy ones. It cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
	if (dev->data->dev_started) {
		dev->data->dev_started = 0;
		qede_dev_stop(dev);
		restart = true;
	}
	rte_delay_ms(1000);
	qdev->new_mtu = mtu;

	/* Fix up RX buf size for all queues of the port */
	for (i = 0; i < qdev->num_rx_queues; i++) {
		fp = &qdev->fp_array[i];
		if (fp->rxq != NULL) {
			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
			/* cache align the mbuf size to simplify rx_buf_size
			 * calculation
			 */
			bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
			rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
			if (rc < 0)
				return rc;

			fp->rxq->rx_buf_size = rc;
		}
	}
	if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (!dev->data->dev_started && restart) {
		qede_dev_start(dev);
		dev->data->dev_started = 1;
	}

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;

	/* Reassign back */
	qede_assign_rxtx_handlers(dev);
	if (ECORE_IS_CMT(edev)) {
		dev->rx_pkt_burst = qede_recv_pkts_cmt;
		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
	} else {
		dev->rx_pkt_burst = qede_recv_pkts;
		dev->tx_pkt_burst = qede_xmit_pkts;
	}
	return 0;
}

static int
qede_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = qede_eth_dev_uninit(dev);
	if (ret)
		return ret;

	return qede_eth_dev_init(dev);
}
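
/* PF eth_dev ops table. The VF table that follows is identical except that
 * the PF-only operations (flow control and filter_ctrl) are omitted.
 */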
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
	.fw_version_get = qede_fw_version_get,
};

static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.fw_version_get = qede_fw_version_get,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}
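
/* Common PF/VF probe path: attach to the device through ecore, register the
 * interrupt handler, start the slowpath, discover device capabilities and
 * allocate the MAC address table, then record the initial port defaults.
 */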
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t int_mode;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		rc = -EINVAL;
		goto err;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		rc = -ENODEV;
		goto err;
	}
	qede_update_pf_params(edev);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		int_mode = ECORE_INT_MODE_INTA;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler_intx,
					   (void *)eth_dev);
		break;
	default:
		int_mode = ECORE_INT_MODE_MSIX;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler,
					   (void *)eth_dev);
	}

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		rc = -ENODEV;
		goto err;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));

	params.int_mode = int_mode;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	qede_assign_rxtx_handlers(eth_dev);
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			rc = -EINVAL;
			goto err;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		rc = -ENODEV;
		goto err;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		rc = -ENODEV;
		goto err;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	if (do_once) {
		qede_print_adapter_info(eth_dev);
		do_once = false;
	}

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
				(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(RTE_ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0].
				    hw_info.hw_mac_addr,
				    &eth_dev->data->mac_addrs[0]);
		rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
				    &adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				rte_ether_addr_copy(
					(struct rte_ether_addr *)&vf_mac,
					&eth_dev->data->mac_addrs[0]);
				rte_ether_addr_copy(
					&eth_dev->data->mac_addrs[0],
					&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
&qede_eth_vf_dev_ops : &qede_eth_dev_ops; 2674 2675 /* Bring-up the link */ 2676 qede_dev_set_link_state(eth_dev, true); 2677 2678 adapter->num_tx_queues = 0; 2679 adapter->num_rx_queues = 0; 2680 SLIST_INIT(&adapter->arfs_info.arfs_list_head); 2681 SLIST_INIT(&adapter->vlan_list_head); 2682 SLIST_INIT(&adapter->uc_list_head); 2683 SLIST_INIT(&adapter->mc_list_head); 2684 adapter->mtu = RTE_ETHER_MTU; 2685 adapter->vport_started = false; 2686 2687 /* VF tunnel offloads is enabled by default in PF driver */ 2688 adapter->vxlan.num_filters = 0; 2689 adapter->geneve.num_filters = 0; 2690 adapter->ipgre.num_filters = 0; 2691 if (is_vf) { 2692 adapter->vxlan.enable = true; 2693 adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC | 2694 ETH_TUNNEL_FILTER_IVLAN; 2695 adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT; 2696 adapter->geneve.enable = true; 2697 adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC | 2698 ETH_TUNNEL_FILTER_IVLAN; 2699 adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT; 2700 adapter->ipgre.enable = true; 2701 adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC | 2702 ETH_TUNNEL_FILTER_IVLAN; 2703 } else { 2704 adapter->vxlan.enable = false; 2705 adapter->geneve.enable = false; 2706 adapter->ipgre.enable = false; 2707 } 2708 2709 DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", 2710 adapter->primary_mac.addr_bytes[0], 2711 adapter->primary_mac.addr_bytes[1], 2712 adapter->primary_mac.addr_bytes[2], 2713 adapter->primary_mac.addr_bytes[3], 2714 adapter->primary_mac.addr_bytes[4], 2715 adapter->primary_mac.addr_bytes[5]); 2716 2717 DP_INFO(edev, "Device initialized\n"); 2718 2719 return 0; 2720 2721 err: 2722 if (do_once) { 2723 qede_print_adapter_info(eth_dev); 2724 do_once = false; 2725 } 2726 return rc; 2727 } 2728 2729 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev) 2730 { 2731 return qede_common_dev_init(eth_dev, 1); 2732 } 2733 2734 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev) 2735 { 2736 return qede_common_dev_init(eth_dev, 0); 2737 } 2738 2739 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev) 2740 { 2741 struct qede_dev *qdev = eth_dev->data->dev_private; 2742 struct ecore_dev *edev = &qdev->edev; 2743 2744 PMD_INIT_FUNC_TRACE(edev); 2745 2746 /* only uninitialize in the primary process */ 2747 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2748 return 0; 2749 2750 /* safe to close dev here */ 2751 qede_dev_close(eth_dev); 2752 2753 eth_dev->dev_ops = NULL; 2754 eth_dev->rx_pkt_burst = NULL; 2755 eth_dev->tx_pkt_burst = NULL; 2756 2757 return 0; 2758 } 2759 2760 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2761 { 2762 return qede_dev_common_uninit(eth_dev); 2763 } 2764 2765 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2766 { 2767 return qede_dev_common_uninit(eth_dev); 2768 } 2769 2770 static const struct rte_pci_id pci_id_qedevf_map[] = { 2771 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2772 { 2773 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF) 2774 }, 2775 { 2776 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV) 2777 }, 2778 { 2779 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV) 2780 }, 2781 {.vendor_id = 0,} 2782 }; 2783 2784 static const struct rte_pci_id pci_id_qede_map[] = { 2785 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2786 { 2787 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E) 2788 }, 2789 { 2790 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S) 2791 }, 2792 { 2793 
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};

static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				    struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");

RTE_INIT(qede_init_log)
{
	qede_logtype_init = rte_log_register("pmd.net.qede.init");
	if (qede_logtype_init >= 0)
		rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
	qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
	if (qede_logtype_driver >= 0)
		rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
}