1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2016 - 2018 Cavium Inc. 3 * All rights reserved. 4 * www.cavium.com 5 */ 6 7 #include "qede_ethdev.h" 8 #include <rte_string_fns.h> 9 #include <rte_alarm.h> 10 #include <rte_kvargs.h> 11 12 static const struct qed_eth_ops *qed_ops; 13 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev); 14 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev); 15 16 #define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */ 17 18 struct rte_qede_xstats_name_off { 19 char name[RTE_ETH_XSTATS_NAME_SIZE]; 20 uint64_t offset; 21 }; 22 23 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = { 24 {"rx_unicast_bytes", 25 offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)}, 26 {"rx_multicast_bytes", 27 offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)}, 28 {"rx_broadcast_bytes", 29 offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)}, 30 {"rx_unicast_packets", 31 offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)}, 32 {"rx_multicast_packets", 33 offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)}, 34 {"rx_broadcast_packets", 35 offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)}, 36 37 {"tx_unicast_bytes", 38 offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)}, 39 {"tx_multicast_bytes", 40 offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)}, 41 {"tx_broadcast_bytes", 42 offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)}, 43 {"tx_unicast_packets", 44 offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)}, 45 {"tx_multicast_packets", 46 offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)}, 47 {"tx_broadcast_packets", 48 offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)}, 49 50 {"rx_64_byte_packets", 51 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)}, 52 {"rx_65_to_127_byte_packets", 53 offsetof(struct ecore_eth_stats_common, 54 rx_65_to_127_byte_packets)}, 55 {"rx_128_to_255_byte_packets", 56 offsetof(struct ecore_eth_stats_common, 57 rx_128_to_255_byte_packets)}, 58 {"rx_256_to_511_byte_packets", 59 offsetof(struct ecore_eth_stats_common, 60 rx_256_to_511_byte_packets)}, 61 {"rx_512_to_1023_byte_packets", 62 offsetof(struct ecore_eth_stats_common, 63 rx_512_to_1023_byte_packets)}, 64 {"rx_1024_to_1518_byte_packets", 65 offsetof(struct ecore_eth_stats_common, 66 rx_1024_to_1518_byte_packets)}, 67 {"tx_64_byte_packets", 68 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)}, 69 {"tx_65_to_127_byte_packets", 70 offsetof(struct ecore_eth_stats_common, 71 tx_65_to_127_byte_packets)}, 72 {"tx_128_to_255_byte_packets", 73 offsetof(struct ecore_eth_stats_common, 74 tx_128_to_255_byte_packets)}, 75 {"tx_256_to_511_byte_packets", 76 offsetof(struct ecore_eth_stats_common, 77 tx_256_to_511_byte_packets)}, 78 {"tx_512_to_1023_byte_packets", 79 offsetof(struct ecore_eth_stats_common, 80 tx_512_to_1023_byte_packets)}, 81 {"tx_1024_to_1518_byte_packets", 82 offsetof(struct ecore_eth_stats_common, 83 tx_1024_to_1518_byte_packets)}, 84 85 {"rx_mac_crtl_frames", 86 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)}, 87 {"tx_mac_control_frames", 88 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)}, 89 {"rx_pause_frames", 90 offsetof(struct ecore_eth_stats_common, rx_pause_frames)}, 91 {"tx_pause_frames", 92 offsetof(struct ecore_eth_stats_common, tx_pause_frames)}, 93 {"rx_priority_flow_control_frames", 94 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)}, 95 {"tx_priority_flow_control_frames", 96 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)}, 97 98 {"rx_crc_errors", 99 offsetof(struct ecore_eth_stats_common, rx_crc_errors)}, 100 {"rx_align_errors", 101 offsetof(struct ecore_eth_stats_common, rx_align_errors)}, 102 {"rx_carrier_errors", 103 offsetof(struct ecore_eth_stats_common, rx_carrier_errors)}, 104 {"rx_oversize_packet_errors", 105 offsetof(struct ecore_eth_stats_common, rx_oversize_packets)}, 106 {"rx_jabber_errors", 107 offsetof(struct ecore_eth_stats_common, rx_jabbers)}, 108 {"rx_undersize_packet_errors", 109 offsetof(struct ecore_eth_stats_common, rx_undersize_packets)}, 110 {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)}, 111 {"rx_host_buffer_not_available", 112 offsetof(struct ecore_eth_stats_common, no_buff_discards)}, 113 /* Number of packets discarded because they are bigger than MTU */ 114 {"rx_packet_too_big_discards", 115 offsetof(struct ecore_eth_stats_common, 116 packet_too_big_discard)}, 117 {"rx_ttl_zero_discards", 118 offsetof(struct ecore_eth_stats_common, ttl0_discard)}, 119 {"rx_multi_function_tag_filter_discards", 120 offsetof(struct ecore_eth_stats_common, mftag_filter_discards)}, 121 {"rx_mac_filter_discards", 122 offsetof(struct ecore_eth_stats_common, mac_filter_discards)}, 123 {"rx_gft_filter_drop", 124 offsetof(struct ecore_eth_stats_common, gft_filter_drop)}, 125 {"rx_hw_buffer_truncates", 126 offsetof(struct ecore_eth_stats_common, brb_truncates)}, 127 {"rx_hw_buffer_discards", 128 offsetof(struct ecore_eth_stats_common, brb_discards)}, 129 {"tx_error_drop_packets", 130 offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)}, 131 132 {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)}, 133 {"rx_mac_unicast_packets", 134 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)}, 135 {"rx_mac_multicast_packets", 136 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)}, 137 {"rx_mac_broadcast_packets", 138 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)}, 139 {"rx_mac_frames_ok", 140 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)}, 141 {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)}, 142 {"tx_mac_unicast_packets", 143 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)}, 144 {"tx_mac_multicast_packets", 145 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)}, 146 {"tx_mac_broadcast_packets", 147 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)}, 148 149 {"lro_coalesced_packets", 150 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)}, 151 {"lro_coalesced_events", 152 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)}, 153 {"lro_aborts_num", 154 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)}, 155 {"lro_not_coalesced_packets", 156 offsetof(struct ecore_eth_stats_common, 157 tpa_not_coalesced_pkts)}, 158 {"lro_coalesced_bytes", 159 offsetof(struct ecore_eth_stats_common, 160 tpa_coalesced_bytes)}, 161 }; 162 163 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = { 164 {"rx_1519_to_1522_byte_packets", 165 offsetof(struct ecore_eth_stats, bb) + 166 offsetof(struct ecore_eth_stats_bb, 167 rx_1519_to_1522_byte_packets)}, 168 {"rx_1519_to_2047_byte_packets", 169 offsetof(struct ecore_eth_stats, bb) + 170 offsetof(struct ecore_eth_stats_bb, 171 rx_1519_to_2047_byte_packets)}, 172 {"rx_2048_to_4095_byte_packets", 173 offsetof(struct ecore_eth_stats, bb) + 174 offsetof(struct ecore_eth_stats_bb, 175 rx_2048_to_4095_byte_packets)}, 176 {"rx_4096_to_9216_byte_packets", 177 offsetof(struct ecore_eth_stats, bb) + 178 offsetof(struct ecore_eth_stats_bb, 179 rx_4096_to_9216_byte_packets)}, 180 {"rx_9217_to_16383_byte_packets", 181 offsetof(struct ecore_eth_stats, bb) + 182 offsetof(struct ecore_eth_stats_bb, 183 rx_9217_to_16383_byte_packets)}, 184 185 {"tx_1519_to_2047_byte_packets", 186 offsetof(struct ecore_eth_stats, bb) + 187 offsetof(struct ecore_eth_stats_bb, 188 tx_1519_to_2047_byte_packets)}, 189 {"tx_2048_to_4095_byte_packets", 190 offsetof(struct ecore_eth_stats, bb) + 191 offsetof(struct ecore_eth_stats_bb, 192 tx_2048_to_4095_byte_packets)}, 193 {"tx_4096_to_9216_byte_packets", 194 offsetof(struct ecore_eth_stats, bb) + 195 offsetof(struct ecore_eth_stats_bb, 196 tx_4096_to_9216_byte_packets)}, 197 {"tx_9217_to_16383_byte_packets", 198 offsetof(struct ecore_eth_stats, bb) + 199 offsetof(struct ecore_eth_stats_bb, 200 tx_9217_to_16383_byte_packets)}, 201 202 {"tx_lpi_entry_count", 203 offsetof(struct ecore_eth_stats, bb) + 204 offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)}, 205 {"tx_total_collisions", 206 offsetof(struct ecore_eth_stats, bb) + 207 offsetof(struct ecore_eth_stats_bb, tx_total_collisions)}, 208 }; 209 210 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = { 211 {"rx_1519_to_max_byte_packets", 212 offsetof(struct ecore_eth_stats, ah) + 213 offsetof(struct ecore_eth_stats_ah, 214 rx_1519_to_max_byte_packets)}, 215 {"tx_1519_to_max_byte_packets", 216 offsetof(struct ecore_eth_stats, ah) + 217 offsetof(struct ecore_eth_stats_ah, 218 tx_1519_to_max_byte_packets)}, 219 }; 220 221 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = { 222 {"rx_q_segments", 223 offsetof(struct qede_rx_queue, rx_segs)}, 224 {"rx_q_hw_errors", 225 offsetof(struct qede_rx_queue, rx_hw_errors)}, 226 {"rx_q_allocation_errors", 227 offsetof(struct qede_rx_queue, rx_alloc_errors)} 228 }; 229 230 /* Get FW version string based on fw_size */ 231 static int 232 qede_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size) 233 { 234 struct qede_dev *qdev = dev->data->dev_private; 235 struct ecore_dev *edev = &qdev->edev; 236 struct qed_dev_info *info = &qdev->dev_info.common; 237 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; 238 size_t size; 239 240 if (IS_PF(edev)) 241 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", 242 QEDE_PMD_FW_VERSION); 243 else 244 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d", 245 info->fw_major, info->fw_minor, 246 info->fw_rev, info->fw_eng); 247 size = strlen(ver_str); 248 if (size + 1 <= fw_size) /* Add 1 byte for "\0" */ 249 strlcpy(fw_ver, ver_str, fw_size); 250 else 251 return (size + 1); 252 253 snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size), 254 " MFW: %d.%d.%d.%d", 255 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_3), 256 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_2), 257 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_1), 258 GET_MFW_FIELD(info->mfw_rev, QED_MFW_VERSION_0)); 259 size = strlen(ver_str); 260 if (size + 1 <= fw_size) 261 strlcpy(fw_ver, ver_str, fw_size); 262 263 if (fw_size <= 32) 264 goto out; 265 266 snprintf(ver_str + size, (QEDE_PMD_DRV_VER_STR_SIZE - size), 267 " MBI: %d.%d.%d", 268 GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_2), 269 GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_1), 270 GET_MFW_FIELD(info->mbi_version, QED_MBI_VERSION_0)); 271 size = strlen(ver_str); 272 if (size + 1 <= fw_size) 273 strlcpy(fw_ver, ver_str, fw_size); 274 275 out: 276 return 0; 277 } 278 279 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) 280 { 281 OSAL_SPIN_LOCK(&p_hwfn->spq_lock); 282 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); 283 OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock); 284 } 285 286 static void 287 qede_interrupt_handler_intx(void *param) 288 { 289 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 290 struct qede_dev *qdev = eth_dev->data->dev_private; 291 struct ecore_dev *edev = &qdev->edev; 292 u64 status; 293 294 /* Check if our device actually raised an interrupt */ 295 status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev)); 296 if (status & 0x1) { 297 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 298 299 if (rte_intr_ack(eth_dev->intr_handle)) 300 DP_ERR(edev, "rte_intr_ack failed\n"); 301 } 302 } 303 304 static void 305 qede_interrupt_handler(void *param) 306 { 307 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 308 struct qede_dev *qdev = eth_dev->data->dev_private; 309 struct ecore_dev *edev = &qdev->edev; 310 311 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 312 if (rte_intr_ack(eth_dev->intr_handle)) 313 DP_ERR(edev, "rte_intr_ack failed\n"); 314 } 315 316 static void 317 qede_assign_rxtx_handlers(struct rte_eth_dev *dev, bool is_dummy) 318 { 319 uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; 320 struct qede_dev *qdev = dev->data->dev_private; 321 struct ecore_dev *edev = &qdev->edev; 322 bool use_tx_offload = false; 323 324 if (is_dummy) { 325 dev->rx_pkt_burst = rte_eth_pkt_burst_dummy; 326 dev->tx_pkt_burst = rte_eth_pkt_burst_dummy; 327 return; 328 } 329 330 if (ECORE_IS_CMT(edev)) { 331 dev->rx_pkt_burst = qede_recv_pkts_cmt; 332 dev->tx_pkt_burst = qede_xmit_pkts_cmt; 333 return; 334 } 335 336 if (dev->data->lro || dev->data->scattered_rx) { 337 DP_INFO(edev, "Assigning qede_recv_pkts\n"); 338 dev->rx_pkt_burst = qede_recv_pkts; 339 } else { 340 DP_INFO(edev, "Assigning qede_recv_pkts_regular\n"); 341 dev->rx_pkt_burst = qede_recv_pkts_regular; 342 } 343 344 use_tx_offload = !!(tx_offloads & 345 (RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */ 346 RTE_ETH_TX_OFFLOAD_TCP_TSO | /* tso */ 347 RTE_ETH_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */ 348 349 if (use_tx_offload) { 350 DP_INFO(edev, "Assigning qede_xmit_pkts\n"); 351 dev->tx_pkt_burst = qede_xmit_pkts; 352 } else { 353 DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n"); 354 dev->tx_pkt_burst = qede_xmit_pkts_regular; 355 } 356 } 357 358 static void 359 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info) 360 { 361 qdev->dev_info = *info; 362 qdev->ops = qed_ops; 363 } 364 365 static void qede_print_adapter_info(struct rte_eth_dev *dev) 366 { 367 struct qede_dev *qdev = dev->data->dev_private; 368 struct ecore_dev *edev = &qdev->edev; 369 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; 370 371 DP_INFO(edev, "**************************************************\n"); 372 DP_INFO(edev, " %-20s: %s\n", "DPDK version", rte_version()); 373 DP_INFO(edev, " %-20s: %s %c%d\n", "Chip details", 374 ECORE_IS_BB(edev) ? "BB" : "AH", 375 'A' + edev->chip_rev, 376 (int)edev->chip_metal); 377 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", 378 QEDE_PMD_DRV_VERSION); 379 DP_INFO(edev, " %-20s: %s\n", "Driver version", ver_str); 380 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", 381 QEDE_PMD_BASE_VERSION); 382 DP_INFO(edev, " %-20s: %s\n", "Base version", ver_str); 383 qede_fw_version_get(dev, ver_str, sizeof(ver_str)); 384 DP_INFO(edev, " %-20s: %s\n", "Firmware version", ver_str); 385 DP_INFO(edev, " %-20s: %s\n", "Firmware file", qede_fw_file); 386 DP_INFO(edev, "**************************************************\n"); 387 } 388 389 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats) 390 { 391 struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; 392 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 393 unsigned int i = 0, j = 0, qid; 394 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 395 struct qede_tx_queue *txq; 396 397 DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n"); 398 399 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev), 400 RTE_ETHDEV_QUEUE_STAT_CNTRS); 401 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev), 402 RTE_ETHDEV_QUEUE_STAT_CNTRS); 403 404 for (qid = 0; qid < qdev->num_rx_queues; qid++) { 405 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 406 offsetof(struct qede_rx_queue, rcv_pkts), 0, 407 sizeof(uint64_t)); 408 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 409 offsetof(struct qede_rx_queue, rx_hw_errors), 0, 410 sizeof(uint64_t)); 411 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 412 offsetof(struct qede_rx_queue, rx_alloc_errors), 0, 413 sizeof(uint64_t)); 414 415 if (xstats) 416 for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++) 417 OSAL_MEMSET((((char *) 418 (qdev->fp_array[qid].rxq)) + 419 qede_rxq_xstats_strings[j].offset), 420 0, 421 sizeof(uint64_t)); 422 423 i++; 424 if (i == rxq_stat_cntrs) 425 break; 426 } 427 428 i = 0; 429 430 for (qid = 0; qid < qdev->num_tx_queues; qid++) { 431 txq = qdev->fp_array[qid].txq; 432 433 OSAL_MEMSET((uint64_t *)(uintptr_t) 434 (((uint64_t)(uintptr_t)(txq)) + 435 offsetof(struct qede_tx_queue, xmit_pkts)), 0, 436 sizeof(uint64_t)); 437 438 i++; 439 if (i == txq_stat_cntrs) 440 break; 441 } 442 } 443 444 static int 445 qede_stop_vport(struct ecore_dev *edev) 446 { 447 struct ecore_hwfn *p_hwfn; 448 uint8_t vport_id; 449 int rc; 450 int i; 451 452 vport_id = 0; 453 for_each_hwfn(edev, i) { 454 p_hwfn = &edev->hwfns[i]; 455 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 456 vport_id); 457 if (rc != ECORE_SUCCESS) { 458 DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc); 459 return rc; 460 } 461 } 462 463 DP_INFO(edev, "vport stopped\n"); 464 465 return 0; 466 } 467 468 static int 469 qede_start_vport(struct qede_dev *qdev, uint16_t mtu) 470 { 471 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 472 struct ecore_sp_vport_start_params params; 473 struct ecore_hwfn *p_hwfn; 474 int rc; 475 int i; 476 477 if (qdev->vport_started) 478 qede_stop_vport(edev); 479 480 memset(¶ms, 0, sizeof(params)); 481 params.vport_id = 0; 482 params.mtu = mtu; 483 /* @DPDK - Disable FW placement */ 484 params.zero_placement_offset = 1; 485 for_each_hwfn(edev, i) { 486 p_hwfn = &edev->hwfns[i]; 487 params.concrete_fid = p_hwfn->hw_info.concrete_fid; 488 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 489 rc = ecore_sp_vport_start(p_hwfn, ¶ms); 490 if (rc != ECORE_SUCCESS) { 491 DP_ERR(edev, "Start V-PORT failed %d\n", rc); 492 return rc; 493 } 494 } 495 ecore_reset_vport_stats(edev); 496 qdev->vport_started = true; 497 DP_INFO(edev, "VPORT started with MTU = %u\n", mtu); 498 499 return 0; 500 } 501 502 #define QEDE_NPAR_TX_SWITCHING "npar_tx_switching" 503 #define QEDE_VF_TX_SWITCHING "vf_tx_switching" 504 505 /* Activate or deactivate vport via vport-update */ 506 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg) 507 { 508 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 509 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 510 struct ecore_sp_vport_update_params params; 511 struct ecore_hwfn *p_hwfn; 512 uint8_t i; 513 int rc = -1; 514 515 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 516 params.vport_id = 0; 517 params.update_vport_active_rx_flg = 1; 518 params.update_vport_active_tx_flg = 1; 519 params.vport_active_rx_flg = flg; 520 params.vport_active_tx_flg = flg; 521 if ((qdev->enable_tx_switching == false) && (flg == true)) { 522 params.update_tx_switching_flg = 1; 523 params.tx_switching_flg = !flg; 524 } 525 for_each_hwfn(edev, i) { 526 p_hwfn = &edev->hwfns[i]; 527 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 528 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 529 ECORE_SPQ_MODE_EBLOCK, NULL); 530 if (rc != ECORE_SUCCESS) { 531 DP_ERR(edev, "Failed to update vport\n"); 532 break; 533 } 534 } 535 DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated"); 536 537 return rc; 538 } 539 540 static void 541 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params, 542 uint16_t mtu, bool enable) 543 { 544 /* Enable LRO in split mode */ 545 sge_tpa_params->tpa_ipv4_en_flg = enable; 546 sge_tpa_params->tpa_ipv6_en_flg = enable; 547 sge_tpa_params->tpa_ipv4_tunn_en_flg = enable; 548 sge_tpa_params->tpa_ipv6_tunn_en_flg = enable; 549 /* set if tpa enable changes */ 550 sge_tpa_params->update_tpa_en_flg = 1; 551 /* set if tpa parameters should be handled */ 552 sge_tpa_params->update_tpa_param_flg = enable; 553 554 sge_tpa_params->max_buffers_per_cqe = 20; 555 /* Enable TPA in split mode. In this mode each TPA segment 556 * starts on the new BD, so there is one BD per segment. 557 */ 558 sge_tpa_params->tpa_pkt_split_flg = 1; 559 sge_tpa_params->tpa_hdr_data_split_flg = 0; 560 sge_tpa_params->tpa_gro_consistent_flg = 0; 561 sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 562 sge_tpa_params->tpa_max_size = 0x7FFF; 563 sge_tpa_params->tpa_min_size_to_start = mtu / 2; 564 sge_tpa_params->tpa_min_size_to_cont = mtu / 2; 565 } 566 567 /* Enable/disable LRO via vport-update */ 568 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg) 569 { 570 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 571 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 572 struct ecore_sp_vport_update_params params; 573 struct ecore_sge_tpa_params tpa_params; 574 struct ecore_hwfn *p_hwfn; 575 int rc; 576 int i; 577 578 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 579 memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params)); 580 qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg); 581 params.vport_id = 0; 582 params.sge_tpa_params = &tpa_params; 583 for_each_hwfn(edev, i) { 584 p_hwfn = &edev->hwfns[i]; 585 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 586 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 587 ECORE_SPQ_MODE_EBLOCK, NULL); 588 if (rc != ECORE_SUCCESS) { 589 DP_ERR(edev, "Failed to update LRO\n"); 590 return -1; 591 } 592 } 593 qdev->enable_lro = flg; 594 eth_dev->data->lro = flg; 595 596 DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled"); 597 598 return 0; 599 } 600 601 static int 602 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev, 603 enum qed_filter_rx_mode_type type) 604 { 605 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 606 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 607 struct ecore_filter_accept_flags flags; 608 609 memset(&flags, 0, sizeof(flags)); 610 611 flags.update_rx_mode_config = 1; 612 flags.update_tx_mode_config = 1; 613 flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 614 ECORE_ACCEPT_MCAST_MATCHED | 615 ECORE_ACCEPT_BCAST; 616 617 flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 618 ECORE_ACCEPT_MCAST_MATCHED | 619 ECORE_ACCEPT_BCAST; 620 621 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { 622 flags.rx_accept_filter |= (ECORE_ACCEPT_UCAST_UNMATCHED | 623 ECORE_ACCEPT_MCAST_UNMATCHED); 624 if (IS_VF(edev)) { 625 flags.tx_accept_filter |= 626 (ECORE_ACCEPT_UCAST_UNMATCHED | 627 ECORE_ACCEPT_MCAST_UNMATCHED); 628 DP_INFO(edev, "Enabling Tx unmatched flags for VF\n"); 629 } 630 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { 631 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 632 } 633 634 return ecore_filter_accept_cmd(edev, 0, flags, false, false, 635 ECORE_SPQ_MODE_CB, NULL); 636 } 637 638 int 639 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 640 bool add) 641 { 642 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 643 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 644 struct qede_ucast_entry *tmp = NULL; 645 struct qede_ucast_entry *u; 646 struct rte_ether_addr *mac_addr; 647 648 mac_addr = (struct rte_ether_addr *)ucast->mac; 649 if (add) { 650 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 651 if ((memcmp(mac_addr, &tmp->mac, 652 RTE_ETHER_ADDR_LEN) == 0) && 653 ucast->vni == tmp->vni && 654 ucast->vlan == tmp->vlan) { 655 DP_INFO(edev, "Unicast MAC is already added" 656 " with vlan = %u, vni = %u\n", 657 ucast->vlan, ucast->vni); 658 return 0; 659 } 660 } 661 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), 662 RTE_CACHE_LINE_SIZE); 663 if (!u) { 664 DP_ERR(edev, "Did not allocate memory for ucast\n"); 665 return -ENOMEM; 666 } 667 rte_ether_addr_copy(mac_addr, &u->mac); 668 u->vlan = ucast->vlan; 669 u->vni = ucast->vni; 670 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list); 671 qdev->num_uc_addr++; 672 } else { 673 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 674 if ((memcmp(mac_addr, &tmp->mac, 675 RTE_ETHER_ADDR_LEN) == 0) && 676 ucast->vlan == tmp->vlan && 677 ucast->vni == tmp->vni) 678 break; 679 } 680 if (tmp == NULL) { 681 DP_INFO(edev, "Unicast MAC is not found\n"); 682 return -EINVAL; 683 } 684 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list); 685 qdev->num_uc_addr--; 686 } 687 688 return 0; 689 } 690 691 static int 692 qede_add_mcast_filters(struct rte_eth_dev *eth_dev, 693 struct rte_ether_addr *mc_addrs, 694 uint32_t mc_addrs_num) 695 { 696 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 697 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 698 struct ecore_filter_mcast mcast; 699 struct qede_mcast_entry *m = NULL; 700 uint8_t i; 701 int rc; 702 703 for (i = 0; i < mc_addrs_num; i++) { 704 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), 705 RTE_CACHE_LINE_SIZE); 706 if (!m) { 707 DP_ERR(edev, "Did not allocate memory for mcast\n"); 708 return -ENOMEM; 709 } 710 rte_ether_addr_copy(&mc_addrs[i], &m->mac); 711 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list); 712 } 713 memset(&mcast, 0, sizeof(mcast)); 714 mcast.num_mc_addrs = mc_addrs_num; 715 mcast.opcode = ECORE_FILTER_ADD; 716 for (i = 0; i < mc_addrs_num; i++) 717 rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *) 718 &mcast.mac[i]); 719 rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); 720 if (rc != ECORE_SUCCESS) { 721 DP_ERR(edev, "Failed to add multicast filter (rc = %d\n)", rc); 722 return -1; 723 } 724 725 return 0; 726 } 727 728 static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev) 729 { 730 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 731 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 732 struct qede_mcast_entry *tmp = NULL; 733 struct ecore_filter_mcast mcast; 734 int j; 735 int rc; 736 737 memset(&mcast, 0, sizeof(mcast)); 738 mcast.num_mc_addrs = qdev->num_mc_addr; 739 mcast.opcode = ECORE_FILTER_REMOVE; 740 j = 0; 741 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 742 rte_ether_addr_copy(&tmp->mac, 743 (struct rte_ether_addr *)&mcast.mac[j]); 744 j++; 745 } 746 rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); 747 if (rc != ECORE_SUCCESS) { 748 DP_ERR(edev, "Failed to delete multicast filter\n"); 749 return -1; 750 } 751 /* Init the list */ 752 while (!SLIST_EMPTY(&qdev->mc_list_head)) { 753 tmp = SLIST_FIRST(&qdev->mc_list_head); 754 SLIST_REMOVE_HEAD(&qdev->mc_list_head, list); 755 } 756 SLIST_INIT(&qdev->mc_list_head); 757 758 return 0; 759 } 760 761 enum _ecore_status_t 762 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 763 bool add) 764 { 765 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 766 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 767 enum _ecore_status_t rc = ECORE_INVAL; 768 769 if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) { 770 DP_ERR(edev, "Ucast filter table limit exceeded," 771 " Please enable promisc mode\n"); 772 return ECORE_INVAL; 773 } 774 775 rc = qede_ucast_filter(eth_dev, ucast, add); 776 if (rc == 0) 777 rc = ecore_filter_ucast_cmd(edev, ucast, 778 ECORE_SPQ_MODE_CB, NULL); 779 /* Indicate error only for add filter operation. 780 * Delete filter operations are not severe. 781 */ 782 if ((rc != ECORE_SUCCESS) && add) 783 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", 784 rc, add); 785 786 return rc; 787 } 788 789 static int 790 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr, 791 __rte_unused uint32_t index, __rte_unused uint32_t pool) 792 { 793 struct ecore_filter_ucast ucast; 794 int re; 795 796 if (!rte_is_valid_assigned_ether_addr(mac_addr)) 797 return -EINVAL; 798 799 qede_set_ucast_cmn_params(&ucast); 800 ucast.opcode = ECORE_FILTER_ADD; 801 ucast.type = ECORE_FILTER_MAC; 802 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac); 803 re = (int)qede_mac_int_ops(eth_dev, &ucast, 1); 804 return re; 805 } 806 807 static void 808 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) 809 { 810 struct qede_dev *qdev = eth_dev->data->dev_private; 811 struct ecore_dev *edev = &qdev->edev; 812 struct ecore_filter_ucast ucast; 813 814 PMD_INIT_FUNC_TRACE(edev); 815 816 if (index >= qdev->dev_info.num_mac_filters) { 817 DP_ERR(edev, "Index %u is above MAC filter limit %u\n", 818 index, qdev->dev_info.num_mac_filters); 819 return; 820 } 821 822 if (!rte_is_valid_assigned_ether_addr(ð_dev->data->mac_addrs[index])) 823 return; 824 825 qede_set_ucast_cmn_params(&ucast); 826 ucast.opcode = ECORE_FILTER_REMOVE; 827 ucast.type = ECORE_FILTER_MAC; 828 829 /* Use the index maintained by rte */ 830 rte_ether_addr_copy(ð_dev->data->mac_addrs[index], 831 (struct rte_ether_addr *)&ucast.mac); 832 833 qede_mac_int_ops(eth_dev, &ucast, false); 834 } 835 836 static int 837 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr) 838 { 839 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 840 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 841 842 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), 843 mac_addr->addr_bytes)) { 844 DP_ERR(edev, "Setting MAC address is not allowed\n"); 845 return -EPERM; 846 } 847 848 qede_mac_addr_remove(eth_dev, 0); 849 850 return qede_mac_addr_add(eth_dev, mac_addr, 0, 0); 851 } 852 853 void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg) 854 { 855 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 856 struct ecore_sp_vport_update_params params; 857 struct ecore_hwfn *p_hwfn; 858 uint8_t i; 859 int rc; 860 861 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 862 params.vport_id = 0; 863 params.update_accept_any_vlan_flg = 1; 864 params.accept_any_vlan = flg; 865 for_each_hwfn(edev, i) { 866 p_hwfn = &edev->hwfns[i]; 867 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 868 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 869 ECORE_SPQ_MODE_EBLOCK, NULL); 870 if (rc != ECORE_SUCCESS) { 871 DP_ERR(edev, "Failed to configure accept-any-vlan\n"); 872 return; 873 } 874 } 875 876 DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled"); 877 } 878 879 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg) 880 { 881 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 882 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 883 struct ecore_sp_vport_update_params params; 884 struct ecore_hwfn *p_hwfn; 885 uint8_t i; 886 int rc; 887 888 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 889 params.vport_id = 0; 890 params.update_inner_vlan_removal_flg = 1; 891 params.inner_vlan_removal_flg = flg; 892 for_each_hwfn(edev, i) { 893 p_hwfn = &edev->hwfns[i]; 894 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 895 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 896 ECORE_SPQ_MODE_EBLOCK, NULL); 897 if (rc != ECORE_SUCCESS) { 898 DP_ERR(edev, "Failed to update vport\n"); 899 return -1; 900 } 901 } 902 903 qdev->vlan_strip_flg = flg; 904 905 DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled"); 906 return 0; 907 } 908 909 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, 910 uint16_t vlan_id, int on) 911 { 912 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 913 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 914 struct qed_dev_eth_info *dev_info = &qdev->dev_info; 915 struct qede_vlan_entry *tmp = NULL; 916 struct qede_vlan_entry *vlan; 917 struct ecore_filter_ucast ucast; 918 int rc; 919 920 if (on) { 921 if (qdev->configured_vlans == dev_info->num_vlan_filters) { 922 DP_ERR(edev, "Reached max VLAN filter limit" 923 " enabling accept_any_vlan\n"); 924 qede_config_accept_any_vlan(qdev, true); 925 return 0; 926 } 927 928 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 929 if (tmp->vid == vlan_id) { 930 DP_INFO(edev, "VLAN %u already configured\n", 931 vlan_id); 932 return 0; 933 } 934 } 935 936 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry), 937 RTE_CACHE_LINE_SIZE); 938 939 if (!vlan) { 940 DP_ERR(edev, "Did not allocate memory for VLAN\n"); 941 return -ENOMEM; 942 } 943 944 qede_set_ucast_cmn_params(&ucast); 945 ucast.opcode = ECORE_FILTER_ADD; 946 ucast.type = ECORE_FILTER_VLAN; 947 ucast.vlan = vlan_id; 948 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 949 NULL); 950 if (rc != 0) { 951 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id, 952 rc); 953 rte_free(vlan); 954 } else { 955 vlan->vid = vlan_id; 956 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list); 957 qdev->configured_vlans++; 958 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n", 959 vlan_id, qdev->configured_vlans); 960 } 961 } else { 962 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 963 if (tmp->vid == vlan_id) 964 break; 965 } 966 967 if (!tmp) { 968 if (qdev->configured_vlans == 0) { 969 DP_INFO(edev, 970 "No VLAN filters configured yet\n"); 971 return 0; 972 } 973 974 DP_ERR(edev, "VLAN %u not configured\n", vlan_id); 975 return -EINVAL; 976 } 977 978 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list); 979 980 qede_set_ucast_cmn_params(&ucast); 981 ucast.opcode = ECORE_FILTER_REMOVE; 982 ucast.type = ECORE_FILTER_VLAN; 983 ucast.vlan = vlan_id; 984 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 985 NULL); 986 if (rc != 0) { 987 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", 988 vlan_id, rc); 989 } else { 990 qdev->configured_vlans--; 991 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n", 992 vlan_id, qdev->configured_vlans); 993 } 994 } 995 996 return rc; 997 } 998 999 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 1000 { 1001 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1002 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1003 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 1004 1005 if (mask & RTE_ETH_VLAN_STRIP_MASK) { 1006 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) 1007 (void)qede_vlan_stripping(eth_dev, 1); 1008 else 1009 (void)qede_vlan_stripping(eth_dev, 0); 1010 } 1011 1012 if (mask & RTE_ETH_VLAN_FILTER_MASK) { 1013 /* VLAN filtering kicks in when a VLAN is added */ 1014 if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) { 1015 qede_vlan_filter_set(eth_dev, 0, 1); 1016 } else { 1017 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ 1018 DP_ERR(edev, 1019 " Please remove existing VLAN filters" 1020 " before disabling VLAN filtering\n"); 1021 /* Signal app that VLAN filtering is still 1022 * enabled 1023 */ 1024 eth_dev->data->dev_conf.rxmode.offloads |= 1025 RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 1026 } else { 1027 qede_vlan_filter_set(eth_dev, 0, 0); 1028 } 1029 } 1030 } 1031 1032 qdev->vlan_offload_mask = mask; 1033 1034 DP_INFO(edev, "VLAN offload mask %d\n", mask); 1035 1036 return 0; 1037 } 1038 1039 static void qede_prandom_bytes(uint32_t *buff) 1040 { 1041 uint8_t i; 1042 1043 srand((unsigned int)time(NULL)); 1044 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 1045 buff[i] = rand(); 1046 } 1047 1048 int qede_config_rss(struct rte_eth_dev *eth_dev) 1049 { 1050 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1051 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1052 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE]; 1053 struct rte_eth_rss_reta_entry64 reta_conf[2]; 1054 struct rte_eth_rss_conf rss_conf; 1055 uint32_t i, id, pos, q; 1056 1057 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 1058 if (!rss_conf.rss_key) { 1059 DP_INFO(edev, "Applying driver default key\n"); 1060 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 1061 qede_prandom_bytes(&def_rss_key[0]); 1062 rss_conf.rss_key = (uint8_t *)&def_rss_key[0]; 1063 } 1064 1065 /* Configure RSS hash */ 1066 if (qede_rss_hash_update(eth_dev, &rss_conf)) 1067 return -EINVAL; 1068 1069 /* Configure default RETA */ 1070 memset(reta_conf, 0, sizeof(reta_conf)); 1071 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) 1072 reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX; 1073 1074 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 1075 id = i / RTE_ETH_RETA_GROUP_SIZE; 1076 pos = i % RTE_ETH_RETA_GROUP_SIZE; 1077 q = i % QEDE_RSS_COUNT(eth_dev); 1078 reta_conf[id].reta[pos] = q; 1079 } 1080 if (qede_rss_reta_update(eth_dev, &reta_conf[0], 1081 ECORE_RSS_IND_TABLE_SIZE)) 1082 return -EINVAL; 1083 1084 return 0; 1085 } 1086 1087 static void qede_fastpath_start(struct ecore_dev *edev) 1088 { 1089 struct ecore_hwfn *p_hwfn; 1090 int i; 1091 1092 for_each_hwfn(edev, i) { 1093 p_hwfn = &edev->hwfns[i]; 1094 ecore_hw_start_fastpath(p_hwfn); 1095 } 1096 } 1097 1098 static int qede_dev_start(struct rte_eth_dev *eth_dev) 1099 { 1100 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1101 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1102 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 1103 1104 PMD_INIT_FUNC_TRACE(edev); 1105 1106 /* Update MTU only if it has changed */ 1107 if (qdev->new_mtu && qdev->new_mtu != qdev->mtu) { 1108 if (qede_update_mtu(eth_dev, qdev->new_mtu)) 1109 goto err; 1110 qdev->mtu = qdev->new_mtu; 1111 qdev->new_mtu = 0; 1112 } 1113 1114 /* Configure TPA parameters */ 1115 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) { 1116 if (qede_enable_tpa(eth_dev, true)) 1117 return -EINVAL; 1118 /* Enable scatter mode for LRO */ 1119 if (!eth_dev->data->scattered_rx) 1120 rxmode->offloads |= RTE_ETH_RX_OFFLOAD_SCATTER; 1121 } 1122 1123 /* Start queues */ 1124 if (qede_start_queues(eth_dev)) 1125 goto err; 1126 1127 if (IS_PF(edev)) 1128 qede_reset_queue_stats(qdev, true); 1129 1130 /* Newer SR-IOV PF driver expects RX/TX queues to be started before 1131 * enabling RSS. Hence RSS configuration is deferred up to this point. 1132 * Also, we would like to retain similar behavior in PF case, so we 1133 * don't do PF/VF specific check here. 1134 */ 1135 if (eth_dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) 1136 if (qede_config_rss(eth_dev)) 1137 goto err; 1138 1139 /* Enable vport*/ 1140 if (qede_activate_vport(eth_dev, true)) 1141 goto err; 1142 1143 /* Bring-up the link */ 1144 qede_dev_set_link_state(eth_dev, true); 1145 1146 /* Update link status */ 1147 qede_link_update(eth_dev, 0); 1148 1149 /* Start/resume traffic */ 1150 qede_fastpath_start(edev); 1151 1152 /* Assign I/O handlers */ 1153 qede_assign_rxtx_handlers(eth_dev, false); 1154 1155 DP_INFO(edev, "Device started\n"); 1156 1157 return 0; 1158 err: 1159 DP_ERR(edev, "Device start fails\n"); 1160 return -1; /* common error code is < 0 */ 1161 } 1162 1163 static int qede_dev_stop(struct rte_eth_dev *eth_dev) 1164 { 1165 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1166 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1167 1168 PMD_INIT_FUNC_TRACE(edev); 1169 eth_dev->data->dev_started = 0; 1170 1171 /* Bring the link down */ 1172 qede_dev_set_link_state(eth_dev, false); 1173 1174 /* Update link status */ 1175 qede_link_update(eth_dev, 0); 1176 1177 /* Replace I/O functions with dummy ones. It cannot 1178 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. 1179 */ 1180 qede_assign_rxtx_handlers(eth_dev, true); 1181 1182 /* Disable vport */ 1183 if (qede_activate_vport(eth_dev, false)) 1184 return 0; 1185 1186 if (qdev->enable_lro) 1187 qede_enable_tpa(eth_dev, false); 1188 1189 /* Stop queues */ 1190 qede_stop_queues(eth_dev); 1191 1192 /* Disable traffic */ 1193 ecore_hw_stop_fastpath(edev); /* TBD - loop */ 1194 1195 DP_INFO(edev, "Device is stopped\n"); 1196 1197 return 0; 1198 } 1199 1200 static const char * const valid_args[] = { 1201 QEDE_NPAR_TX_SWITCHING, 1202 QEDE_VF_TX_SWITCHING, 1203 NULL, 1204 }; 1205 1206 static int qede_args_check(const char *key, const char *val, void *opaque) 1207 { 1208 unsigned long tmp; 1209 int ret = 0; 1210 struct rte_eth_dev *eth_dev = opaque; 1211 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1212 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1213 1214 errno = 0; 1215 tmp = strtoul(val, NULL, 0); 1216 if (errno) { 1217 DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val); 1218 return errno; 1219 } 1220 1221 if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) || 1222 ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) { 1223 qdev->enable_tx_switching = !!tmp; 1224 DP_INFO(edev, "Disabling %s tx-switching\n", 1225 strcmp(QEDE_NPAR_TX_SWITCHING, key) ? 1226 "VF" : "NPAR"); 1227 } 1228 1229 return ret; 1230 } 1231 1232 static int qede_args(struct rte_eth_dev *eth_dev) 1233 { 1234 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 1235 struct rte_kvargs *kvlist; 1236 struct rte_devargs *devargs; 1237 int ret; 1238 int i; 1239 1240 devargs = pci_dev->device.devargs; 1241 if (!devargs) 1242 return 0; /* return success */ 1243 1244 kvlist = rte_kvargs_parse(devargs->args, valid_args); 1245 if (kvlist == NULL) 1246 return -EINVAL; 1247 1248 /* Process parameters. */ 1249 for (i = 0; (valid_args[i] != NULL); ++i) { 1250 if (rte_kvargs_count(kvlist, valid_args[i])) { 1251 ret = rte_kvargs_process(kvlist, valid_args[i], 1252 qede_args_check, eth_dev); 1253 if (ret != ECORE_SUCCESS) { 1254 rte_kvargs_free(kvlist); 1255 return ret; 1256 } 1257 } 1258 } 1259 rte_kvargs_free(kvlist); 1260 1261 return 0; 1262 } 1263 1264 static int qede_dev_configure(struct rte_eth_dev *eth_dev) 1265 { 1266 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1267 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1268 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 1269 uint8_t num_rxqs; 1270 uint8_t num_txqs; 1271 int ret; 1272 1273 PMD_INIT_FUNC_TRACE(edev); 1274 1275 if (rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) 1276 rxmode->offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 1277 1278 /* We need to have min 1 RX queue.There is no min check in 1279 * rte_eth_dev_configure(), so we are checking it here. 1280 */ 1281 if (eth_dev->data->nb_rx_queues == 0) { 1282 DP_ERR(edev, "Minimum one RX queue is required\n"); 1283 return -EINVAL; 1284 } 1285 1286 /* Enable Tx switching by default */ 1287 qdev->enable_tx_switching = 1; 1288 1289 /* Parse devargs and fix up rxmode */ 1290 if (qede_args(eth_dev)) 1291 DP_NOTICE(edev, false, 1292 "Invalid devargs supplied, requested change will not take effect\n"); 1293 1294 if (!(rxmode->mq_mode == RTE_ETH_MQ_RX_NONE || 1295 rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)) { 1296 DP_ERR(edev, "Unsupported multi-queue mode\n"); 1297 return -ENOTSUP; 1298 } 1299 /* Flow director mode check */ 1300 if (qede_check_fdir_support(eth_dev)) 1301 return -ENOTSUP; 1302 1303 /* Allocate/reallocate fastpath resources only for new queue config */ 1304 num_txqs = eth_dev->data->nb_tx_queues * edev->num_hwfns; 1305 num_rxqs = eth_dev->data->nb_rx_queues * edev->num_hwfns; 1306 if (qdev->num_tx_queues != num_txqs || 1307 qdev->num_rx_queues != num_rxqs) { 1308 qede_dealloc_fp_resc(eth_dev); 1309 qdev->num_tx_queues = num_txqs; 1310 qdev->num_rx_queues = num_rxqs; 1311 if (qede_alloc_fp_resc(qdev)) 1312 return -ENOMEM; 1313 } 1314 1315 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) 1316 eth_dev->data->scattered_rx = 1; 1317 1318 if (qede_start_vport(qdev, eth_dev->data->mtu)) 1319 return -1; 1320 1321 qdev->mtu = eth_dev->data->mtu; 1322 1323 /* Enable VLAN offloads by default */ 1324 ret = qede_vlan_offload_set(eth_dev, RTE_ETH_VLAN_STRIP_MASK | 1325 RTE_ETH_VLAN_FILTER_MASK); 1326 if (ret) 1327 return ret; 1328 1329 DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n", 1330 QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev)); 1331 1332 if (ECORE_IS_CMT(edev)) 1333 DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n", 1334 qdev->num_rx_queues, qdev->num_tx_queues); 1335 1336 1337 return 0; 1338 } 1339 1340 /* Info about HW descriptor ring limitations */ 1341 static const struct rte_eth_desc_lim qede_rx_desc_lim = { 1342 .nb_max = 0x8000, /* 32K */ 1343 .nb_min = 128, 1344 .nb_align = 128 /* lowest common multiple */ 1345 }; 1346 1347 static const struct rte_eth_desc_lim qede_tx_desc_lim = { 1348 .nb_max = 0x8000, /* 32K */ 1349 .nb_min = 256, 1350 .nb_align = 256, 1351 .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET, 1352 .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 1353 }; 1354 1355 static int 1356 qede_dev_info_get(struct rte_eth_dev *eth_dev, 1357 struct rte_eth_dev_info *dev_info) 1358 { 1359 struct qede_dev *qdev = eth_dev->data->dev_private; 1360 struct ecore_dev *edev = &qdev->edev; 1361 struct qed_link_output link; 1362 uint32_t speed_cap = 0; 1363 1364 PMD_INIT_FUNC_TRACE(edev); 1365 1366 dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; 1367 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; 1368 dev_info->rx_desc_lim = qede_rx_desc_lim; 1369 dev_info->tx_desc_lim = qede_tx_desc_lim; 1370 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP; 1371 1372 if (IS_PF(edev)) 1373 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1374 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2); 1375 else 1376 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1377 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF); 1378 /* Since CMT mode internally doubles the number of queues */ 1379 if (ECORE_IS_CMT(edev)) 1380 dev_info->max_rx_queues = dev_info->max_rx_queues / 2; 1381 1382 dev_info->max_tx_queues = dev_info->max_rx_queues; 1383 1384 dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters; 1385 dev_info->max_vfs = 0; 1386 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; 1387 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 1388 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; 1389 dev_info->rx_offload_capa = (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 1390 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 1391 RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 1392 RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1393 RTE_ETH_RX_OFFLOAD_TCP_LRO | 1394 RTE_ETH_RX_OFFLOAD_KEEP_CRC | 1395 RTE_ETH_RX_OFFLOAD_SCATTER | 1396 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | 1397 RTE_ETH_RX_OFFLOAD_VLAN_STRIP | 1398 RTE_ETH_RX_OFFLOAD_RSS_HASH); 1399 dev_info->rx_queue_offload_capa = 0; 1400 1401 /* TX offloads are on a per-packet basis, so it is applicable 1402 * to both at port and queue levels. 1403 */ 1404 dev_info->tx_offload_capa = (RTE_ETH_TX_OFFLOAD_VLAN_INSERT | 1405 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | 1406 RTE_ETH_TX_OFFLOAD_UDP_CKSUM | 1407 RTE_ETH_TX_OFFLOAD_TCP_CKSUM | 1408 RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | 1409 RTE_ETH_TX_OFFLOAD_MULTI_SEGS | 1410 RTE_ETH_TX_OFFLOAD_TCP_TSO | 1411 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | 1412 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO); 1413 dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa; 1414 1415 dev_info->default_txconf = (struct rte_eth_txconf) { 1416 .offloads = RTE_ETH_TX_OFFLOAD_MULTI_SEGS, 1417 }; 1418 1419 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1420 /* Packets are always dropped if no descriptors are available */ 1421 .rx_drop_en = 1, 1422 .offloads = 0, 1423 }; 1424 1425 memset(&link, 0, sizeof(struct qed_link_output)); 1426 qdev->ops->common->get_link(edev, &link); 1427 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1428 speed_cap |= RTE_ETH_LINK_SPEED_1G; 1429 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1430 speed_cap |= RTE_ETH_LINK_SPEED_10G; 1431 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1432 speed_cap |= RTE_ETH_LINK_SPEED_25G; 1433 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1434 speed_cap |= RTE_ETH_LINK_SPEED_40G; 1435 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1436 speed_cap |= RTE_ETH_LINK_SPEED_50G; 1437 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1438 speed_cap |= RTE_ETH_LINK_SPEED_100G; 1439 dev_info->speed_capa = speed_cap; 1440 1441 return 0; 1442 } 1443 1444 /* return 0 means link status changed, -1 means not changed */ 1445 int 1446 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) 1447 { 1448 struct qede_dev *qdev = eth_dev->data->dev_private; 1449 struct ecore_dev *edev = &qdev->edev; 1450 struct qed_link_output q_link; 1451 struct rte_eth_link link; 1452 uint16_t link_duplex; 1453 1454 memset(&q_link, 0, sizeof(q_link)); 1455 memset(&link, 0, sizeof(link)); 1456 1457 qdev->ops->common->get_link(edev, &q_link); 1458 1459 /* Link Speed */ 1460 link.link_speed = q_link.speed; 1461 1462 /* Link Mode */ 1463 switch (q_link.duplex) { 1464 case QEDE_DUPLEX_HALF: 1465 link_duplex = RTE_ETH_LINK_HALF_DUPLEX; 1466 break; 1467 case QEDE_DUPLEX_FULL: 1468 link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 1469 break; 1470 case QEDE_DUPLEX_UNKNOWN: 1471 default: 1472 link_duplex = -1; 1473 } 1474 link.link_duplex = link_duplex; 1475 1476 /* Link Status */ 1477 link.link_status = q_link.link_up ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN; 1478 1479 /* AN */ 1480 link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? 1481 RTE_ETH_LINK_AUTONEG : RTE_ETH_LINK_FIXED; 1482 1483 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", 1484 link.link_speed, link.link_duplex, 1485 link.link_autoneg, link.link_status); 1486 1487 return rte_eth_linkstatus_set(eth_dev, &link); 1488 } 1489 1490 static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev) 1491 { 1492 enum _ecore_status_t ecore_status; 1493 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1494 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1495 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; 1496 1497 PMD_INIT_FUNC_TRACE(edev); 1498 1499 ecore_status = qed_configure_filter_rx_mode(eth_dev, type); 1500 1501 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1502 } 1503 1504 static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev) 1505 { 1506 struct qede_dev *qdev = eth_dev->data->dev_private; 1507 struct ecore_dev *edev = &qdev->edev; 1508 enum _ecore_status_t ecore_status; 1509 1510 PMD_INIT_FUNC_TRACE(edev); 1511 1512 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1513 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1514 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC); 1515 else 1516 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1517 QED_FILTER_RX_MODE_TYPE_REGULAR); 1518 1519 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1520 } 1521 1522 static void qede_poll_sp_sb_cb(void *param) 1523 { 1524 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 1525 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1526 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1527 int rc; 1528 1529 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 1530 qede_interrupt_action(&edev->hwfns[1]); 1531 1532 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, 1533 qede_poll_sp_sb_cb, 1534 (void *)eth_dev); 1535 if (rc != 0) { 1536 DP_ERR(edev, "Unable to start periodic" 1537 " timer rc %d\n", rc); 1538 } 1539 } 1540 1541 static int qede_dev_close(struct rte_eth_dev *eth_dev) 1542 { 1543 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1544 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1545 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1546 int ret = 0; 1547 1548 PMD_INIT_FUNC_TRACE(edev); 1549 1550 /* only close in case of the primary process */ 1551 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1552 return 0; 1553 1554 /* dev_stop() shall cleanup fp resources in hw but without releasing 1555 * dma memories and sw structures so that dev_start() can be called 1556 * by the app without reconfiguration. However, in dev_close() we 1557 * can release all the resources and device can be brought up newly 1558 */ 1559 if (eth_dev->data->dev_started) 1560 ret = qede_dev_stop(eth_dev); 1561 1562 if (qdev->vport_started) 1563 qede_stop_vport(edev); 1564 qdev->vport_started = false; 1565 qede_fdir_dealloc_resc(eth_dev); 1566 qede_dealloc_fp_resc(eth_dev); 1567 1568 eth_dev->data->nb_rx_queues = 0; 1569 eth_dev->data->nb_tx_queues = 0; 1570 1571 qdev->ops->common->slowpath_stop(edev); 1572 qdev->ops->common->remove(edev); 1573 rte_intr_disable(pci_dev->intr_handle); 1574 1575 switch (rte_intr_type_get(pci_dev->intr_handle)) { 1576 case RTE_INTR_HANDLE_UIO_INTX: 1577 case RTE_INTR_HANDLE_VFIO_LEGACY: 1578 rte_intr_callback_unregister(pci_dev->intr_handle, 1579 qede_interrupt_handler_intx, 1580 (void *)eth_dev); 1581 break; 1582 default: 1583 rte_intr_callback_unregister(pci_dev->intr_handle, 1584 qede_interrupt_handler, 1585 (void *)eth_dev); 1586 } 1587 1588 if (ECORE_IS_CMT(edev)) 1589 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); 1590 1591 return ret; 1592 } 1593 1594 static int 1595 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) 1596 { 1597 struct qede_dev *qdev = eth_dev->data->dev_private; 1598 struct ecore_dev *edev = &qdev->edev; 1599 struct ecore_eth_stats stats; 1600 unsigned int i = 0, j = 0, qid, idx, hw_fn; 1601 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 1602 struct qede_tx_queue *txq; 1603 1604 ecore_get_vport_stats(edev, &stats); 1605 1606 /* RX Stats */ 1607 eth_stats->ipackets = stats.common.rx_ucast_pkts + 1608 stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts; 1609 1610 eth_stats->ibytes = stats.common.rx_ucast_bytes + 1611 stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes; 1612 1613 eth_stats->ierrors = stats.common.rx_crc_errors + 1614 stats.common.rx_align_errors + 1615 stats.common.rx_carrier_errors + 1616 stats.common.rx_oversize_packets + 1617 stats.common.rx_jabbers + stats.common.rx_undersize_packets; 1618 1619 eth_stats->rx_nombuf = stats.common.no_buff_discards; 1620 1621 eth_stats->imissed = stats.common.mftag_filter_discards + 1622 stats.common.mac_filter_discards + 1623 stats.common.no_buff_discards + 1624 stats.common.brb_truncates + stats.common.brb_discards; 1625 1626 /* TX stats */ 1627 eth_stats->opackets = stats.common.tx_ucast_pkts + 1628 stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts; 1629 1630 eth_stats->obytes = stats.common.tx_ucast_bytes + 1631 stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes; 1632 1633 eth_stats->oerrors = stats.common.tx_err_drop_pkts; 1634 1635 /* Queue stats */ 1636 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev), 1637 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1638 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev), 1639 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1640 if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) || 1641 txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev)) 1642 DP_VERBOSE(edev, ECORE_MSG_DEBUG, 1643 "Not all the queue stats will be displayed. Set" 1644 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" 1645 " appropriately and retry.\n"); 1646 1647 for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) { 1648 eth_stats->q_ipackets[i] = 0; 1649 eth_stats->q_errors[i] = 0; 1650 1651 for_each_hwfn(edev, hw_fn) { 1652 idx = qid * edev->num_hwfns + hw_fn; 1653 1654 eth_stats->q_ipackets[i] += 1655 *(uint64_t *) 1656 (((char *)(qdev->fp_array[idx].rxq)) + 1657 offsetof(struct qede_rx_queue, 1658 rcv_pkts)); 1659 eth_stats->q_errors[i] += 1660 *(uint64_t *) 1661 (((char *)(qdev->fp_array[idx].rxq)) + 1662 offsetof(struct qede_rx_queue, 1663 rx_hw_errors)) + 1664 *(uint64_t *) 1665 (((char *)(qdev->fp_array[idx].rxq)) + 1666 offsetof(struct qede_rx_queue, 1667 rx_alloc_errors)); 1668 } 1669 1670 i++; 1671 if (i == rxq_stat_cntrs) 1672 break; 1673 } 1674 1675 for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) { 1676 eth_stats->q_opackets[j] = 0; 1677 1678 for_each_hwfn(edev, hw_fn) { 1679 idx = qid * edev->num_hwfns + hw_fn; 1680 1681 txq = qdev->fp_array[idx].txq; 1682 eth_stats->q_opackets[j] += 1683 *((uint64_t *)(uintptr_t) 1684 (((uint64_t)(uintptr_t)(txq)) + 1685 offsetof(struct qede_tx_queue, 1686 xmit_pkts))); 1687 } 1688 1689 j++; 1690 if (j == txq_stat_cntrs) 1691 break; 1692 } 1693 1694 return 0; 1695 } 1696 1697 static unsigned 1698 qede_get_xstats_count(struct qede_dev *qdev) { 1699 struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; 1700 1701 if (ECORE_IS_BB(&qdev->edev)) 1702 return RTE_DIM(qede_xstats_strings) + 1703 RTE_DIM(qede_bb_xstats_strings) + 1704 (RTE_DIM(qede_rxq_xstats_strings) * 1705 QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns); 1706 else 1707 return RTE_DIM(qede_xstats_strings) + 1708 RTE_DIM(qede_ah_xstats_strings) + 1709 (RTE_DIM(qede_rxq_xstats_strings) * 1710 QEDE_RSS_COUNT(dev)); 1711 } 1712 1713 static int 1714 qede_get_xstats_names(struct rte_eth_dev *dev, 1715 struct rte_eth_xstat_name *xstats_names, 1716 __rte_unused unsigned int limit) 1717 { 1718 struct qede_dev *qdev = dev->data->dev_private; 1719 struct ecore_dev *edev = &qdev->edev; 1720 const unsigned int stat_cnt = qede_get_xstats_count(qdev); 1721 unsigned int i, qid, hw_fn, stat_idx = 0; 1722 1723 if (xstats_names == NULL) 1724 return stat_cnt; 1725 1726 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1727 strlcpy(xstats_names[stat_idx].name, 1728 qede_xstats_strings[i].name, 1729 sizeof(xstats_names[stat_idx].name)); 1730 stat_idx++; 1731 } 1732 1733 if (ECORE_IS_BB(edev)) { 1734 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { 1735 strlcpy(xstats_names[stat_idx].name, 1736 qede_bb_xstats_strings[i].name, 1737 sizeof(xstats_names[stat_idx].name)); 1738 stat_idx++; 1739 } 1740 } else { 1741 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { 1742 strlcpy(xstats_names[stat_idx].name, 1743 qede_ah_xstats_strings[i].name, 1744 sizeof(xstats_names[stat_idx].name)); 1745 stat_idx++; 1746 } 1747 } 1748 1749 for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) { 1750 for_each_hwfn(edev, hw_fn) { 1751 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1752 snprintf(xstats_names[stat_idx].name, 1753 RTE_ETH_XSTATS_NAME_SIZE, 1754 "%.4s%d.%d%s", 1755 qede_rxq_xstats_strings[i].name, 1756 hw_fn, qid, 1757 qede_rxq_xstats_strings[i].name + 4); 1758 stat_idx++; 1759 } 1760 } 1761 } 1762 1763 return stat_cnt; 1764 } 1765 1766 static int 1767 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1768 unsigned int n) 1769 { 1770 struct qede_dev *qdev = dev->data->dev_private; 1771 struct ecore_dev *edev = &qdev->edev; 1772 struct ecore_eth_stats stats; 1773 const unsigned int num = qede_get_xstats_count(qdev); 1774 unsigned int i, qid, hw_fn, fpidx, stat_idx = 0; 1775 1776 if (n < num) 1777 return num; 1778 1779 ecore_get_vport_stats(edev, &stats); 1780 1781 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1782 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) + 1783 qede_xstats_strings[i].offset); 1784 xstats[stat_idx].id = stat_idx; 1785 stat_idx++; 1786 } 1787 1788 if (ECORE_IS_BB(edev)) { 1789 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { 1790 xstats[stat_idx].value = 1791 *(uint64_t *)(((char *)&stats) + 1792 qede_bb_xstats_strings[i].offset); 1793 xstats[stat_idx].id = stat_idx; 1794 stat_idx++; 1795 } 1796 } else { 1797 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { 1798 xstats[stat_idx].value = 1799 *(uint64_t *)(((char *)&stats) + 1800 qede_ah_xstats_strings[i].offset); 1801 xstats[stat_idx].id = stat_idx; 1802 stat_idx++; 1803 } 1804 } 1805 1806 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 1807 for_each_hwfn(edev, hw_fn) { 1808 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1809 fpidx = qid * edev->num_hwfns + hw_fn; 1810 xstats[stat_idx].value = *(uint64_t *) 1811 (((char *)(qdev->fp_array[fpidx].rxq)) + 1812 qede_rxq_xstats_strings[i].offset); 1813 xstats[stat_idx].id = stat_idx; 1814 stat_idx++; 1815 } 1816 1817 } 1818 } 1819 1820 return stat_idx; 1821 } 1822 1823 static int 1824 qede_reset_xstats(struct rte_eth_dev *dev) 1825 { 1826 struct qede_dev *qdev = dev->data->dev_private; 1827 struct ecore_dev *edev = &qdev->edev; 1828 1829 ecore_reset_vport_stats(edev); 1830 qede_reset_queue_stats(qdev, true); 1831 1832 return 0; 1833 } 1834 1835 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) 1836 { 1837 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1838 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1839 struct qed_link_params link_params; 1840 int rc; 1841 1842 DP_INFO(edev, "setting link state %d\n", link_up); 1843 memset(&link_params, 0, sizeof(link_params)); 1844 link_params.link_up = link_up; 1845 rc = qdev->ops->common->set_link(edev, &link_params); 1846 if (rc != ECORE_SUCCESS) 1847 DP_ERR(edev, "Unable to set link state %d\n", link_up); 1848 1849 return rc; 1850 } 1851 1852 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev) 1853 { 1854 return qede_dev_set_link_state(eth_dev, true); 1855 } 1856 1857 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev) 1858 { 1859 return qede_dev_set_link_state(eth_dev, false); 1860 } 1861 1862 static int qede_reset_stats(struct rte_eth_dev *eth_dev) 1863 { 1864 struct qede_dev *qdev = eth_dev->data->dev_private; 1865 struct ecore_dev *edev = &qdev->edev; 1866 1867 ecore_reset_vport_stats(edev); 1868 qede_reset_queue_stats(qdev, false); 1869 1870 return 0; 1871 } 1872 1873 static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev) 1874 { 1875 enum qed_filter_rx_mode_type type = 1876 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1877 enum _ecore_status_t ecore_status; 1878 1879 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1880 type = QED_FILTER_RX_MODE_TYPE_PROMISC; 1881 ecore_status = qed_configure_filter_rx_mode(eth_dev, type); 1882 1883 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1884 } 1885 1886 static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev) 1887 { 1888 enum _ecore_status_t ecore_status; 1889 1890 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1891 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1892 QED_FILTER_RX_MODE_TYPE_PROMISC); 1893 else 1894 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1895 QED_FILTER_RX_MODE_TYPE_REGULAR); 1896 1897 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1898 } 1899 1900 static int 1901 qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, 1902 struct rte_ether_addr *mc_addrs, 1903 uint32_t mc_addrs_num) 1904 { 1905 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1906 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1907 uint8_t i; 1908 1909 if (mc_addrs_num > ECORE_MAX_MC_ADDRS) { 1910 DP_ERR(edev, "Reached max multicast filters limit," 1911 "Please enable multicast promisc mode\n"); 1912 return -ENOSPC; 1913 } 1914 1915 for (i = 0; i < mc_addrs_num; i++) { 1916 if (!rte_is_multicast_ether_addr(&mc_addrs[i])) { 1917 DP_ERR(edev, "Not a valid multicast MAC\n"); 1918 return -EINVAL; 1919 } 1920 } 1921 1922 /* Flush all existing entries */ 1923 if (qede_del_mcast_filters(eth_dev)) 1924 return -1; 1925 1926 /* Set new mcast list */ 1927 return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num); 1928 } 1929 1930 /* Update MTU via vport-update without doing port restart. 1931 * The vport must be deactivated before calling this API. 1932 */ 1933 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu) 1934 { 1935 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1936 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1937 struct ecore_hwfn *p_hwfn; 1938 int rc; 1939 int i; 1940 1941 if (IS_PF(edev)) { 1942 struct ecore_sp_vport_update_params params; 1943 1944 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 1945 params.vport_id = 0; 1946 params.mtu = mtu; 1947 params.vport_id = 0; 1948 for_each_hwfn(edev, i) { 1949 p_hwfn = &edev->hwfns[i]; 1950 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 1951 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 1952 ECORE_SPQ_MODE_EBLOCK, NULL); 1953 if (rc != ECORE_SUCCESS) 1954 goto err; 1955 } 1956 } else { 1957 for_each_hwfn(edev, i) { 1958 p_hwfn = &edev->hwfns[i]; 1959 rc = ecore_vf_pf_update_mtu(p_hwfn, mtu); 1960 if (rc == ECORE_INVAL) { 1961 DP_INFO(edev, "VF MTU Update TLV not supported\n"); 1962 /* Recreate vport */ 1963 rc = qede_start_vport(qdev, mtu); 1964 if (rc != ECORE_SUCCESS) 1965 goto err; 1966 1967 /* Restore config lost due to vport stop */ 1968 if (eth_dev->data->promiscuous) 1969 qede_promiscuous_enable(eth_dev); 1970 else 1971 qede_promiscuous_disable(eth_dev); 1972 1973 if (eth_dev->data->all_multicast) 1974 qede_allmulticast_enable(eth_dev); 1975 else 1976 qede_allmulticast_disable(eth_dev); 1977 1978 qede_vlan_offload_set(eth_dev, 1979 qdev->vlan_offload_mask); 1980 } else if (rc != ECORE_SUCCESS) { 1981 goto err; 1982 } 1983 } 1984 } 1985 DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu); 1986 1987 return 0; 1988 1989 err: 1990 DP_ERR(edev, "Failed to update MTU\n"); 1991 return -1; 1992 } 1993 1994 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, 1995 struct rte_eth_fc_conf *fc_conf) 1996 { 1997 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1998 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1999 struct qed_link_output current_link; 2000 struct qed_link_params params; 2001 2002 memset(¤t_link, 0, sizeof(current_link)); 2003 qdev->ops->common->get_link(edev, ¤t_link); 2004 2005 memset(¶ms, 0, sizeof(params)); 2006 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; 2007 if (fc_conf->autoneg) { 2008 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) { 2009 DP_ERR(edev, "Autoneg not supported\n"); 2010 return -EINVAL; 2011 } 2012 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 2013 } 2014 2015 /* Pause is assumed to be supported (SUPPORTED_Pause) */ 2016 if (fc_conf->mode == RTE_ETH_FC_FULL) 2017 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE | 2018 QED_LINK_PAUSE_RX_ENABLE); 2019 if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE) 2020 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; 2021 if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE) 2022 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; 2023 2024 params.link_up = true; 2025 (void)qdev->ops->common->set_link(edev, ¶ms); 2026 2027 return 0; 2028 } 2029 2030 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev, 2031 struct rte_eth_fc_conf *fc_conf) 2032 { 2033 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2034 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2035 struct qed_link_output current_link; 2036 2037 memset(¤t_link, 0, sizeof(current_link)); 2038 qdev->ops->common->get_link(edev, ¤t_link); 2039 2040 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 2041 fc_conf->autoneg = true; 2042 2043 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE | 2044 QED_LINK_PAUSE_TX_ENABLE)) 2045 fc_conf->mode = RTE_ETH_FC_FULL; 2046 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) 2047 fc_conf->mode = RTE_ETH_FC_RX_PAUSE; 2048 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) 2049 fc_conf->mode = RTE_ETH_FC_TX_PAUSE; 2050 else 2051 fc_conf->mode = RTE_ETH_FC_NONE; 2052 2053 return 0; 2054 } 2055 2056 static const uint32_t * 2057 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev, 2058 size_t *no_of_elements) 2059 { 2060 static const uint32_t ptypes[] = { 2061 RTE_PTYPE_L2_ETHER, 2062 RTE_PTYPE_L2_ETHER_VLAN, 2063 RTE_PTYPE_L3_IPV4, 2064 RTE_PTYPE_L3_IPV6, 2065 RTE_PTYPE_L4_TCP, 2066 RTE_PTYPE_L4_UDP, 2067 RTE_PTYPE_TUNNEL_VXLAN, 2068 RTE_PTYPE_L4_FRAG, 2069 RTE_PTYPE_TUNNEL_GENEVE, 2070 RTE_PTYPE_TUNNEL_GRE, 2071 /* Inner */ 2072 RTE_PTYPE_INNER_L2_ETHER, 2073 RTE_PTYPE_INNER_L2_ETHER_VLAN, 2074 RTE_PTYPE_INNER_L3_IPV4, 2075 RTE_PTYPE_INNER_L3_IPV6, 2076 RTE_PTYPE_INNER_L4_TCP, 2077 RTE_PTYPE_INNER_L4_UDP, 2078 RTE_PTYPE_INNER_L4_FRAG, 2079 }; 2080 2081 if (eth_dev->rx_pkt_burst == qede_recv_pkts || 2082 eth_dev->rx_pkt_burst == qede_recv_pkts_regular || 2083 eth_dev->rx_pkt_burst == qede_recv_pkts_cmt) { 2084 *no_of_elements = RTE_DIM(ptypes); 2085 return ptypes; 2086 } 2087 2088 return NULL; 2089 } 2090 2091 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf) 2092 { 2093 *rss_caps = 0; 2094 *rss_caps |= (hf & RTE_ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0; 2095 *rss_caps |= (hf & RTE_ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0; 2096 *rss_caps |= (hf & RTE_ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0; 2097 *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; 2098 *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0; 2099 *rss_caps |= (hf & RTE_ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0; 2100 *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0; 2101 *rss_caps |= (hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0; 2102 } 2103 2104 int qede_rss_hash_update(struct rte_eth_dev *eth_dev, 2105 struct rte_eth_rss_conf *rss_conf) 2106 { 2107 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2108 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2109 struct ecore_sp_vport_update_params vport_update_params; 2110 struct ecore_rss_params rss_params; 2111 struct ecore_hwfn *p_hwfn; 2112 uint32_t *key = (uint32_t *)rss_conf->rss_key; 2113 uint64_t hf = rss_conf->rss_hf; 2114 uint8_t len = rss_conf->rss_key_len; 2115 uint8_t idx, i, j, fpidx; 2116 int rc; 2117 2118 memset(&vport_update_params, 0, sizeof(vport_update_params)); 2119 memset(&rss_params, 0, sizeof(rss_params)); 2120 2121 DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n", 2122 (unsigned long)hf, len, key); 2123 2124 if (hf != 0) { 2125 /* Enabling RSS */ 2126 DP_INFO(edev, "Enabling rss\n"); 2127 2128 /* RSS caps */ 2129 qede_init_rss_caps(&rss_params.rss_caps, hf); 2130 rss_params.update_rss_capabilities = 1; 2131 2132 /* RSS hash key */ 2133 if (key) { 2134 if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) { 2135 len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 2136 DP_NOTICE(edev, false, 2137 "RSS key length too big, trimmed to %d\n", 2138 len); 2139 } 2140 DP_INFO(edev, "Applying user supplied hash key\n"); 2141 rss_params.update_rss_key = 1; 2142 memcpy(&rss_params.rss_key, key, len); 2143 } 2144 rss_params.rss_enable = 1; 2145 } 2146 2147 rss_params.update_rss_ind_table = 1; 2148 rss_params.update_rss_config = 1; 2149 /* tbl_size has to be set with capabilities */ 2150 rss_params.rss_table_size_log = 7; 2151 vport_update_params.vport_id = 0; 2152 2153 for_each_hwfn(edev, i) { 2154 /* pass the L2 handles instead of qids */ 2155 for (j = 0 ; j < ECORE_RSS_IND_TABLE_SIZE ; j++) { 2156 idx = j % QEDE_RSS_COUNT(eth_dev); 2157 fpidx = idx * edev->num_hwfns + i; 2158 rss_params.rss_ind_table[j] = 2159 qdev->fp_array[fpidx].rxq->handle; 2160 } 2161 2162 vport_update_params.rss_params = &rss_params; 2163 2164 p_hwfn = &edev->hwfns[i]; 2165 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 2166 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 2167 ECORE_SPQ_MODE_EBLOCK, NULL); 2168 if (rc) { 2169 DP_ERR(edev, "vport-update for RSS failed\n"); 2170 return rc; 2171 } 2172 } 2173 qdev->rss_enable = rss_params.rss_enable; 2174 2175 /* Update local structure for hash query */ 2176 qdev->rss_conf.rss_hf = hf; 2177 qdev->rss_conf.rss_key_len = len; 2178 if (qdev->rss_enable) { 2179 if (qdev->rss_conf.rss_key == NULL) { 2180 qdev->rss_conf.rss_key = (uint8_t *)malloc(len); 2181 if (qdev->rss_conf.rss_key == NULL) { 2182 DP_ERR(edev, "No memory to store RSS key\n"); 2183 return -ENOMEM; 2184 } 2185 } 2186 if (key && len) { 2187 DP_INFO(edev, "Storing RSS key\n"); 2188 memcpy(qdev->rss_conf.rss_key, key, len); 2189 } 2190 } else if (!qdev->rss_enable && len == 0) { 2191 if (qdev->rss_conf.rss_key) { 2192 free(qdev->rss_conf.rss_key); 2193 qdev->rss_conf.rss_key = NULL; 2194 DP_INFO(edev, "Free RSS key\n"); 2195 } 2196 } 2197 2198 return 0; 2199 } 2200 2201 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev, 2202 struct rte_eth_rss_conf *rss_conf) 2203 { 2204 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2205 2206 rss_conf->rss_hf = qdev->rss_conf.rss_hf; 2207 rss_conf->rss_key_len = qdev->rss_conf.rss_key_len; 2208 2209 if (rss_conf->rss_key && qdev->rss_conf.rss_key) 2210 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key, 2211 rss_conf->rss_key_len); 2212 return 0; 2213 } 2214 2215 int qede_rss_reta_update(struct rte_eth_dev *eth_dev, 2216 struct rte_eth_rss_reta_entry64 *reta_conf, 2217 uint16_t reta_size) 2218 { 2219 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2220 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2221 struct ecore_sp_vport_update_params vport_update_params; 2222 struct ecore_rss_params *params; 2223 uint16_t i, j, idx, fid, shift; 2224 struct ecore_hwfn *p_hwfn; 2225 uint8_t entry; 2226 int rc = 0; 2227 2228 if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) { 2229 DP_ERR(edev, "reta_size %d is not supported by hardware\n", 2230 reta_size); 2231 return -EINVAL; 2232 } 2233 2234 memset(&vport_update_params, 0, sizeof(vport_update_params)); 2235 params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE); 2236 if (params == NULL) { 2237 DP_ERR(edev, "failed to allocate memory\n"); 2238 return -ENOMEM; 2239 } 2240 2241 params->update_rss_ind_table = 1; 2242 params->rss_table_size_log = 7; 2243 params->update_rss_config = 1; 2244 2245 vport_update_params.vport_id = 0; 2246 /* Use the current value of rss_enable */ 2247 params->rss_enable = qdev->rss_enable; 2248 vport_update_params.rss_params = params; 2249 2250 for_each_hwfn(edev, i) { 2251 for (j = 0; j < reta_size; j++) { 2252 idx = j / RTE_ETH_RETA_GROUP_SIZE; 2253 shift = j % RTE_ETH_RETA_GROUP_SIZE; 2254 if (reta_conf[idx].mask & (1ULL << shift)) { 2255 entry = reta_conf[idx].reta[shift]; 2256 fid = entry * edev->num_hwfns + i; 2257 /* Pass rxq handles to ecore */ 2258 params->rss_ind_table[j] = 2259 qdev->fp_array[fid].rxq->handle; 2260 /* Update the local copy for RETA query cmd */ 2261 qdev->rss_ind_table[j] = entry; 2262 } 2263 } 2264 2265 p_hwfn = &edev->hwfns[i]; 2266 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 2267 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 2268 ECORE_SPQ_MODE_EBLOCK, NULL); 2269 if (rc) { 2270 DP_ERR(edev, "vport-update for RSS failed\n"); 2271 goto out; 2272 } 2273 } 2274 2275 out: 2276 rte_free(params); 2277 return rc; 2278 } 2279 2280 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev, 2281 struct rte_eth_rss_reta_entry64 *reta_conf, 2282 uint16_t reta_size) 2283 { 2284 struct qede_dev *qdev = eth_dev->data->dev_private; 2285 struct ecore_dev *edev = &qdev->edev; 2286 uint16_t i, idx, shift; 2287 uint8_t entry; 2288 2289 if (reta_size > RTE_ETH_RSS_RETA_SIZE_128) { 2290 DP_ERR(edev, "reta_size %d is not supported\n", 2291 reta_size); 2292 return -EINVAL; 2293 } 2294 2295 for (i = 0; i < reta_size; i++) { 2296 idx = i / RTE_ETH_RETA_GROUP_SIZE; 2297 shift = i % RTE_ETH_RETA_GROUP_SIZE; 2298 if (reta_conf[idx].mask & (1ULL << shift)) { 2299 entry = qdev->rss_ind_table[i]; 2300 reta_conf[idx].reta[shift] = entry; 2301 } 2302 } 2303 2304 return 0; 2305 } 2306 2307 2308 2309 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 2310 { 2311 struct qede_dev *qdev = QEDE_INIT_QDEV(dev); 2312 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2313 struct qede_fastpath *fp; 2314 uint32_t frame_size; 2315 uint16_t bufsz; 2316 bool restart = false; 2317 int i, rc; 2318 2319 PMD_INIT_FUNC_TRACE(edev); 2320 2321 frame_size = mtu + QEDE_MAX_ETHER_HDR_LEN; 2322 if (!dev->data->scattered_rx && 2323 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 2324 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n", 2325 dev->data->min_rx_buf_size); 2326 return -EINVAL; 2327 } 2328 if (dev->data->dev_started) { 2329 dev->data->dev_started = 0; 2330 rc = qede_dev_stop(dev); 2331 if (rc != 0) 2332 return rc; 2333 restart = true; 2334 } 2335 rte_delay_ms(1000); 2336 qdev->new_mtu = mtu; 2337 2338 /* Fix up RX buf size for all queues of the port */ 2339 for (i = 0; i < qdev->num_rx_queues; i++) { 2340 fp = &qdev->fp_array[i]; 2341 if (fp->rxq != NULL) { 2342 bufsz = (uint16_t)rte_pktmbuf_data_room_size( 2343 fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; 2344 /* cache align the mbuf size to simplify rx_buf_size 2345 * calculation 2346 */ 2347 bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz); 2348 rc = qede_calc_rx_buf_size(dev, bufsz, frame_size); 2349 if (rc < 0) 2350 return rc; 2351 2352 fp->rxq->rx_buf_size = rc; 2353 } 2354 } 2355 2356 if (!dev->data->dev_started && restart) { 2357 qede_dev_start(dev); 2358 dev->data->dev_started = 1; 2359 } 2360 2361 return 0; 2362 } 2363 2364 static int 2365 qede_dev_reset(struct rte_eth_dev *dev) 2366 { 2367 int ret; 2368 2369 ret = qede_eth_dev_uninit(dev); 2370 if (ret) 2371 return ret; 2372 2373 return qede_eth_dev_init(dev); 2374 } 2375 2376 static void 2377 qede_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 2378 { 2379 qede_rx_queue_release(dev->data->rx_queues[qid]); 2380 } 2381 2382 static void 2383 qede_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 2384 { 2385 qede_tx_queue_release(dev->data->tx_queues[qid]); 2386 } 2387 2388 static const struct eth_dev_ops qede_eth_dev_ops = { 2389 .dev_configure = qede_dev_configure, 2390 .dev_infos_get = qede_dev_info_get, 2391 .rx_queue_setup = qede_rx_queue_setup, 2392 .rx_queue_release = qede_dev_rx_queue_release, 2393 .tx_queue_setup = qede_tx_queue_setup, 2394 .tx_queue_release = qede_dev_tx_queue_release, 2395 .dev_start = qede_dev_start, 2396 .dev_reset = qede_dev_reset, 2397 .dev_set_link_up = qede_dev_set_link_up, 2398 .dev_set_link_down = qede_dev_set_link_down, 2399 .link_update = qede_link_update, 2400 .promiscuous_enable = qede_promiscuous_enable, 2401 .promiscuous_disable = qede_promiscuous_disable, 2402 .allmulticast_enable = qede_allmulticast_enable, 2403 .allmulticast_disable = qede_allmulticast_disable, 2404 .set_mc_addr_list = qede_set_mc_addr_list, 2405 .dev_stop = qede_dev_stop, 2406 .dev_close = qede_dev_close, 2407 .stats_get = qede_get_stats, 2408 .stats_reset = qede_reset_stats, 2409 .xstats_get = qede_get_xstats, 2410 .xstats_reset = qede_reset_xstats, 2411 .xstats_get_names = qede_get_xstats_names, 2412 .mac_addr_add = qede_mac_addr_add, 2413 .mac_addr_remove = qede_mac_addr_remove, 2414 .mac_addr_set = qede_mac_addr_set, 2415 .vlan_offload_set = qede_vlan_offload_set, 2416 .vlan_filter_set = qede_vlan_filter_set, 2417 .flow_ctrl_set = qede_flow_ctrl_set, 2418 .flow_ctrl_get = qede_flow_ctrl_get, 2419 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, 2420 .rss_hash_update = qede_rss_hash_update, 2421 .rss_hash_conf_get = qede_rss_hash_conf_get, 2422 .reta_update = qede_rss_reta_update, 2423 .reta_query = qede_rss_reta_query, 2424 .mtu_set = qede_set_mtu, 2425 .flow_ops_get = qede_dev_flow_ops_get, 2426 .udp_tunnel_port_add = qede_udp_dst_port_add, 2427 .udp_tunnel_port_del = qede_udp_dst_port_del, 2428 .fw_version_get = qede_fw_version_get, 2429 .get_reg = qede_get_regs, 2430 }; 2431 2432 static const struct eth_dev_ops qede_eth_vf_dev_ops = { 2433 .dev_configure = qede_dev_configure, 2434 .dev_infos_get = qede_dev_info_get, 2435 .rx_queue_setup = qede_rx_queue_setup, 2436 .rx_queue_release = qede_dev_rx_queue_release, 2437 .tx_queue_setup = qede_tx_queue_setup, 2438 .tx_queue_release = qede_dev_tx_queue_release, 2439 .dev_start = qede_dev_start, 2440 .dev_reset = qede_dev_reset, 2441 .dev_set_link_up = qede_dev_set_link_up, 2442 .dev_set_link_down = qede_dev_set_link_down, 2443 .link_update = qede_link_update, 2444 .promiscuous_enable = qede_promiscuous_enable, 2445 .promiscuous_disable = qede_promiscuous_disable, 2446 .allmulticast_enable = qede_allmulticast_enable, 2447 .allmulticast_disable = qede_allmulticast_disable, 2448 .set_mc_addr_list = qede_set_mc_addr_list, 2449 .dev_stop = qede_dev_stop, 2450 .dev_close = qede_dev_close, 2451 .stats_get = qede_get_stats, 2452 .stats_reset = qede_reset_stats, 2453 .xstats_get = qede_get_xstats, 2454 .xstats_reset = qede_reset_xstats, 2455 .xstats_get_names = qede_get_xstats_names, 2456 .vlan_offload_set = qede_vlan_offload_set, 2457 .vlan_filter_set = qede_vlan_filter_set, 2458 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, 2459 .rss_hash_update = qede_rss_hash_update, 2460 .rss_hash_conf_get = qede_rss_hash_conf_get, 2461 .reta_update = qede_rss_reta_update, 2462 .reta_query = qede_rss_reta_query, 2463 .mtu_set = qede_set_mtu, 2464 .udp_tunnel_port_add = qede_udp_dst_port_add, 2465 .udp_tunnel_port_del = qede_udp_dst_port_del, 2466 .mac_addr_add = qede_mac_addr_add, 2467 .mac_addr_remove = qede_mac_addr_remove, 2468 .mac_addr_set = qede_mac_addr_set, 2469 .fw_version_get = qede_fw_version_get, 2470 }; 2471 2472 static void qede_update_pf_params(struct ecore_dev *edev) 2473 { 2474 struct ecore_pf_params pf_params; 2475 2476 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 2477 pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS; 2478 pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; 2479 qed_ops->common->update_pf_params(edev, &pf_params); 2480 } 2481 2482 static void qede_generate_random_mac_addr(struct rte_ether_addr *mac_addr) 2483 { 2484 uint64_t random; 2485 2486 /* Set Organizationally Unique Identifier (OUI) prefix. */ 2487 mac_addr->addr_bytes[0] = 0x00; 2488 mac_addr->addr_bytes[1] = 0x09; 2489 mac_addr->addr_bytes[2] = 0xC0; 2490 2491 /* Force indication of locally assigned MAC address. */ 2492 mac_addr->addr_bytes[0] |= RTE_ETHER_LOCAL_ADMIN_ADDR; 2493 2494 /* Generate the last 3 bytes of the MAC address with a random number. */ 2495 random = rte_rand(); 2496 2497 memcpy(&mac_addr->addr_bytes[3], &random, 3); 2498 } 2499 2500 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) 2501 { 2502 struct rte_pci_device *pci_dev; 2503 struct rte_pci_addr pci_addr; 2504 struct qede_dev *adapter; 2505 struct ecore_dev *edev; 2506 struct qed_dev_eth_info dev_info; 2507 struct qed_slowpath_params params; 2508 static bool do_once = true; 2509 uint8_t bulletin_change; 2510 uint8_t vf_mac[RTE_ETHER_ADDR_LEN]; 2511 uint8_t is_mac_forced; 2512 bool is_mac_exist = false; 2513 /* Fix up ecore debug level */ 2514 uint32_t dp_module = ~0 & ~ECORE_MSG_HW; 2515 uint8_t dp_level = ECORE_LEVEL_VERBOSE; 2516 uint32_t int_mode; 2517 int rc; 2518 2519 /* Extract key data structures */ 2520 adapter = eth_dev->data->dev_private; 2521 adapter->ethdev = eth_dev; 2522 edev = &adapter->edev; 2523 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 2524 pci_addr = pci_dev->addr; 2525 2526 PMD_INIT_FUNC_TRACE(edev); 2527 2528 snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u", 2529 pci_addr.bus, pci_addr.devid, pci_addr.function, 2530 eth_dev->data->port_id); 2531 2532 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2533 DP_ERR(edev, "Skipping device init from secondary process\n"); 2534 return 0; 2535 } 2536 2537 rte_eth_copy_pci_info(eth_dev, pci_dev); 2538 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 2539 2540 /* @DPDK */ 2541 edev->vendor_id = pci_dev->id.vendor_id; 2542 edev->device_id = pci_dev->id.device_id; 2543 2544 qed_ops = qed_get_eth_ops(); 2545 if (!qed_ops) { 2546 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n"); 2547 rc = -EINVAL; 2548 goto err; 2549 } 2550 2551 DP_INFO(edev, "Starting qede probe\n"); 2552 rc = qed_ops->common->probe(edev, pci_dev, dp_module, 2553 dp_level, is_vf); 2554 if (rc != 0) { 2555 DP_ERR(edev, "qede probe failed rc %d\n", rc); 2556 rc = -ENODEV; 2557 goto err; 2558 } 2559 qede_update_pf_params(edev); 2560 2561 switch (rte_intr_type_get(pci_dev->intr_handle)) { 2562 case RTE_INTR_HANDLE_UIO_INTX: 2563 case RTE_INTR_HANDLE_VFIO_LEGACY: 2564 int_mode = ECORE_INT_MODE_INTA; 2565 rte_intr_callback_register(pci_dev->intr_handle, 2566 qede_interrupt_handler_intx, 2567 (void *)eth_dev); 2568 break; 2569 default: 2570 int_mode = ECORE_INT_MODE_MSIX; 2571 rte_intr_callback_register(pci_dev->intr_handle, 2572 qede_interrupt_handler, 2573 (void *)eth_dev); 2574 } 2575 2576 if (rte_intr_enable(pci_dev->intr_handle)) { 2577 DP_ERR(edev, "rte_intr_enable() failed\n"); 2578 rc = -ENODEV; 2579 goto err; 2580 } 2581 2582 /* Start the Slowpath-process */ 2583 memset(¶ms, 0, sizeof(struct qed_slowpath_params)); 2584 2585 params.int_mode = int_mode; 2586 params.drv_major = QEDE_PMD_VERSION_MAJOR; 2587 params.drv_minor = QEDE_PMD_VERSION_MINOR; 2588 params.drv_rev = QEDE_PMD_VERSION_REVISION; 2589 params.drv_eng = QEDE_PMD_VERSION_PATCH; 2590 strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, 2591 QEDE_PMD_DRV_VER_STR_SIZE); 2592 2593 qede_assign_rxtx_handlers(eth_dev, true); 2594 eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; 2595 2596 /* For CMT mode device do periodic polling for slowpath events. 2597 * This is required since uio device uses only one MSI-x 2598 * interrupt vector but we need one for each engine. 2599 */ 2600 if (ECORE_IS_CMT(edev) && IS_PF(edev)) { 2601 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, 2602 qede_poll_sp_sb_cb, 2603 (void *)eth_dev); 2604 if (rc != 0) { 2605 DP_ERR(edev, "Unable to start periodic" 2606 " timer rc %d\n", rc); 2607 rc = -EINVAL; 2608 goto err; 2609 } 2610 } 2611 2612 rc = qed_ops->common->slowpath_start(edev, ¶ms); 2613 if (rc) { 2614 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc); 2615 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2616 (void *)eth_dev); 2617 rc = -ENODEV; 2618 goto err; 2619 } 2620 2621 rc = qed_ops->fill_dev_info(edev, &dev_info); 2622 if (rc) { 2623 DP_ERR(edev, "Cannot get device_info rc %d\n", rc); 2624 qed_ops->common->slowpath_stop(edev); 2625 qed_ops->common->remove(edev); 2626 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2627 (void *)eth_dev); 2628 rc = -ENODEV; 2629 goto err; 2630 } 2631 2632 qede_alloc_etherdev(adapter, &dev_info); 2633 2634 if (do_once) { 2635 qede_print_adapter_info(eth_dev); 2636 do_once = false; 2637 } 2638 2639 adapter->ops->common->set_name(edev, edev->name); 2640 2641 if (!is_vf) 2642 adapter->dev_info.num_mac_filters = 2643 (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev), 2644 ECORE_MAC); 2645 else 2646 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev), 2647 (uint32_t *)&adapter->dev_info.num_mac_filters); 2648 2649 /* Allocate memory for storing MAC addr */ 2650 eth_dev->data->mac_addrs = rte_zmalloc(edev->name, 2651 (RTE_ETHER_ADDR_LEN * 2652 adapter->dev_info.num_mac_filters), 2653 RTE_CACHE_LINE_SIZE); 2654 2655 if (eth_dev->data->mac_addrs == NULL) { 2656 DP_ERR(edev, "Failed to allocate MAC address\n"); 2657 qed_ops->common->slowpath_stop(edev); 2658 qed_ops->common->remove(edev); 2659 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2660 (void *)eth_dev); 2661 return -ENOMEM; 2662 } 2663 2664 if (!is_vf) { 2665 rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0]. 2666 hw_info.hw_mac_addr, 2667 ð_dev->data->mac_addrs[0]); 2668 rte_ether_addr_copy(ð_dev->data->mac_addrs[0], 2669 &adapter->primary_mac); 2670 } else { 2671 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev), 2672 &bulletin_change); 2673 if (bulletin_change) { 2674 is_mac_exist = 2675 ecore_vf_bulletin_get_forced_mac( 2676 ECORE_LEADING_HWFN(edev), 2677 vf_mac, 2678 &is_mac_forced); 2679 if (is_mac_exist) { 2680 DP_INFO(edev, "VF macaddr received from PF\n"); 2681 rte_ether_addr_copy( 2682 (struct rte_ether_addr *)&vf_mac, 2683 ð_dev->data->mac_addrs[0]); 2684 rte_ether_addr_copy( 2685 ð_dev->data->mac_addrs[0], 2686 &adapter->primary_mac); 2687 } else { 2688 DP_ERR(edev, "No VF macaddr assigned\n"); 2689 } 2690 } 2691 2692 /* If MAC doesn't exist from PF, generate random one */ 2693 if (!is_mac_exist) { 2694 struct rte_ether_addr *mac_addr; 2695 2696 mac_addr = (struct rte_ether_addr *)&vf_mac; 2697 qede_generate_random_mac_addr(mac_addr); 2698 2699 rte_ether_addr_copy(mac_addr, 2700 ð_dev->data->mac_addrs[0]); 2701 2702 rte_ether_addr_copy(ð_dev->data->mac_addrs[0], 2703 &adapter->primary_mac); 2704 } 2705 } 2706 2707 eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops; 2708 eth_dev->rx_descriptor_status = qede_rx_descriptor_status; 2709 2710 adapter->num_tx_queues = 0; 2711 adapter->num_rx_queues = 0; 2712 SLIST_INIT(&adapter->arfs_info.arfs_list_head); 2713 SLIST_INIT(&adapter->vlan_list_head); 2714 SLIST_INIT(&adapter->uc_list_head); 2715 SLIST_INIT(&adapter->mc_list_head); 2716 adapter->mtu = RTE_ETHER_MTU; 2717 adapter->vport_started = false; 2718 2719 /* VF tunnel offloads is enabled by default in PF driver */ 2720 adapter->vxlan.num_filters = 0; 2721 adapter->geneve.num_filters = 0; 2722 adapter->ipgre.num_filters = 0; 2723 if (is_vf) { 2724 adapter->vxlan.enable = true; 2725 adapter->vxlan.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC | 2726 RTE_ETH_TUNNEL_FILTER_IVLAN; 2727 adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT; 2728 adapter->geneve.enable = true; 2729 adapter->geneve.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC | 2730 RTE_ETH_TUNNEL_FILTER_IVLAN; 2731 adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT; 2732 adapter->ipgre.enable = true; 2733 adapter->ipgre.filter_type = RTE_ETH_TUNNEL_FILTER_IMAC | 2734 RTE_ETH_TUNNEL_FILTER_IVLAN; 2735 } else { 2736 adapter->vxlan.enable = false; 2737 adapter->geneve.enable = false; 2738 adapter->ipgre.enable = false; 2739 qed_ops->sriov_configure(edev, pci_dev->max_vfs); 2740 } 2741 2742 DP_INFO(edev, "MAC address : " RTE_ETHER_ADDR_PRT_FMT "\n", 2743 RTE_ETHER_ADDR_BYTES(&adapter->primary_mac)); 2744 2745 DP_INFO(edev, "Device initialized\n"); 2746 2747 return 0; 2748 2749 err: 2750 if (do_once) { 2751 qede_print_adapter_info(eth_dev); 2752 do_once = false; 2753 } 2754 return rc; 2755 } 2756 2757 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev) 2758 { 2759 return qede_common_dev_init(eth_dev, 1); 2760 } 2761 2762 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev) 2763 { 2764 return qede_common_dev_init(eth_dev, 0); 2765 } 2766 2767 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev) 2768 { 2769 struct qede_dev *qdev = eth_dev->data->dev_private; 2770 struct ecore_dev *edev = &qdev->edev; 2771 PMD_INIT_FUNC_TRACE(edev); 2772 qede_dev_close(eth_dev); 2773 return 0; 2774 } 2775 2776 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2777 { 2778 return qede_dev_common_uninit(eth_dev); 2779 } 2780 2781 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2782 { 2783 return qede_dev_common_uninit(eth_dev); 2784 } 2785 2786 static const struct rte_pci_id pci_id_qedevf_map[] = { 2787 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2788 { 2789 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF) 2790 }, 2791 { 2792 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV) 2793 }, 2794 { 2795 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV) 2796 }, 2797 {.vendor_id = 0,} 2798 }; 2799 2800 static const struct rte_pci_id pci_id_qede_map[] = { 2801 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2802 { 2803 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E) 2804 }, 2805 { 2806 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S) 2807 }, 2808 { 2809 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40) 2810 }, 2811 { 2812 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25) 2813 }, 2814 { 2815 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100) 2816 }, 2817 { 2818 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50) 2819 }, 2820 { 2821 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G) 2822 }, 2823 { 2824 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G) 2825 }, 2826 { 2827 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G) 2828 }, 2829 { 2830 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G) 2831 }, 2832 {.vendor_id = 0,} 2833 }; 2834 2835 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2836 struct rte_pci_device *pci_dev) 2837 { 2838 return rte_eth_dev_pci_generic_probe(pci_dev, 2839 sizeof(struct qede_dev), qedevf_eth_dev_init); 2840 } 2841 2842 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2843 { 2844 return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit); 2845 } 2846 2847 static struct rte_pci_driver rte_qedevf_pmd = { 2848 .id_table = pci_id_qedevf_map, 2849 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2850 .probe = qedevf_eth_dev_pci_probe, 2851 .remove = qedevf_eth_dev_pci_remove, 2852 }; 2853 2854 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2855 struct rte_pci_device *pci_dev) 2856 { 2857 return rte_eth_dev_pci_generic_probe(pci_dev, 2858 sizeof(struct qede_dev), qede_eth_dev_init); 2859 } 2860 2861 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2862 { 2863 return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit); 2864 } 2865 2866 static struct rte_pci_driver rte_qede_pmd = { 2867 .id_table = pci_id_qede_map, 2868 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2869 .probe = qede_eth_dev_pci_probe, 2870 .remove = qede_eth_dev_pci_remove, 2871 }; 2872 2873 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd); 2874 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map); 2875 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci"); 2876 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd); 2877 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map); 2878 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci"); 2879 RTE_LOG_REGISTER_SUFFIX(qede_logtype_init, init, NOTICE); 2880 RTE_LOG_REGISTER_SUFFIX(qede_logtype_driver, driver, NOTICE); 2881