1 /* 2 * Copyright (c) 2016 QLogic Corporation. 3 * All rights reserved. 4 * www.qlogic.com 5 * 6 * See LICENSE.qede_pmd for copyright and licensing details. 7 */ 8 9 #include "qede_ethdev.h" 10 #include <rte_alarm.h> 11 #include <rte_version.h> 12 13 /* Globals */ 14 static const struct qed_eth_ops *qed_ops; 15 static int64_t timer_period = 1; 16 17 /* VXLAN tunnel classification mapping */ 18 const struct _qede_vxlan_tunn_types { 19 uint16_t rte_filter_type; 20 enum ecore_filter_ucast_type qede_type; 21 enum ecore_tunn_clss qede_tunn_clss; 22 const char *string; 23 } qede_tunn_types[] = { 24 { 25 ETH_TUNNEL_FILTER_OMAC, 26 ECORE_FILTER_MAC, 27 ECORE_TUNN_CLSS_MAC_VLAN, 28 "outer-mac" 29 }, 30 { 31 ETH_TUNNEL_FILTER_TENID, 32 ECORE_FILTER_VNI, 33 ECORE_TUNN_CLSS_MAC_VNI, 34 "vni" 35 }, 36 { 37 ETH_TUNNEL_FILTER_IMAC, 38 ECORE_FILTER_INNER_MAC, 39 ECORE_TUNN_CLSS_INNER_MAC_VLAN, 40 "inner-mac" 41 }, 42 { 43 ETH_TUNNEL_FILTER_IVLAN, 44 ECORE_FILTER_INNER_VLAN, 45 ECORE_TUNN_CLSS_INNER_MAC_VLAN, 46 "inner-vlan" 47 }, 48 { 49 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID, 50 ECORE_FILTER_MAC_VNI_PAIR, 51 ECORE_TUNN_CLSS_MAC_VNI, 52 "outer-mac and vni" 53 }, 54 { 55 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC, 56 ECORE_FILTER_UNUSED, 57 MAX_ECORE_TUNN_CLSS, 58 "outer-mac and inner-mac" 59 }, 60 { 61 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN, 62 ECORE_FILTER_UNUSED, 63 MAX_ECORE_TUNN_CLSS, 64 "outer-mac and inner-vlan" 65 }, 66 { 67 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC, 68 ECORE_FILTER_INNER_MAC_VNI_PAIR, 69 ECORE_TUNN_CLSS_INNER_MAC_VNI, 70 "vni and inner-mac", 71 }, 72 { 73 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN, 74 ECORE_FILTER_UNUSED, 75 MAX_ECORE_TUNN_CLSS, 76 "vni and inner-vlan", 77 }, 78 { 79 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN, 80 ECORE_FILTER_INNER_PAIR, 81 ECORE_TUNN_CLSS_INNER_MAC_VLAN, 82 "inner-mac and inner-vlan", 83 }, 84 { 85 ETH_TUNNEL_FILTER_OIP, 86 ECORE_FILTER_UNUSED, 87 MAX_ECORE_TUNN_CLSS, 88 "outer-IP" 89 }, 90 { 91 ETH_TUNNEL_FILTER_IIP, 92 ECORE_FILTER_UNUSED, 93 MAX_ECORE_TUNN_CLSS, 94 "inner-IP" 95 }, 96 { 97 RTE_TUNNEL_FILTER_IMAC_IVLAN, 98 ECORE_FILTER_UNUSED, 99 MAX_ECORE_TUNN_CLSS, 100 "IMAC_IVLAN" 101 }, 102 { 103 RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID, 104 ECORE_FILTER_UNUSED, 105 MAX_ECORE_TUNN_CLSS, 106 "IMAC_IVLAN_TENID" 107 }, 108 { 109 RTE_TUNNEL_FILTER_IMAC_TENID, 110 ECORE_FILTER_UNUSED, 111 MAX_ECORE_TUNN_CLSS, 112 "IMAC_TENID" 113 }, 114 { 115 RTE_TUNNEL_FILTER_OMAC_TENID_IMAC, 116 ECORE_FILTER_UNUSED, 117 MAX_ECORE_TUNN_CLSS, 118 "OMAC_TENID_IMAC" 119 }, 120 }; 121 122 struct rte_qede_xstats_name_off { 123 char name[RTE_ETH_XSTATS_NAME_SIZE]; 124 uint64_t offset; 125 }; 126 127 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = { 128 {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)}, 129 {"rx_multicast_bytes", 130 offsetof(struct ecore_eth_stats, rx_mcast_bytes)}, 131 {"rx_broadcast_bytes", 132 offsetof(struct ecore_eth_stats, rx_bcast_bytes)}, 133 {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)}, 134 {"rx_multicast_packets", 135 offsetof(struct ecore_eth_stats, rx_mcast_pkts)}, 136 {"rx_broadcast_packets", 137 offsetof(struct ecore_eth_stats, rx_bcast_pkts)}, 138 139 {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)}, 140 {"tx_multicast_bytes", 141 offsetof(struct ecore_eth_stats, tx_mcast_bytes)}, 142 {"tx_broadcast_bytes", 143 offsetof(struct ecore_eth_stats, tx_bcast_bytes)}, 144 {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)}, 145 {"tx_multicast_packets", 146 offsetof(struct ecore_eth_stats, tx_mcast_pkts)}, 147 {"tx_broadcast_packets", 148 offsetof(struct ecore_eth_stats, tx_bcast_pkts)}, 149 150 {"rx_64_byte_packets", 151 offsetof(struct ecore_eth_stats, rx_64_byte_packets)}, 152 {"rx_65_to_127_byte_packets", 153 offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)}, 154 {"rx_128_to_255_byte_packets", 155 offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)}, 156 {"rx_256_to_511_byte_packets", 157 offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)}, 158 {"rx_512_to_1023_byte_packets", 159 offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)}, 160 {"rx_1024_to_1518_byte_packets", 161 offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)}, 162 {"rx_1519_to_1522_byte_packets", 163 offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)}, 164 {"rx_1519_to_2047_byte_packets", 165 offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)}, 166 {"rx_2048_to_4095_byte_packets", 167 offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)}, 168 {"rx_4096_to_9216_byte_packets", 169 offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)}, 170 {"rx_9217_to_16383_byte_packets", 171 offsetof(struct ecore_eth_stats, 172 rx_9217_to_16383_byte_packets)}, 173 {"tx_64_byte_packets", 174 offsetof(struct ecore_eth_stats, tx_64_byte_packets)}, 175 {"tx_65_to_127_byte_packets", 176 offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)}, 177 {"tx_128_to_255_byte_packets", 178 offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)}, 179 {"tx_256_to_511_byte_packets", 180 offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)}, 181 {"tx_512_to_1023_byte_packets", 182 offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)}, 183 {"tx_1024_to_1518_byte_packets", 184 offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)}, 185 {"trx_1519_to_1522_byte_packets", 186 offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)}, 187 {"tx_2048_to_4095_byte_packets", 188 offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)}, 189 {"tx_4096_to_9216_byte_packets", 190 offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)}, 191 {"tx_9217_to_16383_byte_packets", 192 offsetof(struct ecore_eth_stats, 193 tx_9217_to_16383_byte_packets)}, 194 195 {"rx_mac_crtl_frames", 196 offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)}, 197 {"tx_mac_control_frames", 198 offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)}, 199 {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)}, 200 {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)}, 201 {"rx_priority_flow_control_frames", 202 offsetof(struct ecore_eth_stats, rx_pfc_frames)}, 203 {"tx_priority_flow_control_frames", 204 offsetof(struct ecore_eth_stats, tx_pfc_frames)}, 205 206 {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)}, 207 {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)}, 208 {"rx_carrier_errors", 209 offsetof(struct ecore_eth_stats, rx_carrier_errors)}, 210 {"rx_oversize_packet_errors", 211 offsetof(struct ecore_eth_stats, rx_oversize_packets)}, 212 {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)}, 213 {"rx_undersize_packet_errors", 214 offsetof(struct ecore_eth_stats, rx_undersize_packets)}, 215 {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)}, 216 {"rx_host_buffer_not_available", 217 offsetof(struct ecore_eth_stats, no_buff_discards)}, 218 /* Number of packets discarded because they are bigger than MTU */ 219 {"rx_packet_too_big_discards", 220 offsetof(struct ecore_eth_stats, packet_too_big_discard)}, 221 {"rx_ttl_zero_discards", 222 offsetof(struct ecore_eth_stats, ttl0_discard)}, 223 {"rx_multi_function_tag_filter_discards", 224 offsetof(struct ecore_eth_stats, mftag_filter_discards)}, 225 {"rx_mac_filter_discards", 226 offsetof(struct ecore_eth_stats, mac_filter_discards)}, 227 {"rx_hw_buffer_truncates", 228 offsetof(struct ecore_eth_stats, brb_truncates)}, 229 {"rx_hw_buffer_discards", 230 offsetof(struct ecore_eth_stats, brb_discards)}, 231 {"tx_lpi_entry_count", 232 offsetof(struct ecore_eth_stats, tx_lpi_entry_count)}, 233 {"tx_total_collisions", 234 offsetof(struct ecore_eth_stats, tx_total_collisions)}, 235 {"tx_error_drop_packets", 236 offsetof(struct ecore_eth_stats, tx_err_drop_pkts)}, 237 238 {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)}, 239 {"rx_mac_unicast_packets", 240 offsetof(struct ecore_eth_stats, rx_mac_uc_packets)}, 241 {"rx_mac_multicast_packets", 242 offsetof(struct ecore_eth_stats, rx_mac_mc_packets)}, 243 {"rx_mac_broadcast_packets", 244 offsetof(struct ecore_eth_stats, rx_mac_bc_packets)}, 245 {"rx_mac_frames_ok", 246 offsetof(struct ecore_eth_stats, rx_mac_frames_ok)}, 247 {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)}, 248 {"tx_mac_unicast_packets", 249 offsetof(struct ecore_eth_stats, tx_mac_uc_packets)}, 250 {"tx_mac_multicast_packets", 251 offsetof(struct ecore_eth_stats, tx_mac_mc_packets)}, 252 {"tx_mac_broadcast_packets", 253 offsetof(struct ecore_eth_stats, tx_mac_bc_packets)}, 254 255 {"lro_coalesced_packets", 256 offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)}, 257 {"lro_coalesced_events", 258 offsetof(struct ecore_eth_stats, tpa_coalesced_events)}, 259 {"lro_aborts_num", 260 offsetof(struct ecore_eth_stats, tpa_aborts_num)}, 261 {"lro_not_coalesced_packets", 262 offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)}, 263 {"lro_coalesced_bytes", 264 offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)}, 265 }; 266 267 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = { 268 {"rx_q_segments", 269 offsetof(struct qede_rx_queue, rx_segs)}, 270 {"rx_q_hw_errors", 271 offsetof(struct qede_rx_queue, rx_hw_errors)}, 272 {"rx_q_allocation_errors", 273 offsetof(struct qede_rx_queue, rx_alloc_errors)} 274 }; 275 276 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) 277 { 278 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); 279 } 280 281 static void 282 qede_interrupt_handler(void *param) 283 { 284 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 285 struct qede_dev *qdev = eth_dev->data->dev_private; 286 struct ecore_dev *edev = &qdev->edev; 287 288 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 289 if (rte_intr_enable(eth_dev->intr_handle)) 290 DP_ERR(edev, "rte_intr_enable failed\n"); 291 } 292 293 static void 294 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info) 295 { 296 rte_memcpy(&qdev->dev_info, info, sizeof(*info)); 297 qdev->ops = qed_ops; 298 } 299 300 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO 301 static void qede_print_adapter_info(struct qede_dev *qdev) 302 { 303 struct ecore_dev *edev = &qdev->edev; 304 struct qed_dev_info *info = &qdev->dev_info.common; 305 static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE]; 306 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; 307 308 DP_INFO(edev, "*********************************\n"); 309 DP_INFO(edev, " DPDK version:%s\n", rte_version()); 310 DP_INFO(edev, " Chip details : %s%d\n", 311 ECORE_IS_BB(edev) ? "BB" : "AH", 312 CHIP_REV_IS_A0(edev) ? 0 : 1); 313 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d", 314 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng); 315 snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s", 316 ver_str, QEDE_PMD_VERSION); 317 DP_INFO(edev, " Driver version : %s\n", drv_ver); 318 DP_INFO(edev, " Firmware version : %s\n", ver_str); 319 320 snprintf(ver_str, MCP_DRV_VER_STR_SIZE, 321 "%d.%d.%d.%d", 322 (info->mfw_rev >> 24) & 0xff, 323 (info->mfw_rev >> 16) & 0xff, 324 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff); 325 DP_INFO(edev, " Management Firmware version : %s\n", ver_str); 326 DP_INFO(edev, " Firmware file : %s\n", fw_file); 327 DP_INFO(edev, "*********************************\n"); 328 } 329 #endif 330 331 static int 332 qede_start_vport(struct qede_dev *qdev, uint16_t mtu) 333 { 334 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 335 struct ecore_sp_vport_start_params params; 336 struct ecore_hwfn *p_hwfn; 337 int rc; 338 int i; 339 340 memset(¶ms, 0, sizeof(params)); 341 params.vport_id = 0; 342 params.mtu = mtu; 343 /* @DPDK - Disable FW placement */ 344 params.zero_placement_offset = 1; 345 for_each_hwfn(edev, i) { 346 p_hwfn = &edev->hwfns[i]; 347 params.concrete_fid = p_hwfn->hw_info.concrete_fid; 348 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 349 rc = ecore_sp_vport_start(p_hwfn, ¶ms); 350 if (rc != ECORE_SUCCESS) { 351 DP_ERR(edev, "Start V-PORT failed %d\n", rc); 352 return rc; 353 } 354 } 355 ecore_reset_vport_stats(edev); 356 DP_INFO(edev, "VPORT started with MTU = %u\n", mtu); 357 358 return 0; 359 } 360 361 static int 362 qede_stop_vport(struct ecore_dev *edev) 363 { 364 struct ecore_hwfn *p_hwfn; 365 uint8_t vport_id; 366 int rc; 367 int i; 368 369 vport_id = 0; 370 for_each_hwfn(edev, i) { 371 p_hwfn = &edev->hwfns[i]; 372 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 373 vport_id); 374 if (rc != ECORE_SUCCESS) { 375 DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc); 376 return rc; 377 } 378 } 379 380 return 0; 381 } 382 383 /* Activate or deactivate vport via vport-update */ 384 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg) 385 { 386 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 387 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 388 struct ecore_sp_vport_update_params params; 389 struct ecore_hwfn *p_hwfn; 390 uint8_t i; 391 int rc = -1; 392 393 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 394 params.vport_id = 0; 395 params.update_vport_active_rx_flg = 1; 396 params.update_vport_active_tx_flg = 1; 397 params.vport_active_rx_flg = flg; 398 params.vport_active_tx_flg = flg; 399 for_each_hwfn(edev, i) { 400 p_hwfn = &edev->hwfns[i]; 401 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 402 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 403 ECORE_SPQ_MODE_EBLOCK, NULL); 404 if (rc != ECORE_SUCCESS) { 405 DP_ERR(edev, "Failed to update vport\n"); 406 break; 407 } 408 } 409 DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated"); 410 return rc; 411 } 412 413 static void 414 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params, 415 uint16_t mtu, bool enable) 416 { 417 /* Enable LRO in split mode */ 418 sge_tpa_params->tpa_ipv4_en_flg = enable; 419 sge_tpa_params->tpa_ipv6_en_flg = enable; 420 sge_tpa_params->tpa_ipv4_tunn_en_flg = false; 421 sge_tpa_params->tpa_ipv6_tunn_en_flg = false; 422 /* set if tpa enable changes */ 423 sge_tpa_params->update_tpa_en_flg = 1; 424 /* set if tpa parameters should be handled */ 425 sge_tpa_params->update_tpa_param_flg = enable; 426 427 sge_tpa_params->max_buffers_per_cqe = 20; 428 /* Enable TPA in split mode. In this mode each TPA segment 429 * starts on the new BD, so there is one BD per segment. 430 */ 431 sge_tpa_params->tpa_pkt_split_flg = 1; 432 sge_tpa_params->tpa_hdr_data_split_flg = 0; 433 sge_tpa_params->tpa_gro_consistent_flg = 0; 434 sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 435 sge_tpa_params->tpa_max_size = 0x7FFF; 436 sge_tpa_params->tpa_min_size_to_start = mtu / 2; 437 sge_tpa_params->tpa_min_size_to_cont = mtu / 2; 438 } 439 440 /* Enable/disable LRO via vport-update */ 441 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg) 442 { 443 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 444 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 445 struct ecore_sp_vport_update_params params; 446 struct ecore_sge_tpa_params tpa_params; 447 struct ecore_hwfn *p_hwfn; 448 int rc; 449 int i; 450 451 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 452 memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params)); 453 qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg); 454 params.vport_id = 0; 455 params.sge_tpa_params = &tpa_params; 456 for_each_hwfn(edev, i) { 457 p_hwfn = &edev->hwfns[i]; 458 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 459 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 460 ECORE_SPQ_MODE_EBLOCK, NULL); 461 if (rc != ECORE_SUCCESS) { 462 DP_ERR(edev, "Failed to update LRO\n"); 463 return -1; 464 } 465 } 466 467 DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled"); 468 469 return 0; 470 } 471 472 /* Update MTU via vport-update without doing port restart. 473 * The vport must be deactivated before calling this API. 474 */ 475 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu) 476 { 477 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 478 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 479 struct ecore_sp_vport_update_params params; 480 struct ecore_hwfn *p_hwfn; 481 int rc; 482 int i; 483 484 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 485 params.vport_id = 0; 486 params.mtu = mtu; 487 params.vport_id = 0; 488 for_each_hwfn(edev, i) { 489 p_hwfn = &edev->hwfns[i]; 490 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 491 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 492 ECORE_SPQ_MODE_EBLOCK, NULL); 493 if (rc != ECORE_SUCCESS) { 494 DP_ERR(edev, "Failed to update MTU\n"); 495 return -1; 496 } 497 } 498 DP_INFO(edev, "MTU updated to %u\n", mtu); 499 500 return 0; 501 } 502 503 static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast) 504 { 505 memset(ucast, 0, sizeof(struct ecore_filter_ucast)); 506 ucast->is_rx_filter = true; 507 ucast->is_tx_filter = true; 508 /* ucast->assert_on_error = true; - For debug */ 509 } 510 511 static int 512 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev, 513 enum qed_filter_rx_mode_type type) 514 { 515 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 516 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 517 struct ecore_filter_accept_flags flags; 518 519 memset(&flags, 0, sizeof(flags)); 520 521 flags.update_rx_mode_config = 1; 522 flags.update_tx_mode_config = 1; 523 flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 524 ECORE_ACCEPT_MCAST_MATCHED | 525 ECORE_ACCEPT_BCAST; 526 527 flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 528 ECORE_ACCEPT_MCAST_MATCHED | 529 ECORE_ACCEPT_BCAST; 530 531 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { 532 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 533 if (IS_VF(edev)) { 534 flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 535 DP_INFO(edev, "Enabling Tx unmatched flag for VF\n"); 536 } 537 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { 538 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 539 } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC | 540 QED_FILTER_RX_MODE_TYPE_PROMISC)) { 541 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED | 542 ECORE_ACCEPT_MCAST_UNMATCHED; 543 } 544 545 return ecore_filter_accept_cmd(edev, 0, flags, false, false, 546 ECORE_SPQ_MODE_CB, NULL); 547 } 548 static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn, 549 uint8_t clss, bool mode, bool mask) 550 { 551 memset(p_tunn, 0, sizeof(struct ecore_tunnel_info)); 552 p_tunn->vxlan.b_update_mode = mode; 553 p_tunn->vxlan.b_mode_enabled = mask; 554 p_tunn->b_update_rx_cls = true; 555 p_tunn->b_update_tx_cls = true; 556 p_tunn->vxlan.tun_cls = clss; 557 } 558 559 static int 560 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 561 bool add) 562 { 563 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 564 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 565 struct qede_ucast_entry *tmp = NULL; 566 struct qede_ucast_entry *u; 567 struct ether_addr *mac_addr; 568 569 mac_addr = (struct ether_addr *)ucast->mac; 570 if (add) { 571 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 572 if ((memcmp(mac_addr, &tmp->mac, 573 ETHER_ADDR_LEN) == 0) && 574 ucast->vni == tmp->vni && 575 ucast->vlan == tmp->vlan) { 576 DP_ERR(edev, "Unicast MAC is already added" 577 " with vlan = %u, vni = %u\n", 578 ucast->vlan, ucast->vni); 579 return -EEXIST; 580 } 581 } 582 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), 583 RTE_CACHE_LINE_SIZE); 584 if (!u) { 585 DP_ERR(edev, "Did not allocate memory for ucast\n"); 586 return -ENOMEM; 587 } 588 ether_addr_copy(mac_addr, &u->mac); 589 u->vlan = ucast->vlan; 590 u->vni = ucast->vni; 591 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list); 592 qdev->num_uc_addr++; 593 } else { 594 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 595 if ((memcmp(mac_addr, &tmp->mac, 596 ETHER_ADDR_LEN) == 0) && 597 ucast->vlan == tmp->vlan && 598 ucast->vni == tmp->vni) 599 break; 600 } 601 if (tmp == NULL) { 602 DP_INFO(edev, "Unicast MAC is not found\n"); 603 return -EINVAL; 604 } 605 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list); 606 qdev->num_uc_addr--; 607 } 608 609 return 0; 610 } 611 612 static int 613 qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast, 614 bool add) 615 { 616 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 617 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 618 struct ether_addr *mac_addr; 619 struct qede_mcast_entry *tmp = NULL; 620 struct qede_mcast_entry *m; 621 622 mac_addr = (struct ether_addr *)mcast->mac; 623 if (add) { 624 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 625 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) { 626 DP_ERR(edev, 627 "Multicast MAC is already added\n"); 628 return -EEXIST; 629 } 630 } 631 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), 632 RTE_CACHE_LINE_SIZE); 633 if (!m) { 634 DP_ERR(edev, 635 "Did not allocate memory for mcast\n"); 636 return -ENOMEM; 637 } 638 ether_addr_copy(mac_addr, &m->mac); 639 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list); 640 qdev->num_mc_addr++; 641 } else { 642 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 643 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) 644 break; 645 } 646 if (tmp == NULL) { 647 DP_INFO(edev, "Multicast mac is not found\n"); 648 return -EINVAL; 649 } 650 SLIST_REMOVE(&qdev->mc_list_head, tmp, 651 qede_mcast_entry, list); 652 qdev->num_mc_addr--; 653 } 654 655 return 0; 656 } 657 658 static enum _ecore_status_t 659 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 660 bool add) 661 { 662 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 663 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 664 enum _ecore_status_t rc; 665 struct ecore_filter_mcast mcast; 666 struct qede_mcast_entry *tmp; 667 uint16_t j = 0; 668 669 /* Multicast */ 670 if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) { 671 if (add) { 672 if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) { 673 DP_ERR(edev, 674 "Mcast filter table limit exceeded, " 675 "Please enable mcast promisc mode\n"); 676 return -ECORE_INVAL; 677 } 678 } 679 rc = qede_mcast_filter(eth_dev, ucast, add); 680 if (rc == 0) { 681 DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr); 682 memset(&mcast, 0, sizeof(mcast)); 683 mcast.num_mc_addrs = qdev->num_mc_addr; 684 mcast.opcode = ECORE_FILTER_ADD; 685 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 686 ether_addr_copy(&tmp->mac, 687 (struct ether_addr *)&mcast.mac[j]); 688 j++; 689 } 690 rc = ecore_filter_mcast_cmd(edev, &mcast, 691 ECORE_SPQ_MODE_CB, NULL); 692 } 693 if (rc != ECORE_SUCCESS) { 694 DP_ERR(edev, "Failed to add multicast filter" 695 " rc = %d, op = %d\n", rc, add); 696 } 697 } else { /* Unicast */ 698 if (add) { 699 if (qdev->num_uc_addr >= 700 qdev->dev_info.num_mac_filters) { 701 DP_ERR(edev, 702 "Ucast filter table limit exceeded," 703 " Please enable promisc mode\n"); 704 return -ECORE_INVAL; 705 } 706 } 707 rc = qede_ucast_filter(eth_dev, ucast, add); 708 if (rc == 0) 709 rc = ecore_filter_ucast_cmd(edev, ucast, 710 ECORE_SPQ_MODE_CB, NULL); 711 if (rc != ECORE_SUCCESS) { 712 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", 713 rc, add); 714 } 715 } 716 717 return rc; 718 } 719 720 static int 721 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr, 722 __rte_unused uint32_t index, __rte_unused uint32_t pool) 723 { 724 struct ecore_filter_ucast ucast; 725 int re; 726 727 qede_set_ucast_cmn_params(&ucast); 728 ucast.type = ECORE_FILTER_MAC; 729 ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac); 730 re = (int)qede_mac_int_ops(eth_dev, &ucast, 1); 731 return re; 732 } 733 734 static void 735 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) 736 { 737 struct qede_dev *qdev = eth_dev->data->dev_private; 738 struct ecore_dev *edev = &qdev->edev; 739 struct ecore_filter_ucast ucast; 740 741 PMD_INIT_FUNC_TRACE(edev); 742 743 if (index >= qdev->dev_info.num_mac_filters) { 744 DP_ERR(edev, "Index %u is above MAC filter limit %u\n", 745 index, qdev->dev_info.num_mac_filters); 746 return; 747 } 748 749 qede_set_ucast_cmn_params(&ucast); 750 ucast.opcode = ECORE_FILTER_REMOVE; 751 ucast.type = ECORE_FILTER_MAC; 752 753 /* Use the index maintained by rte */ 754 ether_addr_copy(ð_dev->data->mac_addrs[index], 755 (struct ether_addr *)&ucast.mac); 756 757 ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL); 758 } 759 760 static void 761 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr) 762 { 763 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 764 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 765 766 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), 767 mac_addr->addr_bytes)) { 768 DP_ERR(edev, "Setting MAC address is not allowed\n"); 769 ether_addr_copy(&qdev->primary_mac, 770 ð_dev->data->mac_addrs[0]); 771 return; 772 } 773 774 qede_mac_addr_add(eth_dev, mac_addr, 0, 0); 775 } 776 777 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg) 778 { 779 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 780 struct ecore_sp_vport_update_params params; 781 struct ecore_hwfn *p_hwfn; 782 uint8_t i; 783 int rc; 784 785 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 786 params.vport_id = 0; 787 params.update_accept_any_vlan_flg = 1; 788 params.accept_any_vlan = flg; 789 for_each_hwfn(edev, i) { 790 p_hwfn = &edev->hwfns[i]; 791 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 792 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 793 ECORE_SPQ_MODE_EBLOCK, NULL); 794 if (rc != ECORE_SUCCESS) { 795 DP_ERR(edev, "Failed to configure accept-any-vlan\n"); 796 return; 797 } 798 } 799 800 DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled"); 801 } 802 803 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg) 804 { 805 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 806 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 807 struct ecore_sp_vport_update_params params; 808 struct ecore_hwfn *p_hwfn; 809 uint8_t i; 810 int rc; 811 812 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 813 params.vport_id = 0; 814 params.update_inner_vlan_removal_flg = 1; 815 params.inner_vlan_removal_flg = flg; 816 for_each_hwfn(edev, i) { 817 p_hwfn = &edev->hwfns[i]; 818 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 819 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 820 ECORE_SPQ_MODE_EBLOCK, NULL); 821 if (rc != ECORE_SUCCESS) { 822 DP_ERR(edev, "Failed to update vport\n"); 823 return -1; 824 } 825 } 826 827 DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled"); 828 return 0; 829 } 830 831 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, 832 uint16_t vlan_id, int on) 833 { 834 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 835 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 836 struct qed_dev_eth_info *dev_info = &qdev->dev_info; 837 struct qede_vlan_entry *tmp = NULL; 838 struct qede_vlan_entry *vlan; 839 struct ecore_filter_ucast ucast; 840 int rc; 841 842 if (on) { 843 if (qdev->configured_vlans == dev_info->num_vlan_filters) { 844 DP_ERR(edev, "Reached max VLAN filter limit" 845 " enabling accept_any_vlan\n"); 846 qede_config_accept_any_vlan(qdev, true); 847 return 0; 848 } 849 850 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 851 if (tmp->vid == vlan_id) { 852 DP_ERR(edev, "VLAN %u already configured\n", 853 vlan_id); 854 return -EEXIST; 855 } 856 } 857 858 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry), 859 RTE_CACHE_LINE_SIZE); 860 861 if (!vlan) { 862 DP_ERR(edev, "Did not allocate memory for VLAN\n"); 863 return -ENOMEM; 864 } 865 866 qede_set_ucast_cmn_params(&ucast); 867 ucast.opcode = ECORE_FILTER_ADD; 868 ucast.type = ECORE_FILTER_VLAN; 869 ucast.vlan = vlan_id; 870 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 871 NULL); 872 if (rc != 0) { 873 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id, 874 rc); 875 rte_free(vlan); 876 } else { 877 vlan->vid = vlan_id; 878 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list); 879 qdev->configured_vlans++; 880 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n", 881 vlan_id, qdev->configured_vlans); 882 } 883 } else { 884 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 885 if (tmp->vid == vlan_id) 886 break; 887 } 888 889 if (!tmp) { 890 if (qdev->configured_vlans == 0) { 891 DP_INFO(edev, 892 "No VLAN filters configured yet\n"); 893 return 0; 894 } 895 896 DP_ERR(edev, "VLAN %u not configured\n", vlan_id); 897 return -EINVAL; 898 } 899 900 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list); 901 902 qede_set_ucast_cmn_params(&ucast); 903 ucast.opcode = ECORE_FILTER_REMOVE; 904 ucast.type = ECORE_FILTER_VLAN; 905 ucast.vlan = vlan_id; 906 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 907 NULL); 908 if (rc != 0) { 909 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", 910 vlan_id, rc); 911 } else { 912 qdev->configured_vlans--; 913 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n", 914 vlan_id, qdev->configured_vlans); 915 } 916 } 917 918 return rc; 919 } 920 921 static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 922 { 923 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 924 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 925 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 926 927 if (mask & ETH_VLAN_STRIP_MASK) { 928 if (rxmode->hw_vlan_strip) 929 (void)qede_vlan_stripping(eth_dev, 1); 930 else 931 (void)qede_vlan_stripping(eth_dev, 0); 932 } 933 934 if (mask & ETH_VLAN_FILTER_MASK) { 935 /* VLAN filtering kicks in when a VLAN is added */ 936 if (rxmode->hw_vlan_filter) { 937 qede_vlan_filter_set(eth_dev, 0, 1); 938 } else { 939 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ 940 DP_ERR(edev, 941 " Please remove existing VLAN filters" 942 " before disabling VLAN filtering\n"); 943 /* Signal app that VLAN filtering is still 944 * enabled 945 */ 946 rxmode->hw_vlan_filter = true; 947 } else { 948 qede_vlan_filter_set(eth_dev, 0, 0); 949 } 950 } 951 } 952 953 if (mask & ETH_VLAN_EXTEND_MASK) 954 DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q" 955 " and classification is based on outer tag only\n"); 956 957 DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n", 958 mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter); 959 } 960 961 static void qede_prandom_bytes(uint32_t *buff) 962 { 963 uint8_t i; 964 965 srand((unsigned int)time(NULL)); 966 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 967 buff[i] = rand(); 968 } 969 970 int qede_config_rss(struct rte_eth_dev *eth_dev) 971 { 972 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 973 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO 974 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 975 #endif 976 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE]; 977 struct rte_eth_rss_reta_entry64 reta_conf[2]; 978 struct rte_eth_rss_conf rss_conf; 979 uint32_t i, id, pos, q; 980 981 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 982 if (!rss_conf.rss_key) { 983 DP_INFO(edev, "Applying driver default key\n"); 984 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 985 qede_prandom_bytes(&def_rss_key[0]); 986 rss_conf.rss_key = (uint8_t *)&def_rss_key[0]; 987 } 988 989 /* Configure RSS hash */ 990 if (qede_rss_hash_update(eth_dev, &rss_conf)) 991 return -EINVAL; 992 993 /* Configure default RETA */ 994 memset(reta_conf, 0, sizeof(reta_conf)); 995 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) 996 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX; 997 998 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 999 id = i / RTE_RETA_GROUP_SIZE; 1000 pos = i % RTE_RETA_GROUP_SIZE; 1001 q = i % QEDE_RSS_COUNT(qdev); 1002 reta_conf[id].reta[pos] = q; 1003 } 1004 if (qede_rss_reta_update(eth_dev, &reta_conf[0], 1005 ECORE_RSS_IND_TABLE_SIZE)) 1006 return -EINVAL; 1007 1008 return 0; 1009 } 1010 1011 static void qede_fastpath_start(struct ecore_dev *edev) 1012 { 1013 struct ecore_hwfn *p_hwfn; 1014 int i; 1015 1016 for_each_hwfn(edev, i) { 1017 p_hwfn = &edev->hwfns[i]; 1018 ecore_hw_start_fastpath(p_hwfn); 1019 } 1020 } 1021 1022 static int qede_dev_start(struct rte_eth_dev *eth_dev) 1023 { 1024 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1025 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1026 1027 PMD_INIT_FUNC_TRACE(edev); 1028 1029 /* Update MTU only if it has changed */ 1030 if (qdev->mtu != qdev->new_mtu) { 1031 if (qede_update_mtu(eth_dev, qdev->new_mtu)) 1032 goto err; 1033 qdev->mtu = qdev->new_mtu; 1034 /* If MTU has changed then update TPA too */ 1035 if (qdev->enable_lro) 1036 if (qede_enable_tpa(eth_dev, true)) 1037 goto err; 1038 } 1039 1040 /* Start queues */ 1041 if (qede_start_queues(eth_dev)) 1042 goto err; 1043 1044 /* Newer SR-IOV PF driver expects RX/TX queues to be started before 1045 * enabling RSS. Hence RSS configuration is deferred upto this point. 1046 * Also, we would like to retain similar behavior in PF case, so we 1047 * don't do PF/VF specific check here. 1048 */ 1049 if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) 1050 if (qede_config_rss(eth_dev)) 1051 goto err; 1052 1053 /* Enable vport*/ 1054 if (qede_activate_vport(eth_dev, true)) 1055 goto err; 1056 1057 /* Bring-up the link */ 1058 qede_dev_set_link_state(eth_dev, true); 1059 1060 /* Start/resume traffic */ 1061 qede_fastpath_start(edev); 1062 1063 DP_INFO(edev, "Device started\n"); 1064 1065 return 0; 1066 err: 1067 DP_ERR(edev, "Device start fails\n"); 1068 return -1; /* common error code is < 0 */ 1069 } 1070 1071 static void qede_dev_stop(struct rte_eth_dev *eth_dev) 1072 { 1073 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1074 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1075 1076 PMD_INIT_FUNC_TRACE(edev); 1077 1078 /* Disable vport */ 1079 if (qede_activate_vport(eth_dev, false)) 1080 return; 1081 1082 if (qdev->enable_lro) 1083 qede_enable_tpa(eth_dev, false); 1084 1085 /* TODO: Do we need disable LRO or RSS */ 1086 /* Stop queues */ 1087 qede_stop_queues(eth_dev); 1088 1089 /* Disable traffic */ 1090 ecore_hw_stop_fastpath(edev); /* TBD - loop */ 1091 1092 /* Bring the link down */ 1093 qede_dev_set_link_state(eth_dev, false); 1094 1095 DP_INFO(edev, "Device is stopped\n"); 1096 } 1097 1098 static int qede_dev_configure(struct rte_eth_dev *eth_dev) 1099 { 1100 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1101 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1102 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 1103 1104 PMD_INIT_FUNC_TRACE(edev); 1105 1106 /* Check requirements for 100G mode */ 1107 if (edev->num_hwfns > 1) { 1108 if (eth_dev->data->nb_rx_queues < 2 || 1109 eth_dev->data->nb_tx_queues < 2) { 1110 DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n"); 1111 return -EINVAL; 1112 } 1113 1114 if ((eth_dev->data->nb_rx_queues % 2 != 0) || 1115 (eth_dev->data->nb_tx_queues % 2 != 0)) { 1116 DP_ERR(edev, 1117 "100G mode needs even no. of RX/TX queues\n"); 1118 return -EINVAL; 1119 } 1120 } 1121 1122 /* Sanity checks and throw warnings */ 1123 if (rxmode->enable_scatter) 1124 eth_dev->data->scattered_rx = 1; 1125 1126 if (!rxmode->hw_strip_crc) 1127 DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n"); 1128 1129 if (!rxmode->hw_ip_checksum) 1130 DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled " 1131 "in hw\n"); 1132 if (rxmode->header_split) 1133 DP_INFO(edev, "Header split enable is not supported\n"); 1134 if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode == 1135 ETH_MQ_RX_RSS)) { 1136 DP_ERR(edev, "Unsupported multi-queue mode\n"); 1137 return -ENOTSUP; 1138 } 1139 /* Flow director mode check */ 1140 if (qede_check_fdir_support(eth_dev)) 1141 return -ENOTSUP; 1142 1143 /* Deallocate resources if held previously. It is needed only if the 1144 * queue count has been changed from previous configuration. If its 1145 * going to change then it means RX/TX queue setup will be called 1146 * again and the fastpath pointers will be reinitialized there. 1147 */ 1148 if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues || 1149 qdev->num_rx_queues != eth_dev->data->nb_rx_queues) { 1150 qede_dealloc_fp_resc(eth_dev); 1151 /* Proceed with updated queue count */ 1152 qdev->num_tx_queues = eth_dev->data->nb_tx_queues; 1153 qdev->num_rx_queues = eth_dev->data->nb_rx_queues; 1154 if (qede_alloc_fp_resc(qdev)) 1155 return -ENOMEM; 1156 } 1157 1158 /* VF's MTU has to be set using vport-start where as 1159 * PF's MTU can be updated via vport-update. 1160 */ 1161 if (IS_VF(edev)) { 1162 if (qede_start_vport(qdev, rxmode->max_rx_pkt_len)) 1163 return -1; 1164 } else { 1165 if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len)) 1166 return -1; 1167 } 1168 1169 qdev->mtu = rxmode->max_rx_pkt_len; 1170 qdev->new_mtu = qdev->mtu; 1171 1172 /* Configure TPA parameters */ 1173 if (rxmode->enable_lro) { 1174 if (qede_enable_tpa(eth_dev, true)) 1175 return -EINVAL; 1176 /* Enable scatter mode for LRO */ 1177 if (!rxmode->enable_scatter) 1178 eth_dev->data->scattered_rx = 1; 1179 } 1180 qdev->enable_lro = rxmode->enable_lro; 1181 1182 /* Enable VLAN offloads by default */ 1183 qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | 1184 ETH_VLAN_FILTER_MASK | 1185 ETH_VLAN_EXTEND_MASK); 1186 1187 DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n", 1188 QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)); 1189 1190 return 0; 1191 } 1192 1193 /* Info about HW descriptor ring limitations */ 1194 static const struct rte_eth_desc_lim qede_rx_desc_lim = { 1195 .nb_max = 0x8000, /* 32K */ 1196 .nb_min = 128, 1197 .nb_align = 128 /* lowest common multiple */ 1198 }; 1199 1200 static const struct rte_eth_desc_lim qede_tx_desc_lim = { 1201 .nb_max = 0x8000, /* 32K */ 1202 .nb_min = 256, 1203 .nb_align = 256, 1204 .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET, 1205 .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 1206 }; 1207 1208 static void 1209 qede_dev_info_get(struct rte_eth_dev *eth_dev, 1210 struct rte_eth_dev_info *dev_info) 1211 { 1212 struct qede_dev *qdev = eth_dev->data->dev_private; 1213 struct ecore_dev *edev = &qdev->edev; 1214 struct qed_link_output link; 1215 uint32_t speed_cap = 0; 1216 1217 PMD_INIT_FUNC_TRACE(edev); 1218 1219 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1220 dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; 1221 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; 1222 dev_info->rx_desc_lim = qede_rx_desc_lim; 1223 dev_info->tx_desc_lim = qede_tx_desc_lim; 1224 1225 if (IS_PF(edev)) 1226 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1227 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2); 1228 else 1229 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1230 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF); 1231 dev_info->max_tx_queues = dev_info->max_rx_queues; 1232 1233 dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters; 1234 dev_info->max_vfs = 0; 1235 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; 1236 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 1237 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; 1238 1239 dev_info->default_txconf = (struct rte_eth_txconf) { 1240 .txq_flags = QEDE_TXQ_FLAGS, 1241 }; 1242 1243 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP | 1244 DEV_RX_OFFLOAD_IPV4_CKSUM | 1245 DEV_RX_OFFLOAD_UDP_CKSUM | 1246 DEV_RX_OFFLOAD_TCP_CKSUM | 1247 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1248 DEV_RX_OFFLOAD_TCP_LRO); 1249 1250 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | 1251 DEV_TX_OFFLOAD_IPV4_CKSUM | 1252 DEV_TX_OFFLOAD_UDP_CKSUM | 1253 DEV_TX_OFFLOAD_TCP_CKSUM | 1254 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 1255 DEV_TX_OFFLOAD_TCP_TSO | 1256 DEV_TX_OFFLOAD_VXLAN_TNL_TSO); 1257 1258 memset(&link, 0, sizeof(struct qed_link_output)); 1259 qdev->ops->common->get_link(edev, &link); 1260 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1261 speed_cap |= ETH_LINK_SPEED_1G; 1262 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1263 speed_cap |= ETH_LINK_SPEED_10G; 1264 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1265 speed_cap |= ETH_LINK_SPEED_25G; 1266 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1267 speed_cap |= ETH_LINK_SPEED_40G; 1268 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1269 speed_cap |= ETH_LINK_SPEED_50G; 1270 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1271 speed_cap |= ETH_LINK_SPEED_100G; 1272 dev_info->speed_capa = speed_cap; 1273 } 1274 1275 /* return 0 means link status changed, -1 means not changed */ 1276 static int 1277 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) 1278 { 1279 struct qede_dev *qdev = eth_dev->data->dev_private; 1280 struct ecore_dev *edev = &qdev->edev; 1281 uint16_t link_duplex; 1282 struct qed_link_output link; 1283 struct rte_eth_link *curr = ð_dev->data->dev_link; 1284 1285 memset(&link, 0, sizeof(struct qed_link_output)); 1286 qdev->ops->common->get_link(edev, &link); 1287 1288 /* Link Speed */ 1289 curr->link_speed = link.speed; 1290 1291 /* Link Mode */ 1292 switch (link.duplex) { 1293 case QEDE_DUPLEX_HALF: 1294 link_duplex = ETH_LINK_HALF_DUPLEX; 1295 break; 1296 case QEDE_DUPLEX_FULL: 1297 link_duplex = ETH_LINK_FULL_DUPLEX; 1298 break; 1299 case QEDE_DUPLEX_UNKNOWN: 1300 default: 1301 link_duplex = -1; 1302 } 1303 curr->link_duplex = link_duplex; 1304 1305 /* Link Status */ 1306 curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN; 1307 1308 /* AN */ 1309 curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? 1310 ETH_LINK_AUTONEG : ETH_LINK_FIXED; 1311 1312 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", 1313 curr->link_speed, curr->link_duplex, 1314 curr->link_autoneg, curr->link_status); 1315 1316 /* return 0 means link status changed, -1 means not changed */ 1317 return ((curr->link_status == link.link_up) ? -1 : 0); 1318 } 1319 1320 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev) 1321 { 1322 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT 1323 struct qede_dev *qdev = eth_dev->data->dev_private; 1324 struct ecore_dev *edev = &qdev->edev; 1325 1326 PMD_INIT_FUNC_TRACE(edev); 1327 #endif 1328 1329 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; 1330 1331 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1332 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1333 1334 qed_configure_filter_rx_mode(eth_dev, type); 1335 } 1336 1337 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev) 1338 { 1339 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT 1340 struct qede_dev *qdev = eth_dev->data->dev_private; 1341 struct ecore_dev *edev = &qdev->edev; 1342 1343 PMD_INIT_FUNC_TRACE(edev); 1344 #endif 1345 1346 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1347 qed_configure_filter_rx_mode(eth_dev, 1348 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC); 1349 else 1350 qed_configure_filter_rx_mode(eth_dev, 1351 QED_FILTER_RX_MODE_TYPE_REGULAR); 1352 } 1353 1354 static void qede_poll_sp_sb_cb(void *param) 1355 { 1356 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 1357 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1358 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1359 int rc; 1360 1361 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 1362 qede_interrupt_action(&edev->hwfns[1]); 1363 1364 rc = rte_eal_alarm_set(timer_period * US_PER_S, 1365 qede_poll_sp_sb_cb, 1366 (void *)eth_dev); 1367 if (rc != 0) { 1368 DP_ERR(edev, "Unable to start periodic" 1369 " timer rc %d\n", rc); 1370 assert(false && "Unable to start periodic timer"); 1371 } 1372 } 1373 1374 static void qede_dev_close(struct rte_eth_dev *eth_dev) 1375 { 1376 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1377 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1378 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1379 1380 PMD_INIT_FUNC_TRACE(edev); 1381 1382 /* dev_stop() shall cleanup fp resources in hw but without releasing 1383 * dma memories and sw structures so that dev_start() can be called 1384 * by the app without reconfiguration. However, in dev_close() we 1385 * can release all the resources and device can be brought up newly 1386 */ 1387 if (eth_dev->data->dev_started) 1388 qede_dev_stop(eth_dev); 1389 1390 qede_stop_vport(edev); 1391 qede_fdir_dealloc_resc(eth_dev); 1392 qede_dealloc_fp_resc(eth_dev); 1393 1394 eth_dev->data->nb_rx_queues = 0; 1395 eth_dev->data->nb_tx_queues = 0; 1396 1397 qdev->ops->common->slowpath_stop(edev); 1398 qdev->ops->common->remove(edev); 1399 rte_intr_disable(&pci_dev->intr_handle); 1400 rte_intr_callback_unregister(&pci_dev->intr_handle, 1401 qede_interrupt_handler, (void *)eth_dev); 1402 if (edev->num_hwfns > 1) 1403 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); 1404 } 1405 1406 static void 1407 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) 1408 { 1409 struct qede_dev *qdev = eth_dev->data->dev_private; 1410 struct ecore_dev *edev = &qdev->edev; 1411 struct ecore_eth_stats stats; 1412 unsigned int i = 0, j = 0, qid; 1413 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 1414 struct qede_tx_queue *txq; 1415 1416 ecore_get_vport_stats(edev, &stats); 1417 1418 /* RX Stats */ 1419 eth_stats->ipackets = stats.rx_ucast_pkts + 1420 stats.rx_mcast_pkts + stats.rx_bcast_pkts; 1421 1422 eth_stats->ibytes = stats.rx_ucast_bytes + 1423 stats.rx_mcast_bytes + stats.rx_bcast_bytes; 1424 1425 eth_stats->ierrors = stats.rx_crc_errors + 1426 stats.rx_align_errors + 1427 stats.rx_carrier_errors + 1428 stats.rx_oversize_packets + 1429 stats.rx_jabbers + stats.rx_undersize_packets; 1430 1431 eth_stats->rx_nombuf = stats.no_buff_discards; 1432 1433 eth_stats->imissed = stats.mftag_filter_discards + 1434 stats.mac_filter_discards + 1435 stats.no_buff_discards + stats.brb_truncates + stats.brb_discards; 1436 1437 /* TX stats */ 1438 eth_stats->opackets = stats.tx_ucast_pkts + 1439 stats.tx_mcast_pkts + stats.tx_bcast_pkts; 1440 1441 eth_stats->obytes = stats.tx_ucast_bytes + 1442 stats.tx_mcast_bytes + stats.tx_bcast_bytes; 1443 1444 eth_stats->oerrors = stats.tx_err_drop_pkts; 1445 1446 /* Queue stats */ 1447 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1448 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1449 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev), 1450 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1451 if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) || 1452 (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev))) 1453 DP_VERBOSE(edev, ECORE_MSG_DEBUG, 1454 "Not all the queue stats will be displayed. Set" 1455 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" 1456 " appropriately and retry.\n"); 1457 1458 for_each_rss(qid) { 1459 eth_stats->q_ipackets[i] = 1460 *(uint64_t *)( 1461 ((char *)(qdev->fp_array[qid].rxq)) + 1462 offsetof(struct qede_rx_queue, 1463 rcv_pkts)); 1464 eth_stats->q_errors[i] = 1465 *(uint64_t *)( 1466 ((char *)(qdev->fp_array[qid].rxq)) + 1467 offsetof(struct qede_rx_queue, 1468 rx_hw_errors)) + 1469 *(uint64_t *)( 1470 ((char *)(qdev->fp_array[qid].rxq)) + 1471 offsetof(struct qede_rx_queue, 1472 rx_alloc_errors)); 1473 i++; 1474 if (i == rxq_stat_cntrs) 1475 break; 1476 } 1477 1478 for_each_tss(qid) { 1479 txq = qdev->fp_array[qid].txq; 1480 eth_stats->q_opackets[j] = 1481 *((uint64_t *)(uintptr_t) 1482 (((uint64_t)(uintptr_t)(txq)) + 1483 offsetof(struct qede_tx_queue, 1484 xmit_pkts))); 1485 j++; 1486 if (j == txq_stat_cntrs) 1487 break; 1488 } 1489 } 1490 1491 static unsigned 1492 qede_get_xstats_count(struct qede_dev *qdev) { 1493 return RTE_DIM(qede_xstats_strings) + 1494 (RTE_DIM(qede_rxq_xstats_strings) * 1495 RTE_MIN(QEDE_RSS_COUNT(qdev), 1496 RTE_ETHDEV_QUEUE_STAT_CNTRS)); 1497 } 1498 1499 static int 1500 qede_get_xstats_names(struct rte_eth_dev *dev, 1501 struct rte_eth_xstat_name *xstats_names, 1502 __rte_unused unsigned int limit) 1503 { 1504 struct qede_dev *qdev = dev->data->dev_private; 1505 const unsigned int stat_cnt = qede_get_xstats_count(qdev); 1506 unsigned int i, qid, stat_idx = 0; 1507 unsigned int rxq_stat_cntrs; 1508 1509 if (xstats_names != NULL) { 1510 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1511 snprintf(xstats_names[stat_idx].name, 1512 sizeof(xstats_names[stat_idx].name), 1513 "%s", 1514 qede_xstats_strings[i].name); 1515 stat_idx++; 1516 } 1517 1518 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1519 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1520 for (qid = 0; qid < rxq_stat_cntrs; qid++) { 1521 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1522 snprintf(xstats_names[stat_idx].name, 1523 sizeof(xstats_names[stat_idx].name), 1524 "%.4s%d%s", 1525 qede_rxq_xstats_strings[i].name, qid, 1526 qede_rxq_xstats_strings[i].name + 4); 1527 stat_idx++; 1528 } 1529 } 1530 } 1531 1532 return stat_cnt; 1533 } 1534 1535 static int 1536 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1537 unsigned int n) 1538 { 1539 struct qede_dev *qdev = dev->data->dev_private; 1540 struct ecore_dev *edev = &qdev->edev; 1541 struct ecore_eth_stats stats; 1542 const unsigned int num = qede_get_xstats_count(qdev); 1543 unsigned int i, qid, stat_idx = 0; 1544 unsigned int rxq_stat_cntrs; 1545 1546 if (n < num) 1547 return num; 1548 1549 ecore_get_vport_stats(edev, &stats); 1550 1551 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1552 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) + 1553 qede_xstats_strings[i].offset); 1554 xstats[stat_idx].id = stat_idx; 1555 stat_idx++; 1556 } 1557 1558 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1559 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1560 for (qid = 0; qid < rxq_stat_cntrs; qid++) { 1561 for_each_rss(qid) { 1562 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1563 xstats[stat_idx].value = *(uint64_t *)( 1564 ((char *)(qdev->fp_array[qid].rxq)) + 1565 qede_rxq_xstats_strings[i].offset); 1566 xstats[stat_idx].id = stat_idx; 1567 stat_idx++; 1568 } 1569 } 1570 } 1571 1572 return stat_idx; 1573 } 1574 1575 static void 1576 qede_reset_xstats(struct rte_eth_dev *dev) 1577 { 1578 struct qede_dev *qdev = dev->data->dev_private; 1579 struct ecore_dev *edev = &qdev->edev; 1580 1581 ecore_reset_vport_stats(edev); 1582 } 1583 1584 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) 1585 { 1586 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1587 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1588 struct qed_link_params link_params; 1589 int rc; 1590 1591 DP_INFO(edev, "setting link state %d\n", link_up); 1592 memset(&link_params, 0, sizeof(link_params)); 1593 link_params.link_up = link_up; 1594 rc = qdev->ops->common->set_link(edev, &link_params); 1595 if (rc != ECORE_SUCCESS) 1596 DP_ERR(edev, "Unable to set link state %d\n", link_up); 1597 1598 return rc; 1599 } 1600 1601 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev) 1602 { 1603 return qede_dev_set_link_state(eth_dev, true); 1604 } 1605 1606 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev) 1607 { 1608 return qede_dev_set_link_state(eth_dev, false); 1609 } 1610 1611 static void qede_reset_stats(struct rte_eth_dev *eth_dev) 1612 { 1613 struct qede_dev *qdev = eth_dev->data->dev_private; 1614 struct ecore_dev *edev = &qdev->edev; 1615 1616 ecore_reset_vport_stats(edev); 1617 } 1618 1619 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev) 1620 { 1621 enum qed_filter_rx_mode_type type = 1622 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1623 1624 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1625 type |= QED_FILTER_RX_MODE_TYPE_PROMISC; 1626 1627 qed_configure_filter_rx_mode(eth_dev, type); 1628 } 1629 1630 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev) 1631 { 1632 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1633 qed_configure_filter_rx_mode(eth_dev, 1634 QED_FILTER_RX_MODE_TYPE_PROMISC); 1635 else 1636 qed_configure_filter_rx_mode(eth_dev, 1637 QED_FILTER_RX_MODE_TYPE_REGULAR); 1638 } 1639 1640 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, 1641 struct rte_eth_fc_conf *fc_conf) 1642 { 1643 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1644 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1645 struct qed_link_output current_link; 1646 struct qed_link_params params; 1647 1648 memset(¤t_link, 0, sizeof(current_link)); 1649 qdev->ops->common->get_link(edev, ¤t_link); 1650 1651 memset(¶ms, 0, sizeof(params)); 1652 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; 1653 if (fc_conf->autoneg) { 1654 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) { 1655 DP_ERR(edev, "Autoneg not supported\n"); 1656 return -EINVAL; 1657 } 1658 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 1659 } 1660 1661 /* Pause is assumed to be supported (SUPPORTED_Pause) */ 1662 if (fc_conf->mode == RTE_FC_FULL) 1663 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE | 1664 QED_LINK_PAUSE_RX_ENABLE); 1665 if (fc_conf->mode == RTE_FC_TX_PAUSE) 1666 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; 1667 if (fc_conf->mode == RTE_FC_RX_PAUSE) 1668 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; 1669 1670 params.link_up = true; 1671 (void)qdev->ops->common->set_link(edev, ¶ms); 1672 1673 return 0; 1674 } 1675 1676 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev, 1677 struct rte_eth_fc_conf *fc_conf) 1678 { 1679 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1680 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1681 struct qed_link_output current_link; 1682 1683 memset(¤t_link, 0, sizeof(current_link)); 1684 qdev->ops->common->get_link(edev, ¤t_link); 1685 1686 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1687 fc_conf->autoneg = true; 1688 1689 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE | 1690 QED_LINK_PAUSE_TX_ENABLE)) 1691 fc_conf->mode = RTE_FC_FULL; 1692 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) 1693 fc_conf->mode = RTE_FC_RX_PAUSE; 1694 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) 1695 fc_conf->mode = RTE_FC_TX_PAUSE; 1696 else 1697 fc_conf->mode = RTE_FC_NONE; 1698 1699 return 0; 1700 } 1701 1702 static const uint32_t * 1703 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) 1704 { 1705 static const uint32_t ptypes[] = { 1706 RTE_PTYPE_L3_IPV4, 1707 RTE_PTYPE_L3_IPV6, 1708 RTE_PTYPE_UNKNOWN 1709 }; 1710 1711 if (eth_dev->rx_pkt_burst == qede_recv_pkts) 1712 return ptypes; 1713 1714 return NULL; 1715 } 1716 1717 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf) 1718 { 1719 *rss_caps = 0; 1720 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0; 1721 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0; 1722 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0; 1723 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; 1724 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0; 1725 *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0; 1726 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0; 1727 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0; 1728 } 1729 1730 int qede_rss_hash_update(struct rte_eth_dev *eth_dev, 1731 struct rte_eth_rss_conf *rss_conf) 1732 { 1733 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1734 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1735 struct ecore_sp_vport_update_params vport_update_params; 1736 struct ecore_rss_params rss_params; 1737 struct ecore_hwfn *p_hwfn; 1738 uint32_t *key = (uint32_t *)rss_conf->rss_key; 1739 uint64_t hf = rss_conf->rss_hf; 1740 uint8_t len = rss_conf->rss_key_len; 1741 uint8_t idx; 1742 uint8_t i; 1743 int rc; 1744 1745 memset(&vport_update_params, 0, sizeof(vport_update_params)); 1746 memset(&rss_params, 0, sizeof(rss_params)); 1747 1748 DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n", 1749 (unsigned long)hf, len, key); 1750 1751 if (hf != 0) { 1752 /* Enabling RSS */ 1753 DP_INFO(edev, "Enabling rss\n"); 1754 1755 /* RSS caps */ 1756 qede_init_rss_caps(&rss_params.rss_caps, hf); 1757 rss_params.update_rss_capabilities = 1; 1758 1759 /* RSS hash key */ 1760 if (key) { 1761 if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) { 1762 DP_ERR(edev, "RSS key length exceeds limit\n"); 1763 return -EINVAL; 1764 } 1765 DP_INFO(edev, "Applying user supplied hash key\n"); 1766 rss_params.update_rss_key = 1; 1767 memcpy(&rss_params.rss_key, key, len); 1768 } 1769 rss_params.rss_enable = 1; 1770 } 1771 1772 rss_params.update_rss_config = 1; 1773 /* tbl_size has to be set with capabilities */ 1774 rss_params.rss_table_size_log = 7; 1775 vport_update_params.vport_id = 0; 1776 /* pass the L2 handles instead of qids */ 1777 for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) { 1778 idx = qdev->rss_ind_table[i]; 1779 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle; 1780 } 1781 vport_update_params.rss_params = &rss_params; 1782 1783 for_each_hwfn(edev, i) { 1784 p_hwfn = &edev->hwfns[i]; 1785 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 1786 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 1787 ECORE_SPQ_MODE_EBLOCK, NULL); 1788 if (rc) { 1789 DP_ERR(edev, "vport-update for RSS failed\n"); 1790 return rc; 1791 } 1792 } 1793 qdev->rss_enable = rss_params.rss_enable; 1794 1795 /* Update local structure for hash query */ 1796 qdev->rss_conf.rss_hf = hf; 1797 qdev->rss_conf.rss_key_len = len; 1798 if (qdev->rss_enable) { 1799 if (qdev->rss_conf.rss_key == NULL) { 1800 qdev->rss_conf.rss_key = (uint8_t *)malloc(len); 1801 if (qdev->rss_conf.rss_key == NULL) { 1802 DP_ERR(edev, "No memory to store RSS key\n"); 1803 return -ENOMEM; 1804 } 1805 } 1806 if (key && len) { 1807 DP_INFO(edev, "Storing RSS key\n"); 1808 memcpy(qdev->rss_conf.rss_key, key, len); 1809 } 1810 } else if (!qdev->rss_enable && len == 0) { 1811 if (qdev->rss_conf.rss_key) { 1812 free(qdev->rss_conf.rss_key); 1813 qdev->rss_conf.rss_key = NULL; 1814 DP_INFO(edev, "Free RSS key\n"); 1815 } 1816 } 1817 1818 return 0; 1819 } 1820 1821 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev, 1822 struct rte_eth_rss_conf *rss_conf) 1823 { 1824 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1825 1826 rss_conf->rss_hf = qdev->rss_conf.rss_hf; 1827 rss_conf->rss_key_len = qdev->rss_conf.rss_key_len; 1828 1829 if (rss_conf->rss_key && qdev->rss_conf.rss_key) 1830 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key, 1831 rss_conf->rss_key_len); 1832 return 0; 1833 } 1834 1835 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev, 1836 struct ecore_rss_params *rss) 1837 { 1838 int i, fn; 1839 bool rss_mode = 1; /* enable */ 1840 struct ecore_queue_cid *cid; 1841 struct ecore_rss_params *t_rss; 1842 1843 /* In regular scenario, we'd simply need to take input handlers. 1844 * But in CMT, we'd have to split the handlers according to the 1845 * engine they were configured on. We'd then have to understand 1846 * whether RSS is really required, since 2-queues on CMT doesn't 1847 * require RSS. 1848 */ 1849 1850 /* CMT should be round-robin */ 1851 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 1852 cid = rss->rss_ind_table[i]; 1853 1854 if (cid->p_owner == ECORE_LEADING_HWFN(edev)) 1855 t_rss = &rss[0]; 1856 else 1857 t_rss = &rss[1]; 1858 1859 t_rss->rss_ind_table[i / edev->num_hwfns] = cid; 1860 } 1861 1862 t_rss = &rss[1]; 1863 t_rss->update_rss_ind_table = 1; 1864 t_rss->rss_table_size_log = 7; 1865 t_rss->update_rss_config = 1; 1866 1867 /* Make sure RSS is actually required */ 1868 for_each_hwfn(edev, fn) { 1869 for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns; 1870 i++) { 1871 if (rss[fn].rss_ind_table[i] != 1872 rss[fn].rss_ind_table[0]) 1873 break; 1874 } 1875 1876 if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) { 1877 DP_INFO(edev, 1878 "CMT - 1 queue per-hwfn; Disabling RSS\n"); 1879 rss_mode = 0; 1880 goto out; 1881 } 1882 } 1883 1884 out: 1885 t_rss->rss_enable = rss_mode; 1886 1887 return rss_mode; 1888 } 1889 1890 int qede_rss_reta_update(struct rte_eth_dev *eth_dev, 1891 struct rte_eth_rss_reta_entry64 *reta_conf, 1892 uint16_t reta_size) 1893 { 1894 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1895 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1896 struct ecore_sp_vport_update_params vport_update_params; 1897 struct ecore_rss_params *params; 1898 struct ecore_hwfn *p_hwfn; 1899 uint16_t i, idx, shift; 1900 uint8_t entry; 1901 int rc = 0; 1902 1903 if (reta_size > ETH_RSS_RETA_SIZE_128) { 1904 DP_ERR(edev, "reta_size %d is not supported by hardware\n", 1905 reta_size); 1906 return -EINVAL; 1907 } 1908 1909 memset(&vport_update_params, 0, sizeof(vport_update_params)); 1910 params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns, 1911 RTE_CACHE_LINE_SIZE); 1912 1913 for (i = 0; i < reta_size; i++) { 1914 idx = i / RTE_RETA_GROUP_SIZE; 1915 shift = i % RTE_RETA_GROUP_SIZE; 1916 if (reta_conf[idx].mask & (1ULL << shift)) { 1917 entry = reta_conf[idx].reta[shift]; 1918 /* Pass rxq handles to ecore */ 1919 params->rss_ind_table[i] = 1920 qdev->fp_array[entry].rxq->handle; 1921 /* Update the local copy for RETA query command */ 1922 qdev->rss_ind_table[i] = entry; 1923 } 1924 } 1925 1926 params->update_rss_ind_table = 1; 1927 params->rss_table_size_log = 7; 1928 params->update_rss_config = 1; 1929 1930 /* Fix up RETA for CMT mode device */ 1931 if (edev->num_hwfns > 1) 1932 qdev->rss_enable = qede_update_rss_parm_cmt(edev, 1933 params); 1934 vport_update_params.vport_id = 0; 1935 /* Use the current value of rss_enable */ 1936 params->rss_enable = qdev->rss_enable; 1937 vport_update_params.rss_params = params; 1938 1939 for_each_hwfn(edev, i) { 1940 p_hwfn = &edev->hwfns[i]; 1941 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 1942 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 1943 ECORE_SPQ_MODE_EBLOCK, NULL); 1944 if (rc) { 1945 DP_ERR(edev, "vport-update for RSS failed\n"); 1946 goto out; 1947 } 1948 } 1949 1950 out: 1951 rte_free(params); 1952 return rc; 1953 } 1954 1955 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev, 1956 struct rte_eth_rss_reta_entry64 *reta_conf, 1957 uint16_t reta_size) 1958 { 1959 struct qede_dev *qdev = eth_dev->data->dev_private; 1960 struct ecore_dev *edev = &qdev->edev; 1961 uint16_t i, idx, shift; 1962 uint8_t entry; 1963 1964 if (reta_size > ETH_RSS_RETA_SIZE_128) { 1965 DP_ERR(edev, "reta_size %d is not supported\n", 1966 reta_size); 1967 return -EINVAL; 1968 } 1969 1970 for (i = 0; i < reta_size; i++) { 1971 idx = i / RTE_RETA_GROUP_SIZE; 1972 shift = i % RTE_RETA_GROUP_SIZE; 1973 if (reta_conf[idx].mask & (1ULL << shift)) { 1974 entry = qdev->rss_ind_table[i]; 1975 reta_conf[idx].reta[shift] = entry; 1976 } 1977 } 1978 1979 return 0; 1980 } 1981 1982 1983 1984 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 1985 { 1986 struct qede_dev *qdev = QEDE_INIT_QDEV(dev); 1987 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1988 struct rte_eth_dev_info dev_info = {0}; 1989 struct qede_fastpath *fp; 1990 uint32_t frame_size; 1991 uint16_t rx_buf_size; 1992 uint16_t bufsz; 1993 int i; 1994 1995 PMD_INIT_FUNC_TRACE(edev); 1996 qede_dev_info_get(dev, &dev_info); 1997 frame_size = mtu + QEDE_ETH_OVERHEAD; 1998 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) { 1999 DP_ERR(edev, "MTU %u out of range\n", mtu); 2000 return -EINVAL; 2001 } 2002 if (!dev->data->scattered_rx && 2003 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 2004 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n", 2005 dev->data->min_rx_buf_size); 2006 return -EINVAL; 2007 } 2008 /* Temporarily replace I/O functions with dummy ones. It cannot 2009 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. 2010 */ 2011 dev->rx_pkt_burst = qede_rxtx_pkts_dummy; 2012 dev->tx_pkt_burst = qede_rxtx_pkts_dummy; 2013 qede_dev_stop(dev); 2014 rte_delay_ms(1000); 2015 qdev->mtu = mtu; 2016 /* Fix up RX buf size for all queues of the port */ 2017 for_each_rss(i) { 2018 fp = &qdev->fp_array[i]; 2019 bufsz = (uint16_t)rte_pktmbuf_data_room_size( 2020 fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; 2021 if (dev->data->scattered_rx) 2022 rx_buf_size = bufsz + QEDE_ETH_OVERHEAD; 2023 else 2024 rx_buf_size = mtu + QEDE_ETH_OVERHEAD; 2025 rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size); 2026 fp->rxq->rx_buf_size = rx_buf_size; 2027 DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size); 2028 } 2029 qede_dev_start(dev); 2030 if (frame_size > ETHER_MAX_LEN) 2031 dev->data->dev_conf.rxmode.jumbo_frame = 1; 2032 else 2033 dev->data->dev_conf.rxmode.jumbo_frame = 0; 2034 /* update max frame size */ 2035 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 2036 /* Reassign back */ 2037 dev->rx_pkt_burst = qede_recv_pkts; 2038 dev->tx_pkt_burst = qede_xmit_pkts; 2039 2040 return 0; 2041 } 2042 2043 static int 2044 qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev, 2045 struct rte_eth_udp_tunnel *tunnel_udp, 2046 bool add) 2047 { 2048 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2049 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2050 struct ecore_tunnel_info tunn; /* @DPDK */ 2051 struct ecore_hwfn *p_hwfn; 2052 int rc, i; 2053 2054 PMD_INIT_FUNC_TRACE(edev); 2055 2056 memset(&tunn, 0, sizeof(tunn)); 2057 if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) { 2058 tunn.vxlan_port.b_update_port = true; 2059 tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port : 2060 QEDE_VXLAN_DEF_PORT; 2061 for_each_hwfn(edev, i) { 2062 p_hwfn = &edev->hwfns[i]; 2063 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn, 2064 ECORE_SPQ_MODE_CB, NULL); 2065 if (rc != ECORE_SUCCESS) { 2066 DP_ERR(edev, "Unable to config UDP port %u\n", 2067 tunn.vxlan_port.port); 2068 return rc; 2069 } 2070 } 2071 } 2072 2073 return 0; 2074 } 2075 2076 static int 2077 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev, 2078 struct rte_eth_udp_tunnel *tunnel_udp) 2079 { 2080 return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false); 2081 } 2082 2083 static int 2084 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev, 2085 struct rte_eth_udp_tunnel *tunnel_udp) 2086 { 2087 return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true); 2088 } 2089 2090 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type, 2091 uint32_t *clss, char *str) 2092 { 2093 uint16_t j; 2094 *clss = MAX_ECORE_TUNN_CLSS; 2095 2096 for (j = 0; j < RTE_DIM(qede_tunn_types); j++) { 2097 if (filter == qede_tunn_types[j].rte_filter_type) { 2098 *type = qede_tunn_types[j].qede_type; 2099 *clss = qede_tunn_types[j].qede_tunn_clss; 2100 strcpy(str, qede_tunn_types[j].string); 2101 return; 2102 } 2103 } 2104 } 2105 2106 static int 2107 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast, 2108 const struct rte_eth_tunnel_filter_conf *conf, 2109 uint32_t type) 2110 { 2111 /* Init commmon ucast params first */ 2112 qede_set_ucast_cmn_params(ucast); 2113 2114 /* Copy out the required fields based on classification type */ 2115 ucast->type = type; 2116 2117 switch (type) { 2118 case ECORE_FILTER_VNI: 2119 ucast->vni = conf->tenant_id; 2120 break; 2121 case ECORE_FILTER_INNER_VLAN: 2122 ucast->vlan = conf->inner_vlan; 2123 break; 2124 case ECORE_FILTER_MAC: 2125 memcpy(ucast->mac, conf->outer_mac.addr_bytes, 2126 ETHER_ADDR_LEN); 2127 break; 2128 case ECORE_FILTER_INNER_MAC: 2129 memcpy(ucast->mac, conf->inner_mac.addr_bytes, 2130 ETHER_ADDR_LEN); 2131 break; 2132 case ECORE_FILTER_MAC_VNI_PAIR: 2133 memcpy(ucast->mac, conf->outer_mac.addr_bytes, 2134 ETHER_ADDR_LEN); 2135 ucast->vni = conf->tenant_id; 2136 break; 2137 case ECORE_FILTER_INNER_MAC_VNI_PAIR: 2138 memcpy(ucast->mac, conf->inner_mac.addr_bytes, 2139 ETHER_ADDR_LEN); 2140 ucast->vni = conf->tenant_id; 2141 break; 2142 case ECORE_FILTER_INNER_PAIR: 2143 memcpy(ucast->mac, conf->inner_mac.addr_bytes, 2144 ETHER_ADDR_LEN); 2145 ucast->vlan = conf->inner_vlan; 2146 break; 2147 default: 2148 return -EINVAL; 2149 } 2150 2151 return ECORE_SUCCESS; 2152 } 2153 2154 static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev, 2155 enum rte_filter_op filter_op, 2156 const struct rte_eth_tunnel_filter_conf *conf) 2157 { 2158 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2159 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2160 struct ecore_tunnel_info tunn; 2161 struct ecore_hwfn *p_hwfn; 2162 enum ecore_filter_ucast_type type; 2163 enum ecore_tunn_clss clss; 2164 struct ecore_filter_ucast ucast; 2165 char str[80]; 2166 uint16_t filter_type; 2167 int rc, i; 2168 2169 PMD_INIT_FUNC_TRACE(edev); 2170 2171 filter_type = conf->filter_type | qdev->vxlan_filter_type; 2172 /* First determine if the given filter classification is supported */ 2173 qede_get_ecore_tunn_params(filter_type, &type, &clss, str); 2174 if (clss == MAX_ECORE_TUNN_CLSS) { 2175 DP_ERR(edev, "Wrong filter type\n"); 2176 return -EINVAL; 2177 } 2178 /* Init tunnel ucast params */ 2179 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type); 2180 if (rc != ECORE_SUCCESS) { 2181 DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n", 2182 conf->filter_type); 2183 return rc; 2184 } 2185 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n", 2186 str, filter_op, ucast.type); 2187 switch (filter_op) { 2188 case RTE_ETH_FILTER_ADD: 2189 ucast.opcode = ECORE_FILTER_ADD; 2190 2191 /* Skip MAC/VLAN if filter is based on VNI */ 2192 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { 2193 rc = qede_mac_int_ops(eth_dev, &ucast, 1); 2194 if (rc == 0) { 2195 /* Enable accept anyvlan */ 2196 qede_config_accept_any_vlan(qdev, true); 2197 } 2198 } else { 2199 rc = qede_ucast_filter(eth_dev, &ucast, 1); 2200 if (rc == 0) 2201 rc = ecore_filter_ucast_cmd(edev, &ucast, 2202 ECORE_SPQ_MODE_CB, NULL); 2203 } 2204 2205 if (rc != ECORE_SUCCESS) 2206 return rc; 2207 2208 qdev->vxlan_filter_type = filter_type; 2209 2210 DP_INFO(edev, "Enabling VXLAN tunneling\n"); 2211 qede_set_cmn_tunn_param(&tunn, clss, true, true); 2212 for_each_hwfn(edev, i) { 2213 p_hwfn = &edev->hwfns[i]; 2214 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, 2215 &tunn, ECORE_SPQ_MODE_CB, NULL); 2216 if (rc != ECORE_SUCCESS) { 2217 DP_ERR(edev, "Failed to update tunn_clss %u\n", 2218 tunn.vxlan.tun_cls); 2219 } 2220 } 2221 qdev->num_tunn_filters++; /* Filter added successfully */ 2222 break; 2223 case RTE_ETH_FILTER_DELETE: 2224 ucast.opcode = ECORE_FILTER_REMOVE; 2225 2226 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { 2227 rc = qede_mac_int_ops(eth_dev, &ucast, 0); 2228 } else { 2229 rc = qede_ucast_filter(eth_dev, &ucast, 0); 2230 if (rc == 0) 2231 rc = ecore_filter_ucast_cmd(edev, &ucast, 2232 ECORE_SPQ_MODE_CB, NULL); 2233 } 2234 if (rc != ECORE_SUCCESS) 2235 return rc; 2236 2237 qdev->vxlan_filter_type = filter_type; 2238 qdev->num_tunn_filters--; 2239 2240 /* Disable VXLAN if VXLAN filters become 0 */ 2241 if (qdev->num_tunn_filters == 0) { 2242 DP_INFO(edev, "Disabling VXLAN tunneling\n"); 2243 2244 /* Use 0 as tunnel mode */ 2245 qede_set_cmn_tunn_param(&tunn, clss, false, true); 2246 for_each_hwfn(edev, i) { 2247 p_hwfn = &edev->hwfns[i]; 2248 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn, 2249 ECORE_SPQ_MODE_CB, NULL); 2250 if (rc != ECORE_SUCCESS) { 2251 DP_ERR(edev, 2252 "Failed to update tunn_clss %u\n", 2253 tunn.vxlan.tun_cls); 2254 break; 2255 } 2256 } 2257 } 2258 break; 2259 default: 2260 DP_ERR(edev, "Unsupported operation %d\n", filter_op); 2261 return -EINVAL; 2262 } 2263 DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters); 2264 2265 return 0; 2266 } 2267 2268 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev, 2269 enum rte_filter_type filter_type, 2270 enum rte_filter_op filter_op, 2271 void *arg) 2272 { 2273 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2274 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2275 struct rte_eth_tunnel_filter_conf *filter_conf = 2276 (struct rte_eth_tunnel_filter_conf *)arg; 2277 2278 switch (filter_type) { 2279 case RTE_ETH_FILTER_TUNNEL: 2280 switch (filter_conf->tunnel_type) { 2281 case RTE_TUNNEL_TYPE_VXLAN: 2282 DP_INFO(edev, 2283 "Packet steering to the specified Rx queue" 2284 " is not supported with VXLAN tunneling"); 2285 return(qede_vxlan_tunn_config(eth_dev, filter_op, 2286 filter_conf)); 2287 /* Place holders for future tunneling support */ 2288 case RTE_TUNNEL_TYPE_GENEVE: 2289 case RTE_TUNNEL_TYPE_TEREDO: 2290 case RTE_TUNNEL_TYPE_NVGRE: 2291 case RTE_TUNNEL_TYPE_IP_IN_GRE: 2292 case RTE_L2_TUNNEL_TYPE_E_TAG: 2293 DP_ERR(edev, "Unsupported tunnel type %d\n", 2294 filter_conf->tunnel_type); 2295 return -EINVAL; 2296 case RTE_TUNNEL_TYPE_NONE: 2297 default: 2298 return 0; 2299 } 2300 break; 2301 case RTE_ETH_FILTER_FDIR: 2302 return qede_fdir_filter_conf(eth_dev, filter_op, arg); 2303 case RTE_ETH_FILTER_NTUPLE: 2304 return qede_ntuple_filter_conf(eth_dev, filter_op, arg); 2305 case RTE_ETH_FILTER_MACVLAN: 2306 case RTE_ETH_FILTER_ETHERTYPE: 2307 case RTE_ETH_FILTER_FLEXIBLE: 2308 case RTE_ETH_FILTER_SYN: 2309 case RTE_ETH_FILTER_HASH: 2310 case RTE_ETH_FILTER_L2_TUNNEL: 2311 case RTE_ETH_FILTER_MAX: 2312 default: 2313 DP_ERR(edev, "Unsupported filter type %d\n", 2314 filter_type); 2315 return -EINVAL; 2316 } 2317 2318 return 0; 2319 } 2320 2321 static const struct eth_dev_ops qede_eth_dev_ops = { 2322 .dev_configure = qede_dev_configure, 2323 .dev_infos_get = qede_dev_info_get, 2324 .rx_queue_setup = qede_rx_queue_setup, 2325 .rx_queue_release = qede_rx_queue_release, 2326 .tx_queue_setup = qede_tx_queue_setup, 2327 .tx_queue_release = qede_tx_queue_release, 2328 .dev_start = qede_dev_start, 2329 .dev_set_link_up = qede_dev_set_link_up, 2330 .dev_set_link_down = qede_dev_set_link_down, 2331 .link_update = qede_link_update, 2332 .promiscuous_enable = qede_promiscuous_enable, 2333 .promiscuous_disable = qede_promiscuous_disable, 2334 .allmulticast_enable = qede_allmulticast_enable, 2335 .allmulticast_disable = qede_allmulticast_disable, 2336 .dev_stop = qede_dev_stop, 2337 .dev_close = qede_dev_close, 2338 .stats_get = qede_get_stats, 2339 .stats_reset = qede_reset_stats, 2340 .xstats_get = qede_get_xstats, 2341 .xstats_reset = qede_reset_xstats, 2342 .xstats_get_names = qede_get_xstats_names, 2343 .mac_addr_add = qede_mac_addr_add, 2344 .mac_addr_remove = qede_mac_addr_remove, 2345 .mac_addr_set = qede_mac_addr_set, 2346 .vlan_offload_set = qede_vlan_offload_set, 2347 .vlan_filter_set = qede_vlan_filter_set, 2348 .flow_ctrl_set = qede_flow_ctrl_set, 2349 .flow_ctrl_get = qede_flow_ctrl_get, 2350 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, 2351 .rss_hash_update = qede_rss_hash_update, 2352 .rss_hash_conf_get = qede_rss_hash_conf_get, 2353 .reta_update = qede_rss_reta_update, 2354 .reta_query = qede_rss_reta_query, 2355 .mtu_set = qede_set_mtu, 2356 .filter_ctrl = qede_dev_filter_ctrl, 2357 .udp_tunnel_port_add = qede_udp_dst_port_add, 2358 .udp_tunnel_port_del = qede_udp_dst_port_del, 2359 }; 2360 2361 static const struct eth_dev_ops qede_eth_vf_dev_ops = { 2362 .dev_configure = qede_dev_configure, 2363 .dev_infos_get = qede_dev_info_get, 2364 .rx_queue_setup = qede_rx_queue_setup, 2365 .rx_queue_release = qede_rx_queue_release, 2366 .tx_queue_setup = qede_tx_queue_setup, 2367 .tx_queue_release = qede_tx_queue_release, 2368 .dev_start = qede_dev_start, 2369 .dev_set_link_up = qede_dev_set_link_up, 2370 .dev_set_link_down = qede_dev_set_link_down, 2371 .link_update = qede_link_update, 2372 .promiscuous_enable = qede_promiscuous_enable, 2373 .promiscuous_disable = qede_promiscuous_disable, 2374 .allmulticast_enable = qede_allmulticast_enable, 2375 .allmulticast_disable = qede_allmulticast_disable, 2376 .dev_stop = qede_dev_stop, 2377 .dev_close = qede_dev_close, 2378 .stats_get = qede_get_stats, 2379 .stats_reset = qede_reset_stats, 2380 .xstats_get = qede_get_xstats, 2381 .xstats_reset = qede_reset_xstats, 2382 .xstats_get_names = qede_get_xstats_names, 2383 .vlan_offload_set = qede_vlan_offload_set, 2384 .vlan_filter_set = qede_vlan_filter_set, 2385 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, 2386 .rss_hash_update = qede_rss_hash_update, 2387 .rss_hash_conf_get = qede_rss_hash_conf_get, 2388 .reta_update = qede_rss_reta_update, 2389 .reta_query = qede_rss_reta_query, 2390 .mtu_set = qede_set_mtu, 2391 }; 2392 2393 static void qede_update_pf_params(struct ecore_dev *edev) 2394 { 2395 struct ecore_pf_params pf_params; 2396 2397 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 2398 pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS; 2399 pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; 2400 qed_ops->common->update_pf_params(edev, &pf_params); 2401 } 2402 2403 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) 2404 { 2405 struct rte_pci_device *pci_dev; 2406 struct rte_pci_addr pci_addr; 2407 struct qede_dev *adapter; 2408 struct ecore_dev *edev; 2409 struct qed_dev_eth_info dev_info; 2410 struct qed_slowpath_params params; 2411 static bool do_once = true; 2412 uint8_t bulletin_change; 2413 uint8_t vf_mac[ETHER_ADDR_LEN]; 2414 uint8_t is_mac_forced; 2415 bool is_mac_exist; 2416 /* Fix up ecore debug level */ 2417 uint32_t dp_module = ~0 & ~ECORE_MSG_HW; 2418 uint8_t dp_level = ECORE_LEVEL_VERBOSE; 2419 int rc; 2420 2421 /* Extract key data structures */ 2422 adapter = eth_dev->data->dev_private; 2423 edev = &adapter->edev; 2424 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 2425 pci_addr = pci_dev->addr; 2426 2427 PMD_INIT_FUNC_TRACE(edev); 2428 2429 snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u", 2430 pci_addr.bus, pci_addr.devid, pci_addr.function, 2431 eth_dev->data->port_id); 2432 2433 eth_dev->rx_pkt_burst = qede_recv_pkts; 2434 eth_dev->tx_pkt_burst = qede_xmit_pkts; 2435 eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; 2436 2437 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2438 DP_ERR(edev, "Skipping device init from secondary process\n"); 2439 return 0; 2440 } 2441 2442 rte_eth_copy_pci_info(eth_dev, pci_dev); 2443 2444 /* @DPDK */ 2445 edev->vendor_id = pci_dev->id.vendor_id; 2446 edev->device_id = pci_dev->id.device_id; 2447 2448 qed_ops = qed_get_eth_ops(); 2449 if (!qed_ops) { 2450 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n"); 2451 return -EINVAL; 2452 } 2453 2454 DP_INFO(edev, "Starting qede probe\n"); 2455 rc = qed_ops->common->probe(edev, pci_dev, dp_module, 2456 dp_level, is_vf); 2457 if (rc != 0) { 2458 DP_ERR(edev, "qede probe failed rc %d\n", rc); 2459 return -ENODEV; 2460 } 2461 qede_update_pf_params(edev); 2462 rte_intr_callback_register(&pci_dev->intr_handle, 2463 qede_interrupt_handler, (void *)eth_dev); 2464 if (rte_intr_enable(&pci_dev->intr_handle)) { 2465 DP_ERR(edev, "rte_intr_enable() failed\n"); 2466 return -ENODEV; 2467 } 2468 2469 /* Start the Slowpath-process */ 2470 memset(¶ms, 0, sizeof(struct qed_slowpath_params)); 2471 params.int_mode = ECORE_INT_MODE_MSIX; 2472 params.drv_major = QEDE_PMD_VERSION_MAJOR; 2473 params.drv_minor = QEDE_PMD_VERSION_MINOR; 2474 params.drv_rev = QEDE_PMD_VERSION_REVISION; 2475 params.drv_eng = QEDE_PMD_VERSION_PATCH; 2476 strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, 2477 QEDE_PMD_DRV_VER_STR_SIZE); 2478 2479 /* For CMT mode device do periodic polling for slowpath events. 2480 * This is required since uio device uses only one MSI-x 2481 * interrupt vector but we need one for each engine. 2482 */ 2483 if (edev->num_hwfns > 1 && IS_PF(edev)) { 2484 rc = rte_eal_alarm_set(timer_period * US_PER_S, 2485 qede_poll_sp_sb_cb, 2486 (void *)eth_dev); 2487 if (rc != 0) { 2488 DP_ERR(edev, "Unable to start periodic" 2489 " timer rc %d\n", rc); 2490 return -EINVAL; 2491 } 2492 } 2493 2494 rc = qed_ops->common->slowpath_start(edev, ¶ms); 2495 if (rc) { 2496 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc); 2497 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2498 (void *)eth_dev); 2499 return -ENODEV; 2500 } 2501 2502 rc = qed_ops->fill_dev_info(edev, &dev_info); 2503 if (rc) { 2504 DP_ERR(edev, "Cannot get device_info rc %d\n", rc); 2505 qed_ops->common->slowpath_stop(edev); 2506 qed_ops->common->remove(edev); 2507 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2508 (void *)eth_dev); 2509 return -ENODEV; 2510 } 2511 2512 qede_alloc_etherdev(adapter, &dev_info); 2513 2514 adapter->ops->common->set_name(edev, edev->name); 2515 2516 if (!is_vf) 2517 adapter->dev_info.num_mac_filters = 2518 (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev), 2519 ECORE_MAC); 2520 else 2521 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev), 2522 (uint32_t *)&adapter->dev_info.num_mac_filters); 2523 2524 /* Allocate memory for storing MAC addr */ 2525 eth_dev->data->mac_addrs = rte_zmalloc(edev->name, 2526 (ETHER_ADDR_LEN * 2527 adapter->dev_info.num_mac_filters), 2528 RTE_CACHE_LINE_SIZE); 2529 2530 if (eth_dev->data->mac_addrs == NULL) { 2531 DP_ERR(edev, "Failed to allocate MAC address\n"); 2532 qed_ops->common->slowpath_stop(edev); 2533 qed_ops->common->remove(edev); 2534 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2535 (void *)eth_dev); 2536 return -ENOMEM; 2537 } 2538 2539 if (!is_vf) { 2540 ether_addr_copy((struct ether_addr *)edev->hwfns[0]. 2541 hw_info.hw_mac_addr, 2542 ð_dev->data->mac_addrs[0]); 2543 ether_addr_copy(ð_dev->data->mac_addrs[0], 2544 &adapter->primary_mac); 2545 } else { 2546 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev), 2547 &bulletin_change); 2548 if (bulletin_change) { 2549 is_mac_exist = 2550 ecore_vf_bulletin_get_forced_mac( 2551 ECORE_LEADING_HWFN(edev), 2552 vf_mac, 2553 &is_mac_forced); 2554 if (is_mac_exist && is_mac_forced) { 2555 DP_INFO(edev, "VF macaddr received from PF\n"); 2556 ether_addr_copy((struct ether_addr *)&vf_mac, 2557 ð_dev->data->mac_addrs[0]); 2558 ether_addr_copy(ð_dev->data->mac_addrs[0], 2559 &adapter->primary_mac); 2560 } else { 2561 DP_ERR(edev, "No VF macaddr assigned\n"); 2562 } 2563 } 2564 } 2565 2566 eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops; 2567 2568 if (do_once) { 2569 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO 2570 qede_print_adapter_info(adapter); 2571 #endif 2572 do_once = false; 2573 } 2574 2575 adapter->num_tx_queues = 0; 2576 adapter->num_rx_queues = 0; 2577 SLIST_INIT(&adapter->fdir_info.fdir_list_head); 2578 SLIST_INIT(&adapter->vlan_list_head); 2579 SLIST_INIT(&adapter->uc_list_head); 2580 adapter->mtu = ETHER_MTU; 2581 adapter->new_mtu = ETHER_MTU; 2582 if (!is_vf) 2583 if (qede_start_vport(adapter, adapter->mtu)) 2584 return -1; 2585 2586 DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", 2587 adapter->primary_mac.addr_bytes[0], 2588 adapter->primary_mac.addr_bytes[1], 2589 adapter->primary_mac.addr_bytes[2], 2590 adapter->primary_mac.addr_bytes[3], 2591 adapter->primary_mac.addr_bytes[4], 2592 adapter->primary_mac.addr_bytes[5]); 2593 2594 DP_INFO(edev, "Device initialized\n"); 2595 2596 return 0; 2597 } 2598 2599 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev) 2600 { 2601 return qede_common_dev_init(eth_dev, 1); 2602 } 2603 2604 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev) 2605 { 2606 return qede_common_dev_init(eth_dev, 0); 2607 } 2608 2609 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev) 2610 { 2611 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT 2612 struct qede_dev *qdev = eth_dev->data->dev_private; 2613 struct ecore_dev *edev = &qdev->edev; 2614 2615 PMD_INIT_FUNC_TRACE(edev); 2616 #endif 2617 2618 /* only uninitialize in the primary process */ 2619 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2620 return 0; 2621 2622 /* safe to close dev here */ 2623 qede_dev_close(eth_dev); 2624 2625 eth_dev->dev_ops = NULL; 2626 eth_dev->rx_pkt_burst = NULL; 2627 eth_dev->tx_pkt_burst = NULL; 2628 2629 if (eth_dev->data->mac_addrs) 2630 rte_free(eth_dev->data->mac_addrs); 2631 2632 eth_dev->data->mac_addrs = NULL; 2633 2634 return 0; 2635 } 2636 2637 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2638 { 2639 return qede_dev_common_uninit(eth_dev); 2640 } 2641 2642 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2643 { 2644 return qede_dev_common_uninit(eth_dev); 2645 } 2646 2647 static const struct rte_pci_id pci_id_qedevf_map[] = { 2648 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2649 { 2650 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF) 2651 }, 2652 { 2653 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV) 2654 }, 2655 { 2656 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV) 2657 }, 2658 {.vendor_id = 0,} 2659 }; 2660 2661 static const struct rte_pci_id pci_id_qede_map[] = { 2662 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2663 { 2664 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E) 2665 }, 2666 { 2667 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S) 2668 }, 2669 { 2670 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40) 2671 }, 2672 { 2673 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25) 2674 }, 2675 { 2676 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100) 2677 }, 2678 { 2679 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50) 2680 }, 2681 { 2682 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G) 2683 }, 2684 { 2685 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G) 2686 }, 2687 { 2688 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G) 2689 }, 2690 { 2691 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G) 2692 }, 2693 {.vendor_id = 0,} 2694 }; 2695 2696 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2697 struct rte_pci_device *pci_dev) 2698 { 2699 return rte_eth_dev_pci_generic_probe(pci_dev, 2700 sizeof(struct qede_dev), qedevf_eth_dev_init); 2701 } 2702 2703 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2704 { 2705 return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit); 2706 } 2707 2708 static struct rte_pci_driver rte_qedevf_pmd = { 2709 .id_table = pci_id_qedevf_map, 2710 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2711 .probe = qedevf_eth_dev_pci_probe, 2712 .remove = qedevf_eth_dev_pci_remove, 2713 }; 2714 2715 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2716 struct rte_pci_device *pci_dev) 2717 { 2718 return rte_eth_dev_pci_generic_probe(pci_dev, 2719 sizeof(struct qede_dev), qede_eth_dev_init); 2720 } 2721 2722 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2723 { 2724 return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit); 2725 } 2726 2727 static struct rte_pci_driver rte_qede_pmd = { 2728 .id_table = pci_id_qede_map, 2729 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2730 .probe = qede_eth_dev_pci_probe, 2731 .remove = qede_eth_dev_pci_remove, 2732 }; 2733 2734 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd); 2735 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map); 2736 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci"); 2737 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd); 2738 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map); 2739 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci"); 2740