1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2016 - 2018 Cavium Inc. 3 * All rights reserved. 4 * www.cavium.com 5 */ 6 7 #include "qede_ethdev.h" 8 #include <rte_string_fns.h> 9 #include <rte_alarm.h> 10 #include <rte_version.h> 11 #include <rte_kvargs.h> 12 13 /* Globals */ 14 int qede_logtype_init; 15 int qede_logtype_driver; 16 17 static const struct qed_eth_ops *qed_ops; 18 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev); 19 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev); 20 21 #define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */ 22 23 struct rte_qede_xstats_name_off { 24 char name[RTE_ETH_XSTATS_NAME_SIZE]; 25 uint64_t offset; 26 }; 27 28 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = { 29 {"rx_unicast_bytes", 30 offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)}, 31 {"rx_multicast_bytes", 32 offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)}, 33 {"rx_broadcast_bytes", 34 offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)}, 35 {"rx_unicast_packets", 36 offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)}, 37 {"rx_multicast_packets", 38 offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)}, 39 {"rx_broadcast_packets", 40 offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)}, 41 42 {"tx_unicast_bytes", 43 offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)}, 44 {"tx_multicast_bytes", 45 offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)}, 46 {"tx_broadcast_bytes", 47 offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)}, 48 {"tx_unicast_packets", 49 offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)}, 50 {"tx_multicast_packets", 51 offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)}, 52 {"tx_broadcast_packets", 53 offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)}, 54 55 {"rx_64_byte_packets", 56 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)}, 57 {"rx_65_to_127_byte_packets", 58 offsetof(struct ecore_eth_stats_common, 59 rx_65_to_127_byte_packets)}, 60 {"rx_128_to_255_byte_packets", 61 offsetof(struct ecore_eth_stats_common, 62 rx_128_to_255_byte_packets)}, 63 {"rx_256_to_511_byte_packets", 64 offsetof(struct ecore_eth_stats_common, 65 rx_256_to_511_byte_packets)}, 66 {"rx_512_to_1023_byte_packets", 67 offsetof(struct ecore_eth_stats_common, 68 rx_512_to_1023_byte_packets)}, 69 {"rx_1024_to_1518_byte_packets", 70 offsetof(struct ecore_eth_stats_common, 71 rx_1024_to_1518_byte_packets)}, 72 {"tx_64_byte_packets", 73 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)}, 74 {"tx_65_to_127_byte_packets", 75 offsetof(struct ecore_eth_stats_common, 76 tx_65_to_127_byte_packets)}, 77 {"tx_128_to_255_byte_packets", 78 offsetof(struct ecore_eth_stats_common, 79 tx_128_to_255_byte_packets)}, 80 {"tx_256_to_511_byte_packets", 81 offsetof(struct ecore_eth_stats_common, 82 tx_256_to_511_byte_packets)}, 83 {"tx_512_to_1023_byte_packets", 84 offsetof(struct ecore_eth_stats_common, 85 tx_512_to_1023_byte_packets)}, 86 {"tx_1024_to_1518_byte_packets", 87 offsetof(struct ecore_eth_stats_common, 88 tx_1024_to_1518_byte_packets)}, 89 90 {"rx_mac_crtl_frames", 91 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)}, 92 {"tx_mac_control_frames", 93 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)}, 94 {"rx_pause_frames", 95 offsetof(struct ecore_eth_stats_common, rx_pause_frames)}, 96 {"tx_pause_frames", 97 offsetof(struct ecore_eth_stats_common, tx_pause_frames)}, 98 {"rx_priority_flow_control_frames", 99 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)}, 
100 {"tx_priority_flow_control_frames", 101 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)}, 102 103 {"rx_crc_errors", 104 offsetof(struct ecore_eth_stats_common, rx_crc_errors)}, 105 {"rx_align_errors", 106 offsetof(struct ecore_eth_stats_common, rx_align_errors)}, 107 {"rx_carrier_errors", 108 offsetof(struct ecore_eth_stats_common, rx_carrier_errors)}, 109 {"rx_oversize_packet_errors", 110 offsetof(struct ecore_eth_stats_common, rx_oversize_packets)}, 111 {"rx_jabber_errors", 112 offsetof(struct ecore_eth_stats_common, rx_jabbers)}, 113 {"rx_undersize_packet_errors", 114 offsetof(struct ecore_eth_stats_common, rx_undersize_packets)}, 115 {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)}, 116 {"rx_host_buffer_not_available", 117 offsetof(struct ecore_eth_stats_common, no_buff_discards)}, 118 /* Number of packets discarded because they are bigger than MTU */ 119 {"rx_packet_too_big_discards", 120 offsetof(struct ecore_eth_stats_common, 121 packet_too_big_discard)}, 122 {"rx_ttl_zero_discards", 123 offsetof(struct ecore_eth_stats_common, ttl0_discard)}, 124 {"rx_multi_function_tag_filter_discards", 125 offsetof(struct ecore_eth_stats_common, mftag_filter_discards)}, 126 {"rx_mac_filter_discards", 127 offsetof(struct ecore_eth_stats_common, mac_filter_discards)}, 128 {"rx_hw_buffer_truncates", 129 offsetof(struct ecore_eth_stats_common, brb_truncates)}, 130 {"rx_hw_buffer_discards", 131 offsetof(struct ecore_eth_stats_common, brb_discards)}, 132 {"tx_error_drop_packets", 133 offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)}, 134 135 {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)}, 136 {"rx_mac_unicast_packets", 137 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)}, 138 {"rx_mac_multicast_packets", 139 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)}, 140 {"rx_mac_broadcast_packets", 141 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)}, 142 {"rx_mac_frames_ok", 143 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)}, 144 {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)}, 145 {"tx_mac_unicast_packets", 146 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)}, 147 {"tx_mac_multicast_packets", 148 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)}, 149 {"tx_mac_broadcast_packets", 150 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)}, 151 152 {"lro_coalesced_packets", 153 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)}, 154 {"lro_coalesced_events", 155 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)}, 156 {"lro_aborts_num", 157 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)}, 158 {"lro_not_coalesced_packets", 159 offsetof(struct ecore_eth_stats_common, 160 tpa_not_coalesced_pkts)}, 161 {"lro_coalesced_bytes", 162 offsetof(struct ecore_eth_stats_common, 163 tpa_coalesced_bytes)}, 164 }; 165 166 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = { 167 {"rx_1519_to_1522_byte_packets", 168 offsetof(struct ecore_eth_stats, bb) + 169 offsetof(struct ecore_eth_stats_bb, 170 rx_1519_to_1522_byte_packets)}, 171 {"rx_1519_to_2047_byte_packets", 172 offsetof(struct ecore_eth_stats, bb) + 173 offsetof(struct ecore_eth_stats_bb, 174 rx_1519_to_2047_byte_packets)}, 175 {"rx_2048_to_4095_byte_packets", 176 offsetof(struct ecore_eth_stats, bb) + 177 offsetof(struct ecore_eth_stats_bb, 178 rx_2048_to_4095_byte_packets)}, 179 {"rx_4096_to_9216_byte_packets", 180 offsetof(struct ecore_eth_stats, bb) + 181 
offsetof(struct ecore_eth_stats_bb, 182 rx_4096_to_9216_byte_packets)}, 183 {"rx_9217_to_16383_byte_packets", 184 offsetof(struct ecore_eth_stats, bb) + 185 offsetof(struct ecore_eth_stats_bb, 186 rx_9217_to_16383_byte_packets)}, 187 188 {"tx_1519_to_2047_byte_packets", 189 offsetof(struct ecore_eth_stats, bb) + 190 offsetof(struct ecore_eth_stats_bb, 191 tx_1519_to_2047_byte_packets)}, 192 {"tx_2048_to_4095_byte_packets", 193 offsetof(struct ecore_eth_stats, bb) + 194 offsetof(struct ecore_eth_stats_bb, 195 tx_2048_to_4095_byte_packets)}, 196 {"tx_4096_to_9216_byte_packets", 197 offsetof(struct ecore_eth_stats, bb) + 198 offsetof(struct ecore_eth_stats_bb, 199 tx_4096_to_9216_byte_packets)}, 200 {"tx_9217_to_16383_byte_packets", 201 offsetof(struct ecore_eth_stats, bb) + 202 offsetof(struct ecore_eth_stats_bb, 203 tx_9217_to_16383_byte_packets)}, 204 205 {"tx_lpi_entry_count", 206 offsetof(struct ecore_eth_stats, bb) + 207 offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)}, 208 {"tx_total_collisions", 209 offsetof(struct ecore_eth_stats, bb) + 210 offsetof(struct ecore_eth_stats_bb, tx_total_collisions)}, 211 }; 212 213 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = { 214 {"rx_1519_to_max_byte_packets", 215 offsetof(struct ecore_eth_stats, ah) + 216 offsetof(struct ecore_eth_stats_ah, 217 rx_1519_to_max_byte_packets)}, 218 {"tx_1519_to_max_byte_packets", 219 offsetof(struct ecore_eth_stats, ah) + 220 offsetof(struct ecore_eth_stats_ah, 221 tx_1519_to_max_byte_packets)}, 222 }; 223 224 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = { 225 {"rx_q_segments", 226 offsetof(struct qede_rx_queue, rx_segs)}, 227 {"rx_q_hw_errors", 228 offsetof(struct qede_rx_queue, rx_hw_errors)}, 229 {"rx_q_allocation_errors", 230 offsetof(struct qede_rx_queue, rx_alloc_errors)} 231 }; 232 233 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) 234 { 235 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); 236 } 237 238 static void 239 qede_interrupt_handler_intx(void *param) 240 { 241 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 242 struct qede_dev *qdev = eth_dev->data->dev_private; 243 struct ecore_dev *edev = &qdev->edev; 244 u64 status; 245 246 /* Check if our device actually raised an interrupt */ 247 status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev)); 248 if (status & 0x1) { 249 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 250 251 if (rte_intr_enable(eth_dev->intr_handle)) 252 DP_ERR(edev, "rte_intr_enable failed\n"); 253 } 254 } 255 256 static void 257 qede_interrupt_handler(void *param) 258 { 259 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 260 struct qede_dev *qdev = eth_dev->data->dev_private; 261 struct ecore_dev *edev = &qdev->edev; 262 263 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 264 if (rte_intr_enable(eth_dev->intr_handle)) 265 DP_ERR(edev, "rte_intr_enable failed\n"); 266 } 267 268 static void 269 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info) 270 { 271 rte_memcpy(&qdev->dev_info, info, sizeof(*info)); 272 qdev->ops = qed_ops; 273 } 274 275 static void qede_print_adapter_info(struct qede_dev *qdev) 276 { 277 struct ecore_dev *edev = &qdev->edev; 278 struct qed_dev_info *info = &qdev->dev_info.common; 279 static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE]; 280 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; 281 282 DP_INFO(edev, "*********************************\n"); 283 DP_INFO(edev, " DPDK version:%s\n", rte_version()); 284 DP_INFO(edev, " Chip details : %s 
%c%d\n",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		'A' + edev->chip_rev,
		(int)edev->chip_metal);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
		 ver_str, QEDE_PMD_VERSION);
	DP_INFO(edev, " Driver version : %s\n", drv_ver);
	DP_INFO(edev, " Firmware version : %s\n", ver_str);

	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
		 "%d.%d.%d.%d",
		 (info->mfw_rev >> 24) & 0xff,
		 (info->mfw_rev >> 16) & 0xff,
		 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
	DP_INFO(edev, " Firmware file : %s\n", qede_fw_file);
	DP_INFO(edev, "*********************************\n");
}

static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for_each_rss(qid) {
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			     offsetof(struct qede_rx_queue, rcv_pkts), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			     offsetof(struct qede_rx_queue, rx_hw_errors), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			     offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
			    sizeof(uint64_t));

		if (xstats)
			for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
				OSAL_MEMSET((((char *)
					      (qdev->fp_array[qid].rxq)) +
					     qede_rxq_xstats_strings[j].offset),
					    0,
					    sizeof(uint64_t));

		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	i = 0;

	for_each_tss(qid) {
		txq = qdev->fp_array[qid].txq;

		OSAL_MEMSET((uint64_t *)(uintptr_t)
				(((uint64_t)(uintptr_t)(txq)) +
				 offsetof(struct qede_tx_queue, xmit_pkts)), 0,
			    sizeof(uint64_t));

		i++;
		if (i == txq_stat_cntrs)
			break;
	}
}

static int
qede_stop_vport(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	uint8_t vport_id;
	int rc;
	int i;

	vport_id = 0;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
					 vport_id);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
			return rc;
		}
	}

	DP_INFO(edev, "vport stopped\n");

	return 0;
}

static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_start_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (qdev->vport_started)
		qede_stop_vport(edev);

	memset(&params, 0, sizeof(params));
	params.vport_id = 0;
	params.mtu = mtu;
	/* @DPDK - Disable FW placement */
	params.zero_placement_offset = 1;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_start(p_hwfn, &params);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Start V-PORT failed %d\n", rc);
			return rc;
		}
	}
	ecore_reset_vport_stats(edev);
	qdev->vport_started = true;
	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

	return 0;
}

#define QEDE_NPAR_TX_SWITCHING		"npar_tx_switching"
#define QEDE_VF_TX_SWITCHING		"vf_tx_switching"

/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc = -1;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_rx_flg = flg;
	params.vport_active_tx_flg = flg;
	if (~qdev->enable_tx_switching & flg) {
		params.update_tx_switching_flg = 1;
		params.tx_switching_flg = !flg;
	}
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			break;
		}
	}
	DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");

	return rc;
}

static void
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
			   uint16_t mtu, bool enable)
{
	/* Enable LRO in split mode */
	sge_tpa_params->tpa_ipv4_en_flg = enable;
	sge_tpa_params->tpa_ipv6_en_flg = enable;
	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
	/* set if tpa enable changes */
	sge_tpa_params->update_tpa_en_flg = 1;
	/* set if tpa parameters should be handled */
	sge_tpa_params->update_tpa_param_flg = enable;

	sge_tpa_params->max_buffers_per_cqe = 20;
	/* Enable TPA in split mode. In this mode each TPA segment
	 * starts on the new BD, so there is one BD per segment.
	 */
	sge_tpa_params->tpa_pkt_split_flg = 1;
	sge_tpa_params->tpa_hdr_data_split_flg = 0;
	sge_tpa_params->tpa_gro_consistent_flg = 0;
	sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
	sge_tpa_params->tpa_max_size = 0x7FFF;
	sge_tpa_params->tpa_min_size_to_start = mtu / 2;
	sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}

/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_sge_tpa_params tpa_params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
	qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
	params.vport_id = 0;
	params.sge_tpa_params = &tpa_params;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update LRO\n");
			return -1;
		}
	}
	qdev->enable_lro = flg;
	eth_dev->data->lro = flg;

	DP_INFO(edev, "LRO is %s\n", flg ?
"enabled" : "disabled"); 512 513 return 0; 514 } 515 516 static int 517 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev, 518 enum qed_filter_rx_mode_type type) 519 { 520 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 521 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 522 struct ecore_filter_accept_flags flags; 523 524 memset(&flags, 0, sizeof(flags)); 525 526 flags.update_rx_mode_config = 1; 527 flags.update_tx_mode_config = 1; 528 flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 529 ECORE_ACCEPT_MCAST_MATCHED | 530 ECORE_ACCEPT_BCAST; 531 532 flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 533 ECORE_ACCEPT_MCAST_MATCHED | 534 ECORE_ACCEPT_BCAST; 535 536 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { 537 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 538 if (IS_VF(edev)) { 539 flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 540 DP_INFO(edev, "Enabling Tx unmatched flag for VF\n"); 541 } 542 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { 543 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 544 } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC | 545 QED_FILTER_RX_MODE_TYPE_PROMISC)) { 546 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED | 547 ECORE_ACCEPT_MCAST_UNMATCHED; 548 } 549 550 return ecore_filter_accept_cmd(edev, 0, flags, false, false, 551 ECORE_SPQ_MODE_CB, NULL); 552 } 553 554 int 555 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 556 bool add) 557 { 558 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 559 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 560 struct qede_ucast_entry *tmp = NULL; 561 struct qede_ucast_entry *u; 562 struct rte_ether_addr *mac_addr; 563 564 mac_addr = (struct rte_ether_addr *)ucast->mac; 565 if (add) { 566 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 567 if ((memcmp(mac_addr, &tmp->mac, 568 RTE_ETHER_ADDR_LEN) == 0) && 569 ucast->vni == tmp->vni && 570 ucast->vlan == tmp->vlan) { 571 DP_INFO(edev, "Unicast MAC is already added" 572 " with vlan = %u, vni = %u\n", 573 ucast->vlan, ucast->vni); 574 return 0; 575 } 576 } 577 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), 578 RTE_CACHE_LINE_SIZE); 579 if (!u) { 580 DP_ERR(edev, "Did not allocate memory for ucast\n"); 581 return -ENOMEM; 582 } 583 rte_ether_addr_copy(mac_addr, &u->mac); 584 u->vlan = ucast->vlan; 585 u->vni = ucast->vni; 586 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list); 587 qdev->num_uc_addr++; 588 } else { 589 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 590 if ((memcmp(mac_addr, &tmp->mac, 591 RTE_ETHER_ADDR_LEN) == 0) && 592 ucast->vlan == tmp->vlan && 593 ucast->vni == tmp->vni) 594 break; 595 } 596 if (tmp == NULL) { 597 DP_INFO(edev, "Unicast MAC is not found\n"); 598 return -EINVAL; 599 } 600 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list); 601 qdev->num_uc_addr--; 602 } 603 604 return 0; 605 } 606 607 static int 608 qede_add_mcast_filters(struct rte_eth_dev *eth_dev, 609 struct rte_ether_addr *mc_addrs, 610 uint32_t mc_addrs_num) 611 { 612 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 613 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 614 struct ecore_filter_mcast mcast; 615 struct qede_mcast_entry *m = NULL; 616 uint8_t i; 617 int rc; 618 619 for (i = 0; i < mc_addrs_num; i++) { 620 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), 621 RTE_CACHE_LINE_SIZE); 622 if (!m) { 623 DP_ERR(edev, "Did not allocate memory for mcast\n"); 624 return -ENOMEM; 625 } 626 rte_ether_addr_copy(&mc_addrs[i], &m->mac); 627 
		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
	}
	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = mc_addrs_num;
	mcast.opcode = ECORE_FILTER_ADD;
	for (i = 0; i < mc_addrs_num; i++)
		rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *)
							&mcast.mac[i]);
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to add multicast filter (rc = %d\n)", rc);
		return -1;
	}

	return 0;
}

static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_mcast_entry *tmp = NULL;
	struct ecore_filter_mcast mcast;
	int j;
	int rc;

	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = qdev->num_mc_addr;
	mcast.opcode = ECORE_FILTER_REMOVE;
	j = 0;
	SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
		rte_ether_addr_copy(&tmp->mac,
				    (struct rte_ether_addr *)&mcast.mac[j]);
		j++;
	}
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to delete multicast filter\n");
		return -1;
	}
	/* Init the list */
	while (!SLIST_EMPTY(&qdev->mc_list_head)) {
		tmp = SLIST_FIRST(&qdev->mc_list_head);
		SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
	}
	SLIST_INIT(&qdev->mc_list_head);

	return 0;
}

enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;

	if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
		DP_ERR(edev, "Ucast filter table limit exceeded,"
			  " Please enable promisc mode\n");
		return ECORE_INVAL;
	}

	rc = qede_ucast_filter(eth_dev, ucast, add);
	if (rc == 0)
		rc = ecore_filter_ucast_cmd(edev, ucast,
					    ECORE_SPQ_MODE_CB, NULL);
	/* Indicate error only for add filter operation.
	 * Delete filter operations are not severe.
	 */
	if ((rc != ECORE_SUCCESS) && add)
		DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
		       rc, add);

	return rc;
}

static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct ecore_filter_ucast ucast;
	int re;

	if (!rte_is_valid_assigned_ether_addr(mac_addr))
		return -EINVAL;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_ADD;
	ucast.type = ECORE_FILTER_MAC;
	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac);
	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
	return re;
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_filter_ucast ucast;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_filters) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_filters);
		return;
	}

	if (!rte_is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
		return;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;

	/* Use the index maintained by rte */
	rte_ether_addr_copy(&eth_dev->data->mac_addrs[index],
			    (struct rte_ether_addr *)&ucast.mac);

	qede_mac_int_ops(eth_dev, &ucast, false);
}

static int
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		return -EPERM;
	}

	qede_mac_addr_remove(eth_dev, 0);

	return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_accept_any_vlan_flg = 1;
	params.accept_any_vlan = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to configure accept-any-vlan\n");
			return;
		}
	}

	DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_inner_vlan_removal_flg = 1;
	params.inner_vlan_removal_flg = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			return -1;
		}
	}

	DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
	return 0;
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	struct qede_vlan_entry *tmp = NULL;
	struct qede_vlan_entry *vlan;
	struct ecore_filter_ucast ucast;
	int rc;

	if (on) {
		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
			DP_ERR(edev, "Reached max VLAN filter limit"
				      " enabling accept_any_vlan\n");
			qede_config_accept_any_vlan(qdev, true);
			return 0;
		}

		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id) {
				DP_INFO(edev, "VLAN %u already configured\n",
					vlan_id);
				return 0;
			}
		}

		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
				  RTE_CACHE_LINE_SIZE);

		if (!vlan) {
			DP_ERR(edev, "Did not allocate memory for VLAN\n");
			return -ENOMEM;
		}

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_ADD;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
			rte_free(vlan);
		} else {
			vlan->vid = vlan_id;
			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
			qdev->configured_vlans++;
			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id)
				break;
		}

		if (!tmp) {
			if (qdev->configured_vlans == 0) {
				DP_INFO(edev,
					"No VLAN filters configured yet\n");
				return 0;
			}

			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
			return -EINVAL;
		}

		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_REMOVE;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		} else {
			qdev->configured_vlans--;
			DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	}

	return rc;
}

static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filtering kicks in when a VLAN is added */
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
			qede_vlan_filter_set(eth_dev, 0, 1);
		} else {
			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
				DP_ERR(edev,
				       " Please remove existing VLAN filters"
				       " before disabling VLAN filtering\n");
				/* Signal app that VLAN filtering is still
				 * enabled
				 */
				eth_dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_VLAN_FILTER;
			} else {
				qede_vlan_filter_set(eth_dev, 0, 0);
			}
		}
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		DP_ERR(edev, "Extend VLAN not supported\n");

	qdev->vlan_offload_mask = mask;

	DP_INFO(edev, "VLAN offload mask %d\n", mask);

	return 0;
}

static void qede_prandom_bytes(uint32_t *buff)
{
	uint8_t i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = rand();
}

int qede_config_rss(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, id, pos, q;

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
	}

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))
		return -EINVAL;

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(qdev);
		reta_conf[id].reta[pos] = q;
	}
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))
		return -EINVAL;

	return 0;
}

static void qede_fastpath_start(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		ecore_hw_start_fastpath(p_hwfn);
	}
}

static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE(edev);

	/* Update MTU only if it has changed */
	if (eth_dev->data->mtu != qdev->mtu) {
		if (qede_update_mtu(eth_dev, qdev->mtu))
			goto err;
	}

	/* Configure TPA parameters */
	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (qede_enable_tpa(eth_dev, true))
			return -EINVAL;
		/* Enable scatter mode for LRO */
		if (!eth_dev->data->scattered_rx)
			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
	}

	/* Start queues */
	if (qede_start_queues(eth_dev))
		goto err;

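	/* PF only: zero the per-queue soft counters (rcv_pkts, rx error
	 * counters, xmit_pkts) kept in the fastpath structures.
	 */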
	if (IS_PF(edev))
		qede_reset_queue_stats(qdev, true);

	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
	 * enabling RSS. Hence RSS configuration is deferred upto this point.
	 * Also, we would like to retain similar behavior in PF case, so we
	 * don't do PF/VF specific check here.
	 */
	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		if (qede_config_rss(eth_dev))
			goto err;

	/* Enable vport*/
	if (qede_activate_vport(eth_dev, true))
		goto err;

	/* Update link status */
	qede_link_update(eth_dev, 0);

	/* Start/resume traffic */
	qede_fastpath_start(edev);

	DP_INFO(edev, "Device started\n");

	return 0;
err:
	DP_ERR(edev, "Device start fails\n");
	return -1; /* common error code is < 0 */
}

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Disable vport */
	if (qede_activate_vport(eth_dev, false))
		return;

	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false);

	/* Stop queues */
	qede_stop_queues(eth_dev);

	/* Disable traffic */
	ecore_hw_stop_fastpath(edev); /* TBD - loop */

	DP_INFO(edev, "Device is stopped\n");
}

static const char * const valid_args[] = {
	QEDE_NPAR_TX_SWITCHING,
	QEDE_VF_TX_SWITCHING,
	NULL,
};

static int qede_args_check(const char *key, const char *val, void *opaque)
{
	unsigned long tmp;
	int ret = 0;
	struct rte_eth_dev *eth_dev = opaque;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
		return errno;
	}

	if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
	    ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
		qdev->enable_tx_switching = !!tmp;
		DP_INFO(edev, "Disabling %s tx-switching\n",
			strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
			"VF" : "NPAR");
	}

	return ret;
}

static int qede_args(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	struct rte_kvargs *kvlist;
	struct rte_devargs *devargs;
	int ret;
	int i;

	devargs = pci_dev->device.devargs;
	if (!devargs)
		return 0; /* return success */

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Process parameters. */
	for (i = 0; (valid_args[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, valid_args[i])) {
			ret = rte_kvargs_process(kvlist, valid_args[i],
						 qede_args_check, eth_dev);
			if (ret != ECORE_SUCCESS) {
				rte_kvargs_free(kvlist);
				return ret;
			}
		}
	}
	rte_kvargs_free(kvlist);

	return 0;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	int ret;

	PMD_INIT_FUNC_TRACE(edev);

	/* Check requirements for 100G mode */
	if (ECORE_IS_CMT(edev)) {
		if (eth_dev->data->nb_rx_queues < 2 ||
		    eth_dev->data->nb_tx_queues < 2) {
			DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
			return -EINVAL;
		}

		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
			DP_ERR(edev,
			       "100G mode needs even no. of RX/TX queues\n");
			return -EINVAL;
		}
	}

	/* We need to have min 1 RX queue.There is no min check in
	 * rte_eth_dev_configure(), so we are checking it here.
	 */
	if (eth_dev->data->nb_rx_queues == 0) {
		DP_ERR(edev, "Minimum one RX queue is required\n");
		return -EINVAL;
	}

	/* Enable Tx switching by default */
	qdev->enable_tx_switching = 1;

	/* Parse devargs and fix up rxmode */
	if (qede_args(eth_dev))
		DP_NOTICE(edev, false,
			  "Invalid devargs supplied, requested change will not take effect\n");

	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
		DP_ERR(edev, "Unsupported multi-queue mode\n");
		return -ENOTSUP;
	}
	/* Flow director mode check */
	if (qede_check_fdir_support(eth_dev))
		return -ENOTSUP;

	qede_dealloc_fp_resc(eth_dev);
	qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
	qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
	if (qede_alloc_fp_resc(qdev))
		return -ENOMEM;

	/* If jumbo enabled adjust MTU */
	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;

	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;

	if (qede_start_vport(qdev, eth_dev->data->mtu))
		return -1;

	qdev->mtu = eth_dev->data->mtu;

	/* Enable VLAN offloads by default */
	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
					     ETH_VLAN_FILTER_MASK);
	if (ret)
		return ret;

	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
			QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 256,
	.nb_align = 256,
	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct
ecore_dev *edev = &qdev->edev; 1262 struct qed_link_output link; 1263 uint32_t speed_cap = 0; 1264 1265 PMD_INIT_FUNC_TRACE(edev); 1266 1267 dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; 1268 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; 1269 dev_info->rx_desc_lim = qede_rx_desc_lim; 1270 dev_info->tx_desc_lim = qede_tx_desc_lim; 1271 1272 if (IS_PF(edev)) 1273 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1274 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2); 1275 else 1276 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1277 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF); 1278 dev_info->max_tx_queues = dev_info->max_rx_queues; 1279 1280 dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters; 1281 dev_info->max_vfs = 0; 1282 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; 1283 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 1284 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; 1285 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 1286 DEV_RX_OFFLOAD_UDP_CKSUM | 1287 DEV_RX_OFFLOAD_TCP_CKSUM | 1288 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1289 DEV_RX_OFFLOAD_TCP_LRO | 1290 DEV_RX_OFFLOAD_KEEP_CRC | 1291 DEV_RX_OFFLOAD_SCATTER | 1292 DEV_RX_OFFLOAD_JUMBO_FRAME | 1293 DEV_RX_OFFLOAD_VLAN_FILTER | 1294 DEV_RX_OFFLOAD_VLAN_STRIP); 1295 dev_info->rx_queue_offload_capa = 0; 1296 1297 /* TX offloads are on a per-packet basis, so it is applicable 1298 * to both at port and queue levels. 1299 */ 1300 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | 1301 DEV_TX_OFFLOAD_IPV4_CKSUM | 1302 DEV_TX_OFFLOAD_UDP_CKSUM | 1303 DEV_TX_OFFLOAD_TCP_CKSUM | 1304 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 1305 DEV_TX_OFFLOAD_MULTI_SEGS | 1306 DEV_TX_OFFLOAD_TCP_TSO | 1307 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 1308 DEV_TX_OFFLOAD_GENEVE_TNL_TSO); 1309 dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa; 1310 1311 dev_info->default_txconf = (struct rte_eth_txconf) { 1312 .offloads = DEV_TX_OFFLOAD_MULTI_SEGS, 1313 }; 1314 1315 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1316 /* Packets are always dropped if no descriptors are available */ 1317 .rx_drop_en = 1, 1318 .offloads = 0, 1319 }; 1320 1321 memset(&link, 0, sizeof(struct qed_link_output)); 1322 qdev->ops->common->get_link(edev, &link); 1323 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1324 speed_cap |= ETH_LINK_SPEED_1G; 1325 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1326 speed_cap |= ETH_LINK_SPEED_10G; 1327 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1328 speed_cap |= ETH_LINK_SPEED_25G; 1329 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1330 speed_cap |= ETH_LINK_SPEED_40G; 1331 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1332 speed_cap |= ETH_LINK_SPEED_50G; 1333 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1334 speed_cap |= ETH_LINK_SPEED_100G; 1335 dev_info->speed_capa = speed_cap; 1336 } 1337 1338 /* return 0 means link status changed, -1 means not changed */ 1339 int 1340 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) 1341 { 1342 struct qede_dev *qdev = eth_dev->data->dev_private; 1343 struct ecore_dev *edev = &qdev->edev; 1344 struct qed_link_output q_link; 1345 struct rte_eth_link link; 1346 uint16_t link_duplex; 1347 1348 memset(&q_link, 0, sizeof(q_link)); 1349 memset(&link, 0, sizeof(link)); 1350 1351 qdev->ops->common->get_link(edev, &q_link); 1352 1353 /* Link Speed */ 1354 link.link_speed = 
q_link.speed; 1355 1356 /* Link Mode */ 1357 switch (q_link.duplex) { 1358 case QEDE_DUPLEX_HALF: 1359 link_duplex = ETH_LINK_HALF_DUPLEX; 1360 break; 1361 case QEDE_DUPLEX_FULL: 1362 link_duplex = ETH_LINK_FULL_DUPLEX; 1363 break; 1364 case QEDE_DUPLEX_UNKNOWN: 1365 default: 1366 link_duplex = -1; 1367 } 1368 link.link_duplex = link_duplex; 1369 1370 /* Link Status */ 1371 link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN; 1372 1373 /* AN */ 1374 link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? 1375 ETH_LINK_AUTONEG : ETH_LINK_FIXED; 1376 1377 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", 1378 link.link_speed, link.link_duplex, 1379 link.link_autoneg, link.link_status); 1380 1381 return rte_eth_linkstatus_set(eth_dev, &link); 1382 } 1383 1384 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev) 1385 { 1386 struct qede_dev *qdev = eth_dev->data->dev_private; 1387 struct ecore_dev *edev = &qdev->edev; 1388 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; 1389 1390 PMD_INIT_FUNC_TRACE(edev); 1391 1392 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1393 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1394 1395 qed_configure_filter_rx_mode(eth_dev, type); 1396 } 1397 1398 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev) 1399 { 1400 struct qede_dev *qdev = eth_dev->data->dev_private; 1401 struct ecore_dev *edev = &qdev->edev; 1402 1403 PMD_INIT_FUNC_TRACE(edev); 1404 1405 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1406 qed_configure_filter_rx_mode(eth_dev, 1407 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC); 1408 else 1409 qed_configure_filter_rx_mode(eth_dev, 1410 QED_FILTER_RX_MODE_TYPE_REGULAR); 1411 } 1412 1413 static void qede_poll_sp_sb_cb(void *param) 1414 { 1415 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 1416 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1417 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1418 int rc; 1419 1420 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 1421 qede_interrupt_action(&edev->hwfns[1]); 1422 1423 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, 1424 qede_poll_sp_sb_cb, 1425 (void *)eth_dev); 1426 if (rc != 0) { 1427 DP_ERR(edev, "Unable to start periodic" 1428 " timer rc %d\n", rc); 1429 assert(false && "Unable to start periodic timer"); 1430 } 1431 } 1432 1433 static void qede_dev_close(struct rte_eth_dev *eth_dev) 1434 { 1435 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1436 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1437 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1438 1439 PMD_INIT_FUNC_TRACE(edev); 1440 1441 /* dev_stop() shall cleanup fp resources in hw but without releasing 1442 * dma memories and sw structures so that dev_start() can be called 1443 * by the app without reconfiguration. 
However, in dev_close() we 1444 * can release all the resources and device can be brought up newly 1445 */ 1446 if (eth_dev->data->dev_started) 1447 qede_dev_stop(eth_dev); 1448 1449 qede_stop_vport(edev); 1450 qdev->vport_started = false; 1451 qede_fdir_dealloc_resc(eth_dev); 1452 qede_dealloc_fp_resc(eth_dev); 1453 1454 eth_dev->data->nb_rx_queues = 0; 1455 eth_dev->data->nb_tx_queues = 0; 1456 1457 /* Bring the link down */ 1458 qede_dev_set_link_state(eth_dev, false); 1459 qdev->ops->common->slowpath_stop(edev); 1460 qdev->ops->common->remove(edev); 1461 rte_intr_disable(&pci_dev->intr_handle); 1462 1463 switch (pci_dev->intr_handle.type) { 1464 case RTE_INTR_HANDLE_UIO_INTX: 1465 case RTE_INTR_HANDLE_VFIO_LEGACY: 1466 rte_intr_callback_unregister(&pci_dev->intr_handle, 1467 qede_interrupt_handler_intx, 1468 (void *)eth_dev); 1469 break; 1470 default: 1471 rte_intr_callback_unregister(&pci_dev->intr_handle, 1472 qede_interrupt_handler, 1473 (void *)eth_dev); 1474 } 1475 1476 if (ECORE_IS_CMT(edev)) 1477 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); 1478 } 1479 1480 static int 1481 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) 1482 { 1483 struct qede_dev *qdev = eth_dev->data->dev_private; 1484 struct ecore_dev *edev = &qdev->edev; 1485 struct ecore_eth_stats stats; 1486 unsigned int i = 0, j = 0, qid; 1487 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 1488 struct qede_tx_queue *txq; 1489 1490 ecore_get_vport_stats(edev, &stats); 1491 1492 /* RX Stats */ 1493 eth_stats->ipackets = stats.common.rx_ucast_pkts + 1494 stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts; 1495 1496 eth_stats->ibytes = stats.common.rx_ucast_bytes + 1497 stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes; 1498 1499 eth_stats->ierrors = stats.common.rx_crc_errors + 1500 stats.common.rx_align_errors + 1501 stats.common.rx_carrier_errors + 1502 stats.common.rx_oversize_packets + 1503 stats.common.rx_jabbers + stats.common.rx_undersize_packets; 1504 1505 eth_stats->rx_nombuf = stats.common.no_buff_discards; 1506 1507 eth_stats->imissed = stats.common.mftag_filter_discards + 1508 stats.common.mac_filter_discards + 1509 stats.common.no_buff_discards + 1510 stats.common.brb_truncates + stats.common.brb_discards; 1511 1512 /* TX stats */ 1513 eth_stats->opackets = stats.common.tx_ucast_pkts + 1514 stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts; 1515 1516 eth_stats->obytes = stats.common.tx_ucast_bytes + 1517 stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes; 1518 1519 eth_stats->oerrors = stats.common.tx_err_drop_pkts; 1520 1521 /* Queue stats */ 1522 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1523 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1524 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev), 1525 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1526 if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) || 1527 (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev))) 1528 DP_VERBOSE(edev, ECORE_MSG_DEBUG, 1529 "Not all the queue stats will be displayed. 
Set" 1530 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" 1531 " appropriately and retry.\n"); 1532 1533 for_each_rss(qid) { 1534 eth_stats->q_ipackets[i] = 1535 *(uint64_t *)( 1536 ((char *)(qdev->fp_array[qid].rxq)) + 1537 offsetof(struct qede_rx_queue, 1538 rcv_pkts)); 1539 eth_stats->q_errors[i] = 1540 *(uint64_t *)( 1541 ((char *)(qdev->fp_array[qid].rxq)) + 1542 offsetof(struct qede_rx_queue, 1543 rx_hw_errors)) + 1544 *(uint64_t *)( 1545 ((char *)(qdev->fp_array[qid].rxq)) + 1546 offsetof(struct qede_rx_queue, 1547 rx_alloc_errors)); 1548 i++; 1549 if (i == rxq_stat_cntrs) 1550 break; 1551 } 1552 1553 for_each_tss(qid) { 1554 txq = qdev->fp_array[qid].txq; 1555 eth_stats->q_opackets[j] = 1556 *((uint64_t *)(uintptr_t) 1557 (((uint64_t)(uintptr_t)(txq)) + 1558 offsetof(struct qede_tx_queue, 1559 xmit_pkts))); 1560 j++; 1561 if (j == txq_stat_cntrs) 1562 break; 1563 } 1564 1565 return 0; 1566 } 1567 1568 static unsigned 1569 qede_get_xstats_count(struct qede_dev *qdev) { 1570 if (ECORE_IS_BB(&qdev->edev)) 1571 return RTE_DIM(qede_xstats_strings) + 1572 RTE_DIM(qede_bb_xstats_strings) + 1573 (RTE_DIM(qede_rxq_xstats_strings) * 1574 RTE_MIN(QEDE_RSS_COUNT(qdev), 1575 RTE_ETHDEV_QUEUE_STAT_CNTRS)); 1576 else 1577 return RTE_DIM(qede_xstats_strings) + 1578 RTE_DIM(qede_ah_xstats_strings) + 1579 (RTE_DIM(qede_rxq_xstats_strings) * 1580 RTE_MIN(QEDE_RSS_COUNT(qdev), 1581 RTE_ETHDEV_QUEUE_STAT_CNTRS)); 1582 } 1583 1584 static int 1585 qede_get_xstats_names(struct rte_eth_dev *dev, 1586 struct rte_eth_xstat_name *xstats_names, 1587 __rte_unused unsigned int limit) 1588 { 1589 struct qede_dev *qdev = dev->data->dev_private; 1590 struct ecore_dev *edev = &qdev->edev; 1591 const unsigned int stat_cnt = qede_get_xstats_count(qdev); 1592 unsigned int i, qid, stat_idx = 0; 1593 unsigned int rxq_stat_cntrs; 1594 1595 if (xstats_names != NULL) { 1596 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1597 strlcpy(xstats_names[stat_idx].name, 1598 qede_xstats_strings[i].name, 1599 sizeof(xstats_names[stat_idx].name)); 1600 stat_idx++; 1601 } 1602 1603 if (ECORE_IS_BB(edev)) { 1604 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { 1605 strlcpy(xstats_names[stat_idx].name, 1606 qede_bb_xstats_strings[i].name, 1607 sizeof(xstats_names[stat_idx].name)); 1608 stat_idx++; 1609 } 1610 } else { 1611 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { 1612 strlcpy(xstats_names[stat_idx].name, 1613 qede_ah_xstats_strings[i].name, 1614 sizeof(xstats_names[stat_idx].name)); 1615 stat_idx++; 1616 } 1617 } 1618 1619 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1620 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1621 for (qid = 0; qid < rxq_stat_cntrs; qid++) { 1622 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1623 snprintf(xstats_names[stat_idx].name, 1624 sizeof(xstats_names[stat_idx].name), 1625 "%.4s%d%s", 1626 qede_rxq_xstats_strings[i].name, qid, 1627 qede_rxq_xstats_strings[i].name + 4); 1628 stat_idx++; 1629 } 1630 } 1631 } 1632 1633 return stat_cnt; 1634 } 1635 1636 static int 1637 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1638 unsigned int n) 1639 { 1640 struct qede_dev *qdev = dev->data->dev_private; 1641 struct ecore_dev *edev = &qdev->edev; 1642 struct ecore_eth_stats stats; 1643 const unsigned int num = qede_get_xstats_count(qdev); 1644 unsigned int i, qid, stat_idx = 0; 1645 unsigned int rxq_stat_cntrs; 1646 1647 if (n < num) 1648 return num; 1649 1650 ecore_get_vport_stats(edev, &stats); 1651 1652 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1653 
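		/* Each xstat value is read by adding the offset from the
		 * name/offset table to the vport statistics snapshot.
		 */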
xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) + 1654 qede_xstats_strings[i].offset); 1655 xstats[stat_idx].id = stat_idx; 1656 stat_idx++; 1657 } 1658 1659 if (ECORE_IS_BB(edev)) { 1660 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { 1661 xstats[stat_idx].value = 1662 *(uint64_t *)(((char *)&stats) + 1663 qede_bb_xstats_strings[i].offset); 1664 xstats[stat_idx].id = stat_idx; 1665 stat_idx++; 1666 } 1667 } else { 1668 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { 1669 xstats[stat_idx].value = 1670 *(uint64_t *)(((char *)&stats) + 1671 qede_ah_xstats_strings[i].offset); 1672 xstats[stat_idx].id = stat_idx; 1673 stat_idx++; 1674 } 1675 } 1676 1677 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1678 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1679 for (qid = 0; qid < rxq_stat_cntrs; qid++) { 1680 for_each_rss(qid) { 1681 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1682 xstats[stat_idx].value = *(uint64_t *)( 1683 ((char *)(qdev->fp_array[qid].rxq)) + 1684 qede_rxq_xstats_strings[i].offset); 1685 xstats[stat_idx].id = stat_idx; 1686 stat_idx++; 1687 } 1688 } 1689 } 1690 1691 return stat_idx; 1692 } 1693 1694 static void 1695 qede_reset_xstats(struct rte_eth_dev *dev) 1696 { 1697 struct qede_dev *qdev = dev->data->dev_private; 1698 struct ecore_dev *edev = &qdev->edev; 1699 1700 ecore_reset_vport_stats(edev); 1701 qede_reset_queue_stats(qdev, true); 1702 } 1703 1704 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) 1705 { 1706 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1707 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1708 struct qed_link_params link_params; 1709 int rc; 1710 1711 DP_INFO(edev, "setting link state %d\n", link_up); 1712 memset(&link_params, 0, sizeof(link_params)); 1713 link_params.link_up = link_up; 1714 rc = qdev->ops->common->set_link(edev, &link_params); 1715 if (rc != ECORE_SUCCESS) 1716 DP_ERR(edev, "Unable to set link state %d\n", link_up); 1717 1718 return rc; 1719 } 1720 1721 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev) 1722 { 1723 return qede_dev_set_link_state(eth_dev, true); 1724 } 1725 1726 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev) 1727 { 1728 return qede_dev_set_link_state(eth_dev, false); 1729 } 1730 1731 static void qede_reset_stats(struct rte_eth_dev *eth_dev) 1732 { 1733 struct qede_dev *qdev = eth_dev->data->dev_private; 1734 struct ecore_dev *edev = &qdev->edev; 1735 1736 ecore_reset_vport_stats(edev); 1737 qede_reset_queue_stats(qdev, false); 1738 } 1739 1740 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev) 1741 { 1742 enum qed_filter_rx_mode_type type = 1743 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1744 1745 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1746 type |= QED_FILTER_RX_MODE_TYPE_PROMISC; 1747 1748 qed_configure_filter_rx_mode(eth_dev, type); 1749 } 1750 1751 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev) 1752 { 1753 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1754 qed_configure_filter_rx_mode(eth_dev, 1755 QED_FILTER_RX_MODE_TYPE_PROMISC); 1756 else 1757 qed_configure_filter_rx_mode(eth_dev, 1758 QED_FILTER_RX_MODE_TYPE_REGULAR); 1759 } 1760 1761 static int 1762 qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, 1763 struct rte_ether_addr *mc_addrs, 1764 uint32_t mc_addrs_num) 1765 { 1766 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1767 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1768 uint8_t i; 1769 1770 if (mc_addrs_num > ECORE_MAX_MC_ADDRS) { 1771 DP_ERR(edev, "Reached max multicast 
static int
qede_set_mc_addr_list(struct rte_eth_dev *eth_dev,
		      struct rte_ether_addr *mc_addrs,
		      uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint8_t i;

	if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
		DP_ERR(edev, "Reached max multicast filters limit,"
			     " Please enable multicast promisc mode\n");
		return -ENOSPC;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
			DP_ERR(edev, "Not a valid multicast MAC\n");
			return -EINVAL;
		}
	}

	/* Flush all existing entries */
	if (qede_del_mcast_filters(eth_dev))
		return -1;

	/* Set new mcast list */
	return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
}

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (IS_PF(edev)) {
		struct ecore_sp_vport_update_params params;

		memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
		params.vport_id = 0;
		params.mtu = mtu;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			params.opaque_fid = p_hwfn->hw_info.opaque_fid;
			rc = ecore_sp_vport_update(p_hwfn, &params,
						   ECORE_SPQ_MODE_EBLOCK, NULL);
			if (rc != ECORE_SUCCESS)
				goto err;
		}
	} else {
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
			if (rc == ECORE_INVAL) {
				DP_INFO(edev, "VF MTU Update TLV not supported\n");
				/* Recreate vport */
				rc = qede_start_vport(qdev, mtu);
				if (rc != ECORE_SUCCESS)
					goto err;

				/* Restore config lost due to vport stop */
				if (eth_dev->data->promiscuous)
					qede_promiscuous_enable(eth_dev);
				else
					qede_promiscuous_disable(eth_dev);

				if (eth_dev->data->all_multicast)
					qede_allmulticast_enable(eth_dev);
				else
					qede_allmulticast_disable(eth_dev);

				qede_vlan_offload_set(eth_dev,
						      qdev->vlan_offload_mask);
			} else if (rc != ECORE_SUCCESS) {
				goto err;
			}
		}
	}
	DP_INFO(edev, "%s MTU updated to %u\n",
		IS_PF(edev) ? "PF" : "VF", mtu);

	return 0;

err:
	DP_ERR(edev, "Failed to update MTU\n");
	return -1;
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_GRE,
		/* Inner */
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
		return ptypes;

	return NULL;
}

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
	*rss_caps = 0;
	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}
int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params rss_params;
	struct ecore_hwfn *p_hwfn;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	uint8_t len = rss_conf->rss_key_len;
	uint8_t idx;
	uint8_t i;
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&rss_params, 0, sizeof(rss_params));

	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
		(unsigned long)hf, len, key);

	if (hf != 0) {
		/* Enabling RSS */
		DP_INFO(edev, "Enabling rss\n");

		/* RSS caps */
		qede_init_rss_caps(&rss_params.rss_caps, hf);
		rss_params.update_rss_capabilities = 1;

		/* RSS hash key */
		if (key) {
			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
				DP_ERR(edev, "RSS key length exceeds limit\n");
				return -EINVAL;
			}
			DP_INFO(edev, "Applying user supplied hash key\n");
			rss_params.update_rss_key = 1;
			memcpy(&rss_params.rss_key, key, len);
		}
		rss_params.rss_enable = 1;
	}

	rss_params.update_rss_config = 1;
	/* tbl_size has to be set with capabilities */
	rss_params.rss_table_size_log = 7;
	vport_update_params.vport_id = 0;
	/* pass the L2 handles instead of qids */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		idx = i % QEDE_RSS_COUNT(qdev);
		rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
	}
	vport_update_params.rss_params = &rss_params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			return rc;
		}
	}
	qdev->rss_enable = rss_params.rss_enable;

	/* Update local structure for hash query */
	qdev->rss_conf.rss_hf = hf;
	qdev->rss_conf.rss_key_len = len;
	if (qdev->rss_enable) {
		if (qdev->rss_conf.rss_key == NULL) {
			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
			if (qdev->rss_conf.rss_key == NULL) {
				DP_ERR(edev, "No memory to store RSS key\n");
				return -ENOMEM;
			}
		}
		if (key && len) {
			DP_INFO(edev, "Storing RSS key\n");
			memcpy(qdev->rss_conf.rss_key, key, len);
		}
	} else if (!qdev->rss_enable && len == 0) {
		if (qdev->rss_conf.rss_key) {
			free(qdev->rss_conf.rss_key);
			qdev->rss_conf.rss_key = NULL;
			DP_INFO(edev, "Free RSS key\n");
		}
	}

	return 0;
}

static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);
	return 0;
}

static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
				     struct ecore_rss_params *rss)
{
	int i, fn;
	bool rss_mode = 1; /* enable */
	struct ecore_queue_cid *cid;
	struct ecore_rss_params *t_rss;

	/* In regular scenario, we'd simply need to take input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2-queues on CMT doesn't
	 * require RSS.
	 */

	/* CMT should be round-robin */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		cid = rss->rss_ind_table[i];

		if (cid->p_owner == ECORE_LEADING_HWFN(edev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
	}

	t_rss = &rss[1];
	t_rss->update_rss_ind_table = 1;
	t_rss->rss_table_size_log = 7;
	t_rss->update_rss_config = 1;

	/* Make sure RSS is actually required */
	for_each_hwfn(edev, fn) {
		for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
		     i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}

		if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
			DP_INFO(edev,
				"CMT - 1 queue per-hwfn; Disabling RSS\n");
			rss_mode = 0;
			goto out;
		}
	}

out:
	t_rss->rss_enable = rss_mode;

	return rss_mode;
}
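/*
 * Illustrative usage sketch, not part of the driver: the RETA callbacks
 * below are reached through the generic ethdev API. "port_id",
 * "nb_rx_queues" and the even queue spread are assumptions made up for
 * this example.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[2];	// 128 entries
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < 128; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rx_queues;
 *	}
 *	ret = rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
 *	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, 128);
 */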
int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params *params;
	struct ecore_hwfn *p_hwfn;
	uint16_t i, idx, shift;
	uint8_t entry;
	int rc = 0;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
			     RTE_CACHE_LINE_SIZE);
	if (params == NULL) {
		DP_ERR(edev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = reta_conf[idx].reta[shift];
			/* Pass rxq handles to ecore */
			params->rss_ind_table[i] =
					qdev->fp_array[entry].rxq->handle;
			/* Update the local copy for RETA query command */
			qdev->rss_ind_table[i] = entry;
		}
	}

	params->update_rss_ind_table = 1;
	params->rss_table_size_log = 7;
	params->update_rss_config = 1;

	/* Fix up RETA for CMT mode device */
	if (ECORE_IS_CMT(edev))
		qdev->rss_enable = qede_update_rss_parm_cmt(edev, params);

	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params->rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			goto out;
		}
	}

out:
	rte_free(params);
	return rc;
}

static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}
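/*
 * Illustrative usage sketch, not part of the driver: an application changes
 * the MTU through the generic ethdev call below, which the ethdev layer
 * routes to .mtu_set (qede_set_mtu()). Note that qede_set_mtu() briefly
 * parks the RX/TX burst functions and may stop/restart the port while the
 * new buffer sizes are applied. "port_id" and the 9000-byte value are
 * assumptions made up for this example.
 *
 *	ret = rte_eth_dev_set_mtu(port_id, 9000);
 */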
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_dev_info dev_info = {0};
	struct qede_fastpath *fp;
	uint32_t max_rx_pkt_len;
	uint32_t frame_size;
	uint16_t bufsz;
	bool restart = false;
	int i, rc;

	PMD_INIT_FUNC_TRACE(edev);
	qede_dev_info_get(dev, &dev_info);
	max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
	frame_size = max_rx_pkt_len;
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
		       mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
		       QEDE_ETH_OVERHEAD);
		return -EINVAL;
	}
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
			dev->data->min_rx_buf_size);
		return -EINVAL;
	}
	/* Temporarily replace I/O functions with dummy ones. It cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
	if (dev->data->dev_started) {
		dev->data->dev_started = 0;
		qede_dev_stop(dev);
		restart = true;
	}
	rte_delay_ms(1000);
	qdev->mtu = mtu;

	/* Fix up RX buf size for all queues of the port */
	for_each_rss(i) {
		fp = &qdev->fp_array[i];
		if (fp->rxq != NULL) {
			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
			/* cache align the mbuf size to simplify rx_buf_size
			 * calculation
			 */
			bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
			rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
			if (rc < 0)
				return rc;

			fp->rxq->rx_buf_size = rc;
		}
	}
	if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (!dev->data->dev_started && restart) {
		qede_dev_start(dev);
		dev->data->dev_started = 1;
	}

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
	/* Reassign back */
	dev->rx_pkt_burst = qede_recv_pkts;
	dev->tx_pkt_burst = qede_xmit_pkts;

	return 0;
}

static int
qede_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = qede_eth_dev_uninit(dev);
	if (ret)
		return ret;

	return qede_eth_dev_init(dev);
}

static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};
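/*
 * Illustrative note, not part of the driver: the ops table above is never
 * called directly by applications; the ethdev layer dispatches into it. A
 * minimal sketch (with "port_id", the queue counts and "port_conf" made up
 * for the example):
 *
 *	ret = rte_eth_dev_configure(port_id, 4, 4, &port_conf);
 *						// -> .dev_configure
 *	ret = rte_eth_dev_start(port_id);	// -> .dev_start
 *	rte_eth_promiscuous_enable(port_id);	// -> .promiscuous_enable
 */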
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}

static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t int_mode;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}
	qede_update_pf_params(edev);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		int_mode = ECORE_INT_MODE_INTA;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler_intx,
					   (void *)eth_dev);
		break;
	default:
		int_mode = ECORE_INT_MODE_MSIX;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler,
					   (void *)eth_dev);
	}

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));

	params.int_mode = int_mode;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
				(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(RTE_ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				    &eth_dev->data->mac_addrs[0]);
		rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
				    &adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				rte_ether_addr_copy(
					(struct rte_ether_addr *)&vf_mac,
					&eth_dev->data->mac_addrs[0]);
				rte_ether_addr_copy(
					&eth_dev->data->mac_addrs[0],
					&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->arfs_info.arfs_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	SLIST_INIT(&adapter->mc_list_head);
	adapter->mtu = RTE_ETHER_MTU;
	adapter->vport_started = false;

	/* VF tunnel offloads are enabled by default in the PF driver */
	adapter->vxlan.num_filters = 0;
	adapter->geneve.num_filters = 0;
	adapter->ipgre.num_filters = 0;
	if (is_vf) {
		adapter->vxlan.enable = true;
		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
		adapter->geneve.enable = true;
		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
					      ETH_TUNNEL_FILTER_IVLAN;
		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
		adapter->ipgre.enable = true;
		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
	} else {
		adapter->vxlan.enable = false;
		adapter->geneve.enable = false;
		adapter->ipgre.enable = false;
	}

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}
static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};

static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				    struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");

RTE_INIT(qede_init_log)
{
	qede_logtype_init = rte_log_register("pmd.net.qede.init");
	if (qede_logtype_init >= 0)
		rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
	qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
	if (qede_logtype_driver >= 0)
		rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
}
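/*
 * Illustrative bring-up sketch, not part of the driver: once the NIC is
 * bound to one of the kernel modules listed in RTE_PMD_REGISTER_KMOD_DEP
 * above (e.g. vfio-pci), the EAL PCI scan matches pci_id_qede_map and runs
 * qede_eth_dev_pci_probe() -> qede_eth_dev_init(). The PCI address and the
 * EAL arguments below are assumptions made up for this example.
 *
 *	testpmd -w 0000:3b:00.0 -- -i
 *
 * or, from application code:
 *
 *	ret = rte_eal_init(argc, argv);		// probes the qede port(s)
 *	nb_ports = rte_eth_dev_count_avail();
 */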