1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2016 - 2018 Cavium Inc. 3 * All rights reserved. 4 * www.cavium.com 5 */ 6 7 #include "qede_ethdev.h" 8 #include <rte_alarm.h> 9 #include <rte_version.h> 10 #include <rte_kvargs.h> 11 12 /* Globals */ 13 int qede_logtype_init; 14 int qede_logtype_driver; 15 16 static const struct qed_eth_ops *qed_ops; 17 #define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */ 18 19 struct rte_qede_xstats_name_off { 20 char name[RTE_ETH_XSTATS_NAME_SIZE]; 21 uint64_t offset; 22 }; 23 24 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = { 25 {"rx_unicast_bytes", 26 offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)}, 27 {"rx_multicast_bytes", 28 offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)}, 29 {"rx_broadcast_bytes", 30 offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)}, 31 {"rx_unicast_packets", 32 offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)}, 33 {"rx_multicast_packets", 34 offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)}, 35 {"rx_broadcast_packets", 36 offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)}, 37 38 {"tx_unicast_bytes", 39 offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)}, 40 {"tx_multicast_bytes", 41 offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)}, 42 {"tx_broadcast_bytes", 43 offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)}, 44 {"tx_unicast_packets", 45 offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)}, 46 {"tx_multicast_packets", 47 offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)}, 48 {"tx_broadcast_packets", 49 offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)}, 50 51 {"rx_64_byte_packets", 52 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)}, 53 {"rx_65_to_127_byte_packets", 54 offsetof(struct ecore_eth_stats_common, 55 rx_65_to_127_byte_packets)}, 56 {"rx_128_to_255_byte_packets", 57 offsetof(struct ecore_eth_stats_common, 58 rx_128_to_255_byte_packets)}, 59 {"rx_256_to_511_byte_packets", 60 offsetof(struct ecore_eth_stats_common, 61 rx_256_to_511_byte_packets)}, 62 {"rx_512_to_1023_byte_packets", 63 offsetof(struct ecore_eth_stats_common, 64 rx_512_to_1023_byte_packets)}, 65 {"rx_1024_to_1518_byte_packets", 66 offsetof(struct ecore_eth_stats_common, 67 rx_1024_to_1518_byte_packets)}, 68 {"tx_64_byte_packets", 69 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)}, 70 {"tx_65_to_127_byte_packets", 71 offsetof(struct ecore_eth_stats_common, 72 tx_65_to_127_byte_packets)}, 73 {"tx_128_to_255_byte_packets", 74 offsetof(struct ecore_eth_stats_common, 75 tx_128_to_255_byte_packets)}, 76 {"tx_256_to_511_byte_packets", 77 offsetof(struct ecore_eth_stats_common, 78 tx_256_to_511_byte_packets)}, 79 {"tx_512_to_1023_byte_packets", 80 offsetof(struct ecore_eth_stats_common, 81 tx_512_to_1023_byte_packets)}, 82 {"tx_1024_to_1518_byte_packets", 83 offsetof(struct ecore_eth_stats_common, 84 tx_1024_to_1518_byte_packets)}, 85 86 {"rx_mac_crtl_frames", 87 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)}, 88 {"tx_mac_control_frames", 89 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)}, 90 {"rx_pause_frames", 91 offsetof(struct ecore_eth_stats_common, rx_pause_frames)}, 92 {"tx_pause_frames", 93 offsetof(struct ecore_eth_stats_common, tx_pause_frames)}, 94 {"rx_priority_flow_control_frames", 95 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)}, 96 {"tx_priority_flow_control_frames", 97 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)}, 98 99 {"rx_crc_errors", 100 offsetof(struct 
ecore_eth_stats_common, rx_crc_errors)}, 101 {"rx_align_errors", 102 offsetof(struct ecore_eth_stats_common, rx_align_errors)}, 103 {"rx_carrier_errors", 104 offsetof(struct ecore_eth_stats_common, rx_carrier_errors)}, 105 {"rx_oversize_packet_errors", 106 offsetof(struct ecore_eth_stats_common, rx_oversize_packets)}, 107 {"rx_jabber_errors", 108 offsetof(struct ecore_eth_stats_common, rx_jabbers)}, 109 {"rx_undersize_packet_errors", 110 offsetof(struct ecore_eth_stats_common, rx_undersize_packets)}, 111 {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)}, 112 {"rx_host_buffer_not_available", 113 offsetof(struct ecore_eth_stats_common, no_buff_discards)}, 114 /* Number of packets discarded because they are bigger than MTU */ 115 {"rx_packet_too_big_discards", 116 offsetof(struct ecore_eth_stats_common, 117 packet_too_big_discard)}, 118 {"rx_ttl_zero_discards", 119 offsetof(struct ecore_eth_stats_common, ttl0_discard)}, 120 {"rx_multi_function_tag_filter_discards", 121 offsetof(struct ecore_eth_stats_common, mftag_filter_discards)}, 122 {"rx_mac_filter_discards", 123 offsetof(struct ecore_eth_stats_common, mac_filter_discards)}, 124 {"rx_hw_buffer_truncates", 125 offsetof(struct ecore_eth_stats_common, brb_truncates)}, 126 {"rx_hw_buffer_discards", 127 offsetof(struct ecore_eth_stats_common, brb_discards)}, 128 {"tx_error_drop_packets", 129 offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)}, 130 131 {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)}, 132 {"rx_mac_unicast_packets", 133 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)}, 134 {"rx_mac_multicast_packets", 135 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)}, 136 {"rx_mac_broadcast_packets", 137 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)}, 138 {"rx_mac_frames_ok", 139 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)}, 140 {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)}, 141 {"tx_mac_unicast_packets", 142 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)}, 143 {"tx_mac_multicast_packets", 144 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)}, 145 {"tx_mac_broadcast_packets", 146 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)}, 147 148 {"lro_coalesced_packets", 149 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)}, 150 {"lro_coalesced_events", 151 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)}, 152 {"lro_aborts_num", 153 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)}, 154 {"lro_not_coalesced_packets", 155 offsetof(struct ecore_eth_stats_common, 156 tpa_not_coalesced_pkts)}, 157 {"lro_coalesced_bytes", 158 offsetof(struct ecore_eth_stats_common, 159 tpa_coalesced_bytes)}, 160 }; 161 162 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = { 163 {"rx_1519_to_1522_byte_packets", 164 offsetof(struct ecore_eth_stats, bb) + 165 offsetof(struct ecore_eth_stats_bb, 166 rx_1519_to_1522_byte_packets)}, 167 {"rx_1519_to_2047_byte_packets", 168 offsetof(struct ecore_eth_stats, bb) + 169 offsetof(struct ecore_eth_stats_bb, 170 rx_1519_to_2047_byte_packets)}, 171 {"rx_2048_to_4095_byte_packets", 172 offsetof(struct ecore_eth_stats, bb) + 173 offsetof(struct ecore_eth_stats_bb, 174 rx_2048_to_4095_byte_packets)}, 175 {"rx_4096_to_9216_byte_packets", 176 offsetof(struct ecore_eth_stats, bb) + 177 offsetof(struct ecore_eth_stats_bb, 178 rx_4096_to_9216_byte_packets)}, 179 {"rx_9217_to_16383_byte_packets", 180 offsetof(struct ecore_eth_stats, bb) 
+ 181 offsetof(struct ecore_eth_stats_bb, 182 rx_9217_to_16383_byte_packets)}, 183 184 {"tx_1519_to_2047_byte_packets", 185 offsetof(struct ecore_eth_stats, bb) + 186 offsetof(struct ecore_eth_stats_bb, 187 tx_1519_to_2047_byte_packets)}, 188 {"tx_2048_to_4095_byte_packets", 189 offsetof(struct ecore_eth_stats, bb) + 190 offsetof(struct ecore_eth_stats_bb, 191 tx_2048_to_4095_byte_packets)}, 192 {"tx_4096_to_9216_byte_packets", 193 offsetof(struct ecore_eth_stats, bb) + 194 offsetof(struct ecore_eth_stats_bb, 195 tx_4096_to_9216_byte_packets)}, 196 {"tx_9217_to_16383_byte_packets", 197 offsetof(struct ecore_eth_stats, bb) + 198 offsetof(struct ecore_eth_stats_bb, 199 tx_9217_to_16383_byte_packets)}, 200 201 {"tx_lpi_entry_count", 202 offsetof(struct ecore_eth_stats, bb) + 203 offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)}, 204 {"tx_total_collisions", 205 offsetof(struct ecore_eth_stats, bb) + 206 offsetof(struct ecore_eth_stats_bb, tx_total_collisions)}, 207 }; 208 209 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = { 210 {"rx_1519_to_max_byte_packets", 211 offsetof(struct ecore_eth_stats, ah) + 212 offsetof(struct ecore_eth_stats_ah, 213 rx_1519_to_max_byte_packets)}, 214 {"tx_1519_to_max_byte_packets", 215 offsetof(struct ecore_eth_stats, ah) + 216 offsetof(struct ecore_eth_stats_ah, 217 tx_1519_to_max_byte_packets)}, 218 }; 219 220 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = { 221 {"rx_q_segments", 222 offsetof(struct qede_rx_queue, rx_segs)}, 223 {"rx_q_hw_errors", 224 offsetof(struct qede_rx_queue, rx_hw_errors)}, 225 {"rx_q_allocation_errors", 226 offsetof(struct qede_rx_queue, rx_alloc_errors)} 227 }; 228 229 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) 230 { 231 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); 232 } 233 234 static void 235 qede_interrupt_handler_intx(void *param) 236 { 237 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 238 struct qede_dev *qdev = eth_dev->data->dev_private; 239 struct ecore_dev *edev = &qdev->edev; 240 u64 status; 241 242 /* Check if our device actually raised an interrupt */ 243 status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev)); 244 if (status & 0x1) { 245 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 246 247 if (rte_intr_enable(eth_dev->intr_handle)) 248 DP_ERR(edev, "rte_intr_enable failed\n"); 249 } 250 } 251 252 static void 253 qede_interrupt_handler(void *param) 254 { 255 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 256 struct qede_dev *qdev = eth_dev->data->dev_private; 257 struct ecore_dev *edev = &qdev->edev; 258 259 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 260 if (rte_intr_enable(eth_dev->intr_handle)) 261 DP_ERR(edev, "rte_intr_enable failed\n"); 262 } 263 264 static void 265 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info) 266 { 267 rte_memcpy(&qdev->dev_info, info, sizeof(*info)); 268 qdev->ops = qed_ops; 269 } 270 271 static void qede_print_adapter_info(struct qede_dev *qdev) 272 { 273 struct ecore_dev *edev = &qdev->edev; 274 struct qed_dev_info *info = &qdev->dev_info.common; 275 static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE]; 276 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; 277 278 DP_INFO(edev, "*********************************\n"); 279 DP_INFO(edev, " DPDK version:%s\n", rte_version()); 280 DP_INFO(edev, " Chip details : %s %c%d\n", 281 ECORE_IS_BB(edev) ? 
"BB" : "AH", 282 'A' + edev->chip_rev, 283 (int)edev->chip_metal); 284 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d", 285 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng); 286 snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s", 287 ver_str, QEDE_PMD_VERSION); 288 DP_INFO(edev, " Driver version : %s\n", drv_ver); 289 DP_INFO(edev, " Firmware version : %s\n", ver_str); 290 291 snprintf(ver_str, MCP_DRV_VER_STR_SIZE, 292 "%d.%d.%d.%d", 293 (info->mfw_rev >> 24) & 0xff, 294 (info->mfw_rev >> 16) & 0xff, 295 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff); 296 DP_INFO(edev, " Management Firmware version : %s\n", ver_str); 297 DP_INFO(edev, " Firmware file : %s\n", fw_file); 298 DP_INFO(edev, "*********************************\n"); 299 } 300 301 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats) 302 { 303 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 304 unsigned int i = 0, j = 0, qid; 305 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 306 struct qede_tx_queue *txq; 307 308 DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n"); 309 310 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 311 RTE_ETHDEV_QUEUE_STAT_CNTRS); 312 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev), 313 RTE_ETHDEV_QUEUE_STAT_CNTRS); 314 315 for_each_rss(qid) { 316 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 317 offsetof(struct qede_rx_queue, rcv_pkts), 0, 318 sizeof(uint64_t)); 319 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 320 offsetof(struct qede_rx_queue, rx_hw_errors), 0, 321 sizeof(uint64_t)); 322 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 323 offsetof(struct qede_rx_queue, rx_alloc_errors), 0, 324 sizeof(uint64_t)); 325 326 if (xstats) 327 for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++) 328 OSAL_MEMSET((((char *) 329 (qdev->fp_array[qid].rxq)) + 330 qede_rxq_xstats_strings[j].offset), 331 0, 332 sizeof(uint64_t)); 333 334 i++; 335 if (i == rxq_stat_cntrs) 336 break; 337 } 338 339 i = 0; 340 341 for_each_tss(qid) { 342 txq = qdev->fp_array[qid].txq; 343 344 OSAL_MEMSET((uint64_t *)(uintptr_t) 345 (((uint64_t)(uintptr_t)(txq)) + 346 offsetof(struct qede_tx_queue, xmit_pkts)), 0, 347 sizeof(uint64_t)); 348 349 i++; 350 if (i == txq_stat_cntrs) 351 break; 352 } 353 } 354 355 static int 356 qede_stop_vport(struct ecore_dev *edev) 357 { 358 struct ecore_hwfn *p_hwfn; 359 uint8_t vport_id; 360 int rc; 361 int i; 362 363 vport_id = 0; 364 for_each_hwfn(edev, i) { 365 p_hwfn = &edev->hwfns[i]; 366 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 367 vport_id); 368 if (rc != ECORE_SUCCESS) { 369 DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc); 370 return rc; 371 } 372 } 373 374 DP_INFO(edev, "vport stopped\n"); 375 376 return 0; 377 } 378 379 static int 380 qede_start_vport(struct qede_dev *qdev, uint16_t mtu) 381 { 382 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 383 struct ecore_sp_vport_start_params params; 384 struct ecore_hwfn *p_hwfn; 385 int rc; 386 int i; 387 388 if (qdev->vport_started) 389 qede_stop_vport(edev); 390 391 memset(¶ms, 0, sizeof(params)); 392 params.vport_id = 0; 393 params.mtu = mtu; 394 /* @DPDK - Disable FW placement */ 395 params.zero_placement_offset = 1; 396 for_each_hwfn(edev, i) { 397 p_hwfn = &edev->hwfns[i]; 398 params.concrete_fid = p_hwfn->hw_info.concrete_fid; 399 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 400 rc = ecore_sp_vport_start(p_hwfn, ¶ms); 401 if (rc != ECORE_SUCCESS) { 402 DP_ERR(edev, "Start V-PORT failed %d\n", rc); 403 return rc; 404 } 405 } 406 ecore_reset_vport_stats(edev); 407 
qdev->vport_started = true; 408 DP_INFO(edev, "VPORT started with MTU = %u\n", mtu); 409 410 return 0; 411 } 412 413 #define QEDE_NPAR_TX_SWITCHING "npar_tx_switching" 414 #define QEDE_VF_TX_SWITCHING "vf_tx_switching" 415 416 /* Activate or deactivate vport via vport-update */ 417 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg) 418 { 419 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 420 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 421 struct ecore_sp_vport_update_params params; 422 struct ecore_hwfn *p_hwfn; 423 uint8_t i; 424 int rc = -1; 425 426 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 427 params.vport_id = 0; 428 params.update_vport_active_rx_flg = 1; 429 params.update_vport_active_tx_flg = 1; 430 params.vport_active_rx_flg = flg; 431 params.vport_active_tx_flg = flg; 432 if (~qdev->enable_tx_switching & flg) { 433 params.update_tx_switching_flg = 1; 434 params.tx_switching_flg = !flg; 435 } 436 for_each_hwfn(edev, i) { 437 p_hwfn = &edev->hwfns[i]; 438 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 439 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 440 ECORE_SPQ_MODE_EBLOCK, NULL); 441 if (rc != ECORE_SUCCESS) { 442 DP_ERR(edev, "Failed to update vport\n"); 443 break; 444 } 445 } 446 DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated"); 447 448 return rc; 449 } 450 451 static void 452 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params, 453 uint16_t mtu, bool enable) 454 { 455 /* Enable LRO in split mode */ 456 sge_tpa_params->tpa_ipv4_en_flg = enable; 457 sge_tpa_params->tpa_ipv6_en_flg = enable; 458 sge_tpa_params->tpa_ipv4_tunn_en_flg = enable; 459 sge_tpa_params->tpa_ipv6_tunn_en_flg = enable; 460 /* set if tpa enable changes */ 461 sge_tpa_params->update_tpa_en_flg = 1; 462 /* set if tpa parameters should be handled */ 463 sge_tpa_params->update_tpa_param_flg = enable; 464 465 sge_tpa_params->max_buffers_per_cqe = 20; 466 /* Enable TPA in split mode. In this mode each TPA segment 467 * starts on the new BD, so there is one BD per segment. 468 */ 469 sge_tpa_params->tpa_pkt_split_flg = 1; 470 sge_tpa_params->tpa_hdr_data_split_flg = 0; 471 sge_tpa_params->tpa_gro_consistent_flg = 0; 472 sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 473 sge_tpa_params->tpa_max_size = 0x7FFF; 474 sge_tpa_params->tpa_min_size_to_start = mtu / 2; 475 sge_tpa_params->tpa_min_size_to_cont = mtu / 2; 476 } 477 478 /* Enable/disable LRO via vport-update */ 479 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg) 480 { 481 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 482 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 483 struct ecore_sp_vport_update_params params; 484 struct ecore_sge_tpa_params tpa_params; 485 struct ecore_hwfn *p_hwfn; 486 int rc; 487 int i; 488 489 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 490 memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params)); 491 qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg); 492 params.vport_id = 0; 493 params.sge_tpa_params = &tpa_params; 494 for_each_hwfn(edev, i) { 495 p_hwfn = &edev->hwfns[i]; 496 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 497 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 498 ECORE_SPQ_MODE_EBLOCK, NULL); 499 if (rc != ECORE_SUCCESS) { 500 DP_ERR(edev, "Failed to update LRO\n"); 501 return -1; 502 } 503 } 504 qdev->enable_lro = flg; 505 eth_dev->data->lro = flg; 506 507 DP_INFO(edev, "LRO is %s\n", flg ? 
"enabled" : "disabled"); 508 509 return 0; 510 } 511 512 static int 513 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev, 514 enum qed_filter_rx_mode_type type) 515 { 516 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 517 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 518 struct ecore_filter_accept_flags flags; 519 520 memset(&flags, 0, sizeof(flags)); 521 522 flags.update_rx_mode_config = 1; 523 flags.update_tx_mode_config = 1; 524 flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 525 ECORE_ACCEPT_MCAST_MATCHED | 526 ECORE_ACCEPT_BCAST; 527 528 flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 529 ECORE_ACCEPT_MCAST_MATCHED | 530 ECORE_ACCEPT_BCAST; 531 532 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { 533 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 534 if (IS_VF(edev)) { 535 flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 536 DP_INFO(edev, "Enabling Tx unmatched flag for VF\n"); 537 } 538 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { 539 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 540 } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC | 541 QED_FILTER_RX_MODE_TYPE_PROMISC)) { 542 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED | 543 ECORE_ACCEPT_MCAST_UNMATCHED; 544 } 545 546 return ecore_filter_accept_cmd(edev, 0, flags, false, false, 547 ECORE_SPQ_MODE_CB, NULL); 548 } 549 550 int 551 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 552 bool add) 553 { 554 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 555 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 556 struct qede_ucast_entry *tmp = NULL; 557 struct qede_ucast_entry *u; 558 struct ether_addr *mac_addr; 559 560 mac_addr = (struct ether_addr *)ucast->mac; 561 if (add) { 562 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 563 if ((memcmp(mac_addr, &tmp->mac, 564 ETHER_ADDR_LEN) == 0) && 565 ucast->vni == tmp->vni && 566 ucast->vlan == tmp->vlan) { 567 DP_INFO(edev, "Unicast MAC is already added" 568 " with vlan = %u, vni = %u\n", 569 ucast->vlan, ucast->vni); 570 return 0; 571 } 572 } 573 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), 574 RTE_CACHE_LINE_SIZE); 575 if (!u) { 576 DP_ERR(edev, "Did not allocate memory for ucast\n"); 577 return -ENOMEM; 578 } 579 ether_addr_copy(mac_addr, &u->mac); 580 u->vlan = ucast->vlan; 581 u->vni = ucast->vni; 582 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list); 583 qdev->num_uc_addr++; 584 } else { 585 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 586 if ((memcmp(mac_addr, &tmp->mac, 587 ETHER_ADDR_LEN) == 0) && 588 ucast->vlan == tmp->vlan && 589 ucast->vni == tmp->vni) 590 break; 591 } 592 if (tmp == NULL) { 593 DP_INFO(edev, "Unicast MAC is not found\n"); 594 return -EINVAL; 595 } 596 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list); 597 qdev->num_uc_addr--; 598 } 599 600 return 0; 601 } 602 603 static int 604 qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs, 605 uint32_t mc_addrs_num) 606 { 607 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 608 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 609 struct ecore_filter_mcast mcast; 610 struct qede_mcast_entry *m = NULL; 611 uint8_t i; 612 int rc; 613 614 for (i = 0; i < mc_addrs_num; i++) { 615 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), 616 RTE_CACHE_LINE_SIZE); 617 if (!m) { 618 DP_ERR(edev, "Did not allocate memory for mcast\n"); 619 return -ENOMEM; 620 } 621 ether_addr_copy(&mc_addrs[i], &m->mac); 622 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list); 623 } 624 
	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = mc_addrs_num;
	mcast.opcode = ECORE_FILTER_ADD;
	for (i = 0; i < mc_addrs_num; i++)
		ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
						&mcast.mac[i]);
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);
		return -1;
	}

	return 0;
}

static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_mcast_entry *tmp = NULL;
	struct ecore_filter_mcast mcast;
	int j;
	int rc;

	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = qdev->num_mc_addr;
	mcast.opcode = ECORE_FILTER_REMOVE;
	j = 0;
	SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
		ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
		j++;
	}
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to delete multicast filter\n");
		return -1;
	}
	/* Flush the local shadow list */
	while (!SLIST_EMPTY(&qdev->mc_list_head)) {
		tmp = SLIST_FIRST(&qdev->mc_list_head);
		SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
	}
	SLIST_INIT(&qdev->mc_list_head);

	return 0;
}

enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;

	if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
		DP_ERR(edev, "Ucast filter table limit exceeded,"
			     " please enable promisc mode\n");
		return ECORE_INVAL;
	}

	rc = qede_ucast_filter(eth_dev, ucast, add);
	if (rc == 0)
		rc = ecore_filter_ucast_cmd(edev, ucast,
					    ECORE_SPQ_MODE_CB, NULL);
	/* Indicate error only for add filter operation.
	 * Delete filter operations are not severe.
691 */ 692 if ((rc != ECORE_SUCCESS) && add) 693 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", 694 rc, add); 695 696 return rc; 697 } 698 699 static int 700 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr, 701 __rte_unused uint32_t index, __rte_unused uint32_t pool) 702 { 703 struct ecore_filter_ucast ucast; 704 int re; 705 706 if (!is_valid_assigned_ether_addr(mac_addr)) 707 return -EINVAL; 708 709 qede_set_ucast_cmn_params(&ucast); 710 ucast.opcode = ECORE_FILTER_ADD; 711 ucast.type = ECORE_FILTER_MAC; 712 ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac); 713 re = (int)qede_mac_int_ops(eth_dev, &ucast, 1); 714 return re; 715 } 716 717 static void 718 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) 719 { 720 struct qede_dev *qdev = eth_dev->data->dev_private; 721 struct ecore_dev *edev = &qdev->edev; 722 struct ecore_filter_ucast ucast; 723 724 PMD_INIT_FUNC_TRACE(edev); 725 726 if (index >= qdev->dev_info.num_mac_filters) { 727 DP_ERR(edev, "Index %u is above MAC filter limit %u\n", 728 index, qdev->dev_info.num_mac_filters); 729 return; 730 } 731 732 if (!is_valid_assigned_ether_addr(ð_dev->data->mac_addrs[index])) 733 return; 734 735 qede_set_ucast_cmn_params(&ucast); 736 ucast.opcode = ECORE_FILTER_REMOVE; 737 ucast.type = ECORE_FILTER_MAC; 738 739 /* Use the index maintained by rte */ 740 ether_addr_copy(ð_dev->data->mac_addrs[index], 741 (struct ether_addr *)&ucast.mac); 742 743 qede_mac_int_ops(eth_dev, &ucast, false); 744 } 745 746 static int 747 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr) 748 { 749 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 750 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 751 752 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), 753 mac_addr->addr_bytes)) { 754 DP_ERR(edev, "Setting MAC address is not allowed\n"); 755 return -EPERM; 756 } 757 758 qede_mac_addr_remove(eth_dev, 0); 759 760 return qede_mac_addr_add(eth_dev, mac_addr, 0, 0); 761 } 762 763 void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg) 764 { 765 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 766 struct ecore_sp_vport_update_params params; 767 struct ecore_hwfn *p_hwfn; 768 uint8_t i; 769 int rc; 770 771 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 772 params.vport_id = 0; 773 params.update_accept_any_vlan_flg = 1; 774 params.accept_any_vlan = flg; 775 for_each_hwfn(edev, i) { 776 p_hwfn = &edev->hwfns[i]; 777 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 778 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 779 ECORE_SPQ_MODE_EBLOCK, NULL); 780 if (rc != ECORE_SUCCESS) { 781 DP_ERR(edev, "Failed to configure accept-any-vlan\n"); 782 return; 783 } 784 } 785 786 DP_INFO(edev, "%s accept-any-vlan\n", flg ? 
"enabled" : "disabled"); 787 } 788 789 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg) 790 { 791 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 792 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 793 struct ecore_sp_vport_update_params params; 794 struct ecore_hwfn *p_hwfn; 795 uint8_t i; 796 int rc; 797 798 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 799 params.vport_id = 0; 800 params.update_inner_vlan_removal_flg = 1; 801 params.inner_vlan_removal_flg = flg; 802 for_each_hwfn(edev, i) { 803 p_hwfn = &edev->hwfns[i]; 804 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 805 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 806 ECORE_SPQ_MODE_EBLOCK, NULL); 807 if (rc != ECORE_SUCCESS) { 808 DP_ERR(edev, "Failed to update vport\n"); 809 return -1; 810 } 811 } 812 813 DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled"); 814 return 0; 815 } 816 817 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, 818 uint16_t vlan_id, int on) 819 { 820 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 821 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 822 struct qed_dev_eth_info *dev_info = &qdev->dev_info; 823 struct qede_vlan_entry *tmp = NULL; 824 struct qede_vlan_entry *vlan; 825 struct ecore_filter_ucast ucast; 826 int rc; 827 828 if (on) { 829 if (qdev->configured_vlans == dev_info->num_vlan_filters) { 830 DP_ERR(edev, "Reached max VLAN filter limit" 831 " enabling accept_any_vlan\n"); 832 qede_config_accept_any_vlan(qdev, true); 833 return 0; 834 } 835 836 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 837 if (tmp->vid == vlan_id) { 838 DP_INFO(edev, "VLAN %u already configured\n", 839 vlan_id); 840 return 0; 841 } 842 } 843 844 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry), 845 RTE_CACHE_LINE_SIZE); 846 847 if (!vlan) { 848 DP_ERR(edev, "Did not allocate memory for VLAN\n"); 849 return -ENOMEM; 850 } 851 852 qede_set_ucast_cmn_params(&ucast); 853 ucast.opcode = ECORE_FILTER_ADD; 854 ucast.type = ECORE_FILTER_VLAN; 855 ucast.vlan = vlan_id; 856 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 857 NULL); 858 if (rc != 0) { 859 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id, 860 rc); 861 rte_free(vlan); 862 } else { 863 vlan->vid = vlan_id; 864 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list); 865 qdev->configured_vlans++; 866 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n", 867 vlan_id, qdev->configured_vlans); 868 } 869 } else { 870 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 871 if (tmp->vid == vlan_id) 872 break; 873 } 874 875 if (!tmp) { 876 if (qdev->configured_vlans == 0) { 877 DP_INFO(edev, 878 "No VLAN filters configured yet\n"); 879 return 0; 880 } 881 882 DP_ERR(edev, "VLAN %u not configured\n", vlan_id); 883 return -EINVAL; 884 } 885 886 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list); 887 888 qede_set_ucast_cmn_params(&ucast); 889 ucast.opcode = ECORE_FILTER_REMOVE; 890 ucast.type = ECORE_FILTER_VLAN; 891 ucast.vlan = vlan_id; 892 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 893 NULL); 894 if (rc != 0) { 895 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", 896 vlan_id, rc); 897 } else { 898 qdev->configured_vlans--; 899 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n", 900 vlan_id, qdev->configured_vlans); 901 } 902 } 903 904 return rc; 905 } 906 907 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 908 { 909 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 910 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 911 uint64_t rx_offloads = 
eth_dev->data->dev_conf.rxmode.offloads; 912 913 if (mask & ETH_VLAN_STRIP_MASK) { 914 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 915 (void)qede_vlan_stripping(eth_dev, 1); 916 else 917 (void)qede_vlan_stripping(eth_dev, 0); 918 } 919 920 if (mask & ETH_VLAN_FILTER_MASK) { 921 /* VLAN filtering kicks in when a VLAN is added */ 922 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 923 qede_vlan_filter_set(eth_dev, 0, 1); 924 } else { 925 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ 926 DP_ERR(edev, 927 " Please remove existing VLAN filters" 928 " before disabling VLAN filtering\n"); 929 /* Signal app that VLAN filtering is still 930 * enabled 931 */ 932 eth_dev->data->dev_conf.rxmode.offloads |= 933 DEV_RX_OFFLOAD_VLAN_FILTER; 934 } else { 935 qede_vlan_filter_set(eth_dev, 0, 0); 936 } 937 } 938 } 939 940 if (mask & ETH_VLAN_EXTEND_MASK) 941 DP_ERR(edev, "Extend VLAN not supported\n"); 942 943 qdev->vlan_offload_mask = mask; 944 945 DP_INFO(edev, "VLAN offload mask %d\n", mask); 946 947 return 0; 948 } 949 950 static void qede_prandom_bytes(uint32_t *buff) 951 { 952 uint8_t i; 953 954 srand((unsigned int)time(NULL)); 955 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 956 buff[i] = rand(); 957 } 958 959 int qede_config_rss(struct rte_eth_dev *eth_dev) 960 { 961 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 962 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 963 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE]; 964 struct rte_eth_rss_reta_entry64 reta_conf[2]; 965 struct rte_eth_rss_conf rss_conf; 966 uint32_t i, id, pos, q; 967 968 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 969 if (!rss_conf.rss_key) { 970 DP_INFO(edev, "Applying driver default key\n"); 971 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 972 qede_prandom_bytes(&def_rss_key[0]); 973 rss_conf.rss_key = (uint8_t *)&def_rss_key[0]; 974 } 975 976 /* Configure RSS hash */ 977 if (qede_rss_hash_update(eth_dev, &rss_conf)) 978 return -EINVAL; 979 980 /* Configure default RETA */ 981 memset(reta_conf, 0, sizeof(reta_conf)); 982 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) 983 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX; 984 985 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 986 id = i / RTE_RETA_GROUP_SIZE; 987 pos = i % RTE_RETA_GROUP_SIZE; 988 q = i % QEDE_RSS_COUNT(qdev); 989 reta_conf[id].reta[pos] = q; 990 } 991 if (qede_rss_reta_update(eth_dev, &reta_conf[0], 992 ECORE_RSS_IND_TABLE_SIZE)) 993 return -EINVAL; 994 995 return 0; 996 } 997 998 static void qede_fastpath_start(struct ecore_dev *edev) 999 { 1000 struct ecore_hwfn *p_hwfn; 1001 int i; 1002 1003 for_each_hwfn(edev, i) { 1004 p_hwfn = &edev->hwfns[i]; 1005 ecore_hw_start_fastpath(p_hwfn); 1006 } 1007 } 1008 1009 static int qede_dev_start(struct rte_eth_dev *eth_dev) 1010 { 1011 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1012 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1013 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 1014 1015 PMD_INIT_FUNC_TRACE(edev); 1016 1017 /* Update MTU only if it has changed */ 1018 if (eth_dev->data->mtu != qdev->mtu) { 1019 if (qede_update_mtu(eth_dev, qdev->mtu)) 1020 goto err; 1021 } 1022 1023 /* Configure TPA parameters */ 1024 if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) { 1025 if (qede_enable_tpa(eth_dev, true)) 1026 return -EINVAL; 1027 /* Enable scatter mode for LRO */ 1028 if (!eth_dev->data->scattered_rx) 1029 rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER; 1030 } 1031 1032 /* Start queues */ 1033 if (qede_start_queues(eth_dev)) 1034 goto err; 1035 1036 if (IS_PF(edev)) 
		qede_reset_queue_stats(qdev, true);

	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
	 * enabling RSS. Hence RSS configuration is deferred up to this point.
	 * Also, we would like to retain similar behavior in PF case, so we
	 * don't do PF/VF specific check here.
	 */
	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		if (qede_config_rss(eth_dev))
			goto err;

	/* Enable vport */
	if (qede_activate_vport(eth_dev, true))
		goto err;

	/* Update link status */
	qede_link_update(eth_dev, 0);

	/* Start/resume traffic */
	qede_fastpath_start(edev);

	DP_INFO(edev, "Device started\n");

	return 0;
err:
	DP_ERR(edev, "Device start failed\n");
	return -1; /* common error code is < 0 */
}

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Disable vport */
	if (qede_activate_vport(eth_dev, false))
		return;

	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false);

	/* Stop queues */
	qede_stop_queues(eth_dev);

	/* Disable traffic */
	ecore_hw_stop_fastpath(edev); /* TBD - loop */

	DP_INFO(edev, "Device is stopped\n");
}

const char *valid_args[] = {
	QEDE_NPAR_TX_SWITCHING,
	QEDE_VF_TX_SWITCHING,
	NULL,
};

static int qede_args_check(const char *key, const char *val, void *opaque)
{
	unsigned long tmp;
	int ret = 0;
	struct rte_eth_dev *eth_dev = opaque;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DP_INFO(edev, "%s: \"%s\" is not a valid integer\n", key, val);
		return errno;
	}

	if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
	    ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
		qdev->enable_tx_switching = !!tmp;
		DP_INFO(edev, "%s %s tx-switching\n",
			tmp ? "Enabling" : "Disabling",
			strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
			"VF" : "NPAR");
	}

	return ret;
}

static int qede_args(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	struct rte_kvargs *kvlist;
	struct rte_devargs *devargs;
	int ret;
	int i;

	devargs = pci_dev->device.devargs;
	if (!devargs)
		return 0; /* return success */

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Process parameters.
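	 * Keys not listed in valid_args are rejected by rte_kvargs_parse()
	 * above. Example usage (hypothetical PCI address):
	 *   -w 05:00.0,vf_tx_switching=0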
*/ 1138 for (i = 0; (valid_args[i] != NULL); ++i) { 1139 if (rte_kvargs_count(kvlist, valid_args[i])) { 1140 ret = rte_kvargs_process(kvlist, valid_args[i], 1141 qede_args_check, eth_dev); 1142 if (ret != ECORE_SUCCESS) { 1143 rte_kvargs_free(kvlist); 1144 return ret; 1145 } 1146 } 1147 } 1148 rte_kvargs_free(kvlist); 1149 1150 return 0; 1151 } 1152 1153 static int qede_dev_configure(struct rte_eth_dev *eth_dev) 1154 { 1155 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1156 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1157 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 1158 int ret; 1159 1160 PMD_INIT_FUNC_TRACE(edev); 1161 1162 /* Check requirements for 100G mode */ 1163 if (ECORE_IS_CMT(edev)) { 1164 if (eth_dev->data->nb_rx_queues < 2 || 1165 eth_dev->data->nb_tx_queues < 2) { 1166 DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n"); 1167 return -EINVAL; 1168 } 1169 1170 if ((eth_dev->data->nb_rx_queues % 2 != 0) || 1171 (eth_dev->data->nb_tx_queues % 2 != 0)) { 1172 DP_ERR(edev, 1173 "100G mode needs even no. of RX/TX queues\n"); 1174 return -EINVAL; 1175 } 1176 } 1177 1178 /* We need to have min 1 RX queue.There is no min check in 1179 * rte_eth_dev_configure(), so we are checking it here. 1180 */ 1181 if (eth_dev->data->nb_rx_queues == 0) { 1182 DP_ERR(edev, "Minimum one RX queue is required\n"); 1183 return -EINVAL; 1184 } 1185 1186 /* Enable Tx switching by default */ 1187 qdev->enable_tx_switching = 1; 1188 1189 /* Parse devargs and fix up rxmode */ 1190 if (qede_args(eth_dev)) 1191 DP_NOTICE(edev, false, 1192 "Invalid devargs supplied, requested change will not take effect\n"); 1193 1194 if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || 1195 rxmode->mq_mode == ETH_MQ_RX_RSS)) { 1196 DP_ERR(edev, "Unsupported multi-queue mode\n"); 1197 return -ENOTSUP; 1198 } 1199 /* Flow director mode check */ 1200 if (qede_check_fdir_support(eth_dev)) 1201 return -ENOTSUP; 1202 1203 qede_dealloc_fp_resc(eth_dev); 1204 qdev->num_tx_queues = eth_dev->data->nb_tx_queues; 1205 qdev->num_rx_queues = eth_dev->data->nb_rx_queues; 1206 if (qede_alloc_fp_resc(qdev)) 1207 return -ENOMEM; 1208 1209 /* If jumbo enabled adjust MTU */ 1210 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 1211 eth_dev->data->mtu = 1212 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - 1213 ETHER_HDR_LEN - ETHER_CRC_LEN; 1214 1215 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) 1216 eth_dev->data->scattered_rx = 1; 1217 1218 if (qede_start_vport(qdev, eth_dev->data->mtu)) 1219 return -1; 1220 1221 qdev->mtu = eth_dev->data->mtu; 1222 1223 /* Enable VLAN offloads by default */ 1224 ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | 1225 ETH_VLAN_FILTER_MASK); 1226 if (ret) 1227 return ret; 1228 1229 DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n", 1230 QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)); 1231 1232 return 0; 1233 } 1234 1235 /* Info about HW descriptor ring limitations */ 1236 static const struct rte_eth_desc_lim qede_rx_desc_lim = { 1237 .nb_max = 0x8000, /* 32K */ 1238 .nb_min = 128, 1239 .nb_align = 128 /* lowest common multiple */ 1240 }; 1241 1242 static const struct rte_eth_desc_lim qede_tx_desc_lim = { 1243 .nb_max = 0x8000, /* 32K */ 1244 .nb_min = 256, 1245 .nb_align = 256, 1246 .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET, 1247 .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 1248 }; 1249 1250 static void 1251 qede_dev_info_get(struct rte_eth_dev *eth_dev, 1252 struct rte_eth_dev_info *dev_info) 1253 { 1254 struct qede_dev *qdev = eth_dev->data->dev_private; 1255 struct ecore_dev 
*edev = &qdev->edev; 1256 struct qed_link_output link; 1257 uint32_t speed_cap = 0; 1258 1259 PMD_INIT_FUNC_TRACE(edev); 1260 1261 dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; 1262 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; 1263 dev_info->rx_desc_lim = qede_rx_desc_lim; 1264 dev_info->tx_desc_lim = qede_tx_desc_lim; 1265 1266 if (IS_PF(edev)) 1267 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1268 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2); 1269 else 1270 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1271 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF); 1272 dev_info->max_tx_queues = dev_info->max_rx_queues; 1273 1274 dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters; 1275 dev_info->max_vfs = 0; 1276 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; 1277 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 1278 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; 1279 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 1280 DEV_RX_OFFLOAD_UDP_CKSUM | 1281 DEV_RX_OFFLOAD_TCP_CKSUM | 1282 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1283 DEV_RX_OFFLOAD_TCP_LRO | 1284 DEV_RX_OFFLOAD_KEEP_CRC | 1285 DEV_RX_OFFLOAD_SCATTER | 1286 DEV_RX_OFFLOAD_JUMBO_FRAME | 1287 DEV_RX_OFFLOAD_VLAN_FILTER | 1288 DEV_RX_OFFLOAD_VLAN_STRIP); 1289 dev_info->rx_queue_offload_capa = 0; 1290 1291 /* TX offloads are on a per-packet basis, so it is applicable 1292 * to both at port and queue levels. 1293 */ 1294 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | 1295 DEV_TX_OFFLOAD_IPV4_CKSUM | 1296 DEV_TX_OFFLOAD_UDP_CKSUM | 1297 DEV_TX_OFFLOAD_TCP_CKSUM | 1298 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 1299 DEV_TX_OFFLOAD_MULTI_SEGS | 1300 DEV_TX_OFFLOAD_TCP_TSO | 1301 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 1302 DEV_TX_OFFLOAD_GENEVE_TNL_TSO); 1303 dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa; 1304 1305 dev_info->default_txconf = (struct rte_eth_txconf) { 1306 .offloads = DEV_TX_OFFLOAD_MULTI_SEGS, 1307 }; 1308 1309 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1310 /* Packets are always dropped if no descriptors are available */ 1311 .rx_drop_en = 1, 1312 .offloads = 0, 1313 }; 1314 1315 memset(&link, 0, sizeof(struct qed_link_output)); 1316 qdev->ops->common->get_link(edev, &link); 1317 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1318 speed_cap |= ETH_LINK_SPEED_1G; 1319 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1320 speed_cap |= ETH_LINK_SPEED_10G; 1321 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1322 speed_cap |= ETH_LINK_SPEED_25G; 1323 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1324 speed_cap |= ETH_LINK_SPEED_40G; 1325 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1326 speed_cap |= ETH_LINK_SPEED_50G; 1327 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1328 speed_cap |= ETH_LINK_SPEED_100G; 1329 dev_info->speed_capa = speed_cap; 1330 } 1331 1332 /* return 0 means link status changed, -1 means not changed */ 1333 int 1334 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) 1335 { 1336 struct qede_dev *qdev = eth_dev->data->dev_private; 1337 struct ecore_dev *edev = &qdev->edev; 1338 struct qed_link_output q_link; 1339 struct rte_eth_link link; 1340 uint16_t link_duplex; 1341 1342 memset(&q_link, 0, sizeof(q_link)); 1343 memset(&link, 0, sizeof(link)); 1344 1345 qdev->ops->common->get_link(edev, &q_link); 1346 1347 /* Link Speed */ 1348 link.link_speed = q_link.speed; 1349 
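	/* The base driver reports speed in Mbps, which is also the unit
	 * rte_eth_link.link_speed expects, so no conversion is needed.
	 */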
1350 /* Link Mode */ 1351 switch (q_link.duplex) { 1352 case QEDE_DUPLEX_HALF: 1353 link_duplex = ETH_LINK_HALF_DUPLEX; 1354 break; 1355 case QEDE_DUPLEX_FULL: 1356 link_duplex = ETH_LINK_FULL_DUPLEX; 1357 break; 1358 case QEDE_DUPLEX_UNKNOWN: 1359 default: 1360 link_duplex = -1; 1361 } 1362 link.link_duplex = link_duplex; 1363 1364 /* Link Status */ 1365 link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN; 1366 1367 /* AN */ 1368 link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? 1369 ETH_LINK_AUTONEG : ETH_LINK_FIXED; 1370 1371 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", 1372 link.link_speed, link.link_duplex, 1373 link.link_autoneg, link.link_status); 1374 1375 return rte_eth_linkstatus_set(eth_dev, &link); 1376 } 1377 1378 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev) 1379 { 1380 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT 1381 struct qede_dev *qdev = eth_dev->data->dev_private; 1382 struct ecore_dev *edev = &qdev->edev; 1383 1384 PMD_INIT_FUNC_TRACE(edev); 1385 #endif 1386 1387 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; 1388 1389 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1390 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1391 1392 qed_configure_filter_rx_mode(eth_dev, type); 1393 } 1394 1395 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev) 1396 { 1397 #ifdef RTE_LIBRTE_QEDE_DEBUG_INIT 1398 struct qede_dev *qdev = eth_dev->data->dev_private; 1399 struct ecore_dev *edev = &qdev->edev; 1400 1401 PMD_INIT_FUNC_TRACE(edev); 1402 #endif 1403 1404 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1405 qed_configure_filter_rx_mode(eth_dev, 1406 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC); 1407 else 1408 qed_configure_filter_rx_mode(eth_dev, 1409 QED_FILTER_RX_MODE_TYPE_REGULAR); 1410 } 1411 1412 static void qede_poll_sp_sb_cb(void *param) 1413 { 1414 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 1415 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1416 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1417 int rc; 1418 1419 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 1420 qede_interrupt_action(&edev->hwfns[1]); 1421 1422 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, 1423 qede_poll_sp_sb_cb, 1424 (void *)eth_dev); 1425 if (rc != 0) { 1426 DP_ERR(edev, "Unable to start periodic" 1427 " timer rc %d\n", rc); 1428 assert(false && "Unable to start periodic timer"); 1429 } 1430 } 1431 1432 static void qede_dev_close(struct rte_eth_dev *eth_dev) 1433 { 1434 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1435 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1436 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1437 1438 PMD_INIT_FUNC_TRACE(edev); 1439 1440 /* dev_stop() shall cleanup fp resources in hw but without releasing 1441 * dma memories and sw structures so that dev_start() can be called 1442 * by the app without reconfiguration. 
However, in dev_close() we 1443 * can release all the resources and device can be brought up newly 1444 */ 1445 if (eth_dev->data->dev_started) 1446 qede_dev_stop(eth_dev); 1447 1448 qede_stop_vport(edev); 1449 qdev->vport_started = false; 1450 qede_fdir_dealloc_resc(eth_dev); 1451 qede_dealloc_fp_resc(eth_dev); 1452 1453 eth_dev->data->nb_rx_queues = 0; 1454 eth_dev->data->nb_tx_queues = 0; 1455 1456 /* Bring the link down */ 1457 qede_dev_set_link_state(eth_dev, false); 1458 qdev->ops->common->slowpath_stop(edev); 1459 qdev->ops->common->remove(edev); 1460 rte_intr_disable(&pci_dev->intr_handle); 1461 1462 switch (pci_dev->intr_handle.type) { 1463 case RTE_INTR_HANDLE_UIO_INTX: 1464 case RTE_INTR_HANDLE_VFIO_LEGACY: 1465 rte_intr_callback_unregister(&pci_dev->intr_handle, 1466 qede_interrupt_handler_intx, 1467 (void *)eth_dev); 1468 break; 1469 default: 1470 rte_intr_callback_unregister(&pci_dev->intr_handle, 1471 qede_interrupt_handler, 1472 (void *)eth_dev); 1473 } 1474 1475 if (ECORE_IS_CMT(edev)) 1476 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); 1477 } 1478 1479 static int 1480 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) 1481 { 1482 struct qede_dev *qdev = eth_dev->data->dev_private; 1483 struct ecore_dev *edev = &qdev->edev; 1484 struct ecore_eth_stats stats; 1485 unsigned int i = 0, j = 0, qid; 1486 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 1487 struct qede_tx_queue *txq; 1488 1489 ecore_get_vport_stats(edev, &stats); 1490 1491 /* RX Stats */ 1492 eth_stats->ipackets = stats.common.rx_ucast_pkts + 1493 stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts; 1494 1495 eth_stats->ibytes = stats.common.rx_ucast_bytes + 1496 stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes; 1497 1498 eth_stats->ierrors = stats.common.rx_crc_errors + 1499 stats.common.rx_align_errors + 1500 stats.common.rx_carrier_errors + 1501 stats.common.rx_oversize_packets + 1502 stats.common.rx_jabbers + stats.common.rx_undersize_packets; 1503 1504 eth_stats->rx_nombuf = stats.common.no_buff_discards; 1505 1506 eth_stats->imissed = stats.common.mftag_filter_discards + 1507 stats.common.mac_filter_discards + 1508 stats.common.no_buff_discards + 1509 stats.common.brb_truncates + stats.common.brb_discards; 1510 1511 /* TX stats */ 1512 eth_stats->opackets = stats.common.tx_ucast_pkts + 1513 stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts; 1514 1515 eth_stats->obytes = stats.common.tx_ucast_bytes + 1516 stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes; 1517 1518 eth_stats->oerrors = stats.common.tx_err_drop_pkts; 1519 1520 /* Queue stats */ 1521 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1522 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1523 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev), 1524 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1525 if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) || 1526 (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev))) 1527 DP_VERBOSE(edev, ECORE_MSG_DEBUG, 1528 "Not all the queue stats will be displayed. 
Set" 1529 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" 1530 " appropriately and retry.\n"); 1531 1532 for_each_rss(qid) { 1533 eth_stats->q_ipackets[i] = 1534 *(uint64_t *)( 1535 ((char *)(qdev->fp_array[qid].rxq)) + 1536 offsetof(struct qede_rx_queue, 1537 rcv_pkts)); 1538 eth_stats->q_errors[i] = 1539 *(uint64_t *)( 1540 ((char *)(qdev->fp_array[qid].rxq)) + 1541 offsetof(struct qede_rx_queue, 1542 rx_hw_errors)) + 1543 *(uint64_t *)( 1544 ((char *)(qdev->fp_array[qid].rxq)) + 1545 offsetof(struct qede_rx_queue, 1546 rx_alloc_errors)); 1547 i++; 1548 if (i == rxq_stat_cntrs) 1549 break; 1550 } 1551 1552 for_each_tss(qid) { 1553 txq = qdev->fp_array[qid].txq; 1554 eth_stats->q_opackets[j] = 1555 *((uint64_t *)(uintptr_t) 1556 (((uint64_t)(uintptr_t)(txq)) + 1557 offsetof(struct qede_tx_queue, 1558 xmit_pkts))); 1559 j++; 1560 if (j == txq_stat_cntrs) 1561 break; 1562 } 1563 1564 return 0; 1565 } 1566 1567 static unsigned 1568 qede_get_xstats_count(struct qede_dev *qdev) { 1569 if (ECORE_IS_BB(&qdev->edev)) 1570 return RTE_DIM(qede_xstats_strings) + 1571 RTE_DIM(qede_bb_xstats_strings) + 1572 (RTE_DIM(qede_rxq_xstats_strings) * 1573 RTE_MIN(QEDE_RSS_COUNT(qdev), 1574 RTE_ETHDEV_QUEUE_STAT_CNTRS)); 1575 else 1576 return RTE_DIM(qede_xstats_strings) + 1577 RTE_DIM(qede_ah_xstats_strings) + 1578 (RTE_DIM(qede_rxq_xstats_strings) * 1579 RTE_MIN(QEDE_RSS_COUNT(qdev), 1580 RTE_ETHDEV_QUEUE_STAT_CNTRS)); 1581 } 1582 1583 static int 1584 qede_get_xstats_names(struct rte_eth_dev *dev, 1585 struct rte_eth_xstat_name *xstats_names, 1586 __rte_unused unsigned int limit) 1587 { 1588 struct qede_dev *qdev = dev->data->dev_private; 1589 struct ecore_dev *edev = &qdev->edev; 1590 const unsigned int stat_cnt = qede_get_xstats_count(qdev); 1591 unsigned int i, qid, stat_idx = 0; 1592 unsigned int rxq_stat_cntrs; 1593 1594 if (xstats_names != NULL) { 1595 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1596 snprintf(xstats_names[stat_idx].name, 1597 sizeof(xstats_names[stat_idx].name), 1598 "%s", 1599 qede_xstats_strings[i].name); 1600 stat_idx++; 1601 } 1602 1603 if (ECORE_IS_BB(edev)) { 1604 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { 1605 snprintf(xstats_names[stat_idx].name, 1606 sizeof(xstats_names[stat_idx].name), 1607 "%s", 1608 qede_bb_xstats_strings[i].name); 1609 stat_idx++; 1610 } 1611 } else { 1612 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { 1613 snprintf(xstats_names[stat_idx].name, 1614 sizeof(xstats_names[stat_idx].name), 1615 "%s", 1616 qede_ah_xstats_strings[i].name); 1617 stat_idx++; 1618 } 1619 } 1620 1621 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1622 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1623 for (qid = 0; qid < rxq_stat_cntrs; qid++) { 1624 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1625 snprintf(xstats_names[stat_idx].name, 1626 sizeof(xstats_names[stat_idx].name), 1627 "%.4s%d%s", 1628 qede_rxq_xstats_strings[i].name, qid, 1629 qede_rxq_xstats_strings[i].name + 4); 1630 stat_idx++; 1631 } 1632 } 1633 } 1634 1635 return stat_cnt; 1636 } 1637 1638 static int 1639 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1640 unsigned int n) 1641 { 1642 struct qede_dev *qdev = dev->data->dev_private; 1643 struct ecore_dev *edev = &qdev->edev; 1644 struct ecore_eth_stats stats; 1645 const unsigned int num = qede_get_xstats_count(qdev); 1646 unsigned int i, qid, stat_idx = 0; 1647 unsigned int rxq_stat_cntrs; 1648 1649 if (n < num) 1650 return num; 1651 1652 ecore_get_vport_stats(edev, &stats); 1653 1654 for (i = 0; i < 
	     RTE_DIM(qede_xstats_strings); i++) {
		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
					   qede_xstats_strings[i].offset);
		xstats[stat_idx].id = stat_idx;
		stat_idx++;
	}

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_bb_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	} else {
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_ah_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	/* Only rxq_stat_cntrs queues are accounted for in
	 * qede_get_xstats_count(), so do not iterate over all RSS queues
	 * here; doing so could write past the caller-provided array.
	 */
	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
		for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
			xstats[stat_idx].value = *(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				 qede_rxq_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	return stat_idx;
}

static void
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, true);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, false);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static int
qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
		      uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint8_t i;

	if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
static int
qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
		      uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint8_t i;

	if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
		DP_ERR(edev, "Reached max multicast filters limit, "
			     "Please enable multicast promisc mode\n");
		return -ENOSPC;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		if (!is_multicast_ether_addr(&mc_addrs[i])) {
			DP_ERR(edev, "Not a valid multicast MAC\n");
			return -EINVAL;
		}
	}

	/* Flush all existing entries */
	if (qede_del_mcast_filters(eth_dev))
		return -1;

	/* Set new mcast list */
	return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
}

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (IS_PF(edev)) {
		struct ecore_sp_vport_update_params params;

		memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
		params.vport_id = 0;
		params.mtu = mtu;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			params.opaque_fid = p_hwfn->hw_info.opaque_fid;
			rc = ecore_sp_vport_update(p_hwfn, &params,
					ECORE_SPQ_MODE_EBLOCK, NULL);
			if (rc != ECORE_SUCCESS)
				goto err;
		}
	} else {
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
			if (rc == ECORE_INVAL) {
				DP_INFO(edev, "VF MTU Update TLV not supported\n");
				/* Recreate vport */
				rc = qede_start_vport(qdev, mtu);
				if (rc != ECORE_SUCCESS)
					goto err;

				/* Restore config lost due to vport stop */
				if (eth_dev->data->promiscuous)
					qede_promiscuous_enable(eth_dev);
				else
					qede_promiscuous_disable(eth_dev);

				if (eth_dev->data->all_multicast)
					qede_allmulticast_enable(eth_dev);
				else
					qede_allmulticast_disable(eth_dev);

				qede_vlan_offload_set(eth_dev,
						      qdev->vlan_offload_mask);
			} else if (rc != ECORE_SUCCESS) {
				goto err;
			}
		}
	}
	DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);

	return 0;

err:
	DP_ERR(edev, "Failed to update MTU\n");
	return -1;
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_GRE,
		/* Inner */
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
		return ptypes;

	return NULL;
}

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
	*rss_caps = 0;
	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}
int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params rss_params;
	struct ecore_hwfn *p_hwfn;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	uint8_t len = rss_conf->rss_key_len;
	uint8_t idx;
	uint8_t i;
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&rss_params, 0, sizeof(rss_params));

	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
		(unsigned long)hf, len, key);

	if (hf != 0) {
		/* Enabling RSS */
		DP_INFO(edev, "Enabling rss\n");

		/* RSS caps */
		qede_init_rss_caps(&rss_params.rss_caps, hf);
		rss_params.update_rss_capabilities = 1;

		/* RSS hash key */
		if (key) {
			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
				DP_ERR(edev, "RSS key length exceeds limit\n");
				return -EINVAL;
			}
			DP_INFO(edev, "Applying user supplied hash key\n");
			rss_params.update_rss_key = 1;
			memcpy(&rss_params.rss_key, key, len);
		}
		rss_params.rss_enable = 1;
	}

	rss_params.update_rss_config = 1;
	/* tbl_size has to be set with capabilities */
	rss_params.rss_table_size_log = 7;
	vport_update_params.vport_id = 0;
	/* pass the L2 handles instead of qids */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		idx = i % QEDE_RSS_COUNT(qdev);
		rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
	}
	vport_update_params.rss_params = &rss_params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			return rc;
		}
	}
	qdev->rss_enable = rss_params.rss_enable;

	/* Update local structure for hash query */
	qdev->rss_conf.rss_hf = hf;
	qdev->rss_conf.rss_key_len = len;
	if (qdev->rss_enable) {
		if (qdev->rss_conf.rss_key == NULL) {
			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
			if (qdev->rss_conf.rss_key == NULL) {
				DP_ERR(edev, "No memory to store RSS key\n");
				return -ENOMEM;
			}
		}
		if (key && len) {
			DP_INFO(edev, "Storing RSS key\n");
			memcpy(qdev->rss_conf.rss_key, key, len);
		}
	} else if (!qdev->rss_enable && len == 0) {
		if (qdev->rss_conf.rss_key) {
			free(qdev->rss_conf.rss_key);
			qdev->rss_conf.rss_key = NULL;
			DP_INFO(edev, "Free RSS key\n");
		}
	}

	return 0;
}

static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);
	return 0;
}
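
/* For CMT (100G) devices, split the single indirection table built by the
 * caller into one table per engine and decide whether RSS is still needed;
 * with only one queue per hwfn RSS can be disabled altogether.
 */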
static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
				     struct ecore_rss_params *rss)
{
	int i, fn;
	bool rss_mode = 1; /* enable */
	struct ecore_queue_cid *cid;
	struct ecore_rss_params *t_rss;

	/* In regular scenario, we'd simply need to take input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2-queues on CMT doesn't
	 * require RSS.
	 */

	/* CMT should be round-robin */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		cid = rss->rss_ind_table[i];

		if (cid->p_owner == ECORE_LEADING_HWFN(edev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
	}

	t_rss = &rss[1];
	t_rss->update_rss_ind_table = 1;
	t_rss->rss_table_size_log = 7;
	t_rss->update_rss_config = 1;

	/* Make sure RSS is actually required */
	for_each_hwfn(edev, fn) {
		for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
		     i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}

		if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
			DP_INFO(edev,
				"CMT - 1 queue per-hwfn; Disabling RSS\n");
			rss_mode = 0;
			goto out;
		}
	}

out:
	t_rss->rss_enable = rss_mode;

	return rss_mode;
}

int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params *params;
	struct ecore_hwfn *p_hwfn;
	uint16_t i, idx, shift;
	uint8_t entry;
	int rc = 0;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
			     RTE_CACHE_LINE_SIZE);
	if (params == NULL) {
		DP_ERR(edev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = reta_conf[idx].reta[shift];
			/* Pass rxq handles to ecore */
			params->rss_ind_table[i] =
					qdev->fp_array[entry].rxq->handle;
			/* Update the local copy for RETA query command */
			qdev->rss_ind_table[i] = entry;
		}
	}

	params->update_rss_ind_table = 1;
	params->rss_table_size_log = 7;
	params->update_rss_config = 1;

	/* Fix up RETA for CMT mode device */
	if (ECORE_IS_CMT(edev))
		qdev->rss_enable = qede_update_rss_parm_cmt(edev,
							    params);
	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params->rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			goto out;
		}
	}

out:
	rte_free(params);
	return rc;
}
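
/* RETA query is served from the local copy maintained by
 * qede_rss_reta_update().
 */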
static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}
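
/* Changing the MTU requires quiescing the port: Rx/Tx burst handlers are
 * temporarily replaced with dummies, the port is stopped if it was running,
 * per-queue Rx buffer sizes are recalculated and the port is restarted.
 */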
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_dev_info dev_info = {0};
	struct qede_fastpath *fp;
	uint32_t max_rx_pkt_len;
	uint32_t frame_size;
	uint16_t rx_buf_size;
	uint16_t bufsz;
	bool restart = false;
	int i;

	PMD_INIT_FUNC_TRACE(edev);
	qede_dev_info_get(dev, &dev_info);
	max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
		       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
		       ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
		return -EINVAL;
	}
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
			dev->data->min_rx_buf_size);
		return -EINVAL;
	}
	/* Temporarily replace I/O functions with dummy ones. It cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
	if (dev->data->dev_started) {
		dev->data->dev_started = 0;
		qede_dev_stop(dev);
		restart = true;
	}
	rte_delay_ms(1000);
	qdev->mtu = mtu;

	/* Fix up RX buf size for all queues of the port */
	for_each_rss(i) {
		fp = &qdev->fp_array[i];
		if (fp->rxq != NULL) {
			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
			if (dev->data->scattered_rx)
				rx_buf_size = bufsz + ETHER_HDR_LEN +
					      ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
			else
				rx_buf_size = frame_size;
			rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
			fp->rxq->rx_buf_size = rx_buf_size;
			DP_INFO(edev, "RX buffer size %u\n", rx_buf_size);
		}
	}
	if (max_rx_pkt_len > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (!dev->data->dev_started && restart) {
		qede_dev_start(dev);
		dev->data->dev_started = 1;
	}

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
	/* Reassign back */
	dev->rx_pkt_burst = qede_recv_pkts;
	dev->tx_pkt_burst = qede_xmit_pkts;

	return 0;
}

static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};
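
/* VF dev_ops: a reduced set of the PF callbacks above; flow control and
 * filter_ctrl are not exposed for VFs.
 */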
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}
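
/* Probe-time initialization shared by PF and VF devices: register the
 * interrupt handler, start the ecore slowpath, allocate the MAC address
 * table and set up per-adapter defaults.
 */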
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t int_mode;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}
	qede_update_pf_params(edev);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		int_mode = ECORE_INT_MODE_INTA;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler_intx,
					   (void *)eth_dev);
		break;
	default:
		int_mode = ECORE_INT_MODE_MSIX;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler,
					   (void *)eth_dev);
	}

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));

	params.int_mode = int_mode;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
				(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}
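
	/* Publish the primary MAC address: the PF reads it from HW, while a
	 * VF picks up the (possibly forced) MAC from the PF bulletin board.
	 */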
	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->fdir_info.fdir_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	SLIST_INIT(&adapter->mc_list_head);
	adapter->mtu = ETHER_MTU;
	adapter->vport_started = false;

	/* VF tunnel offloads is enabled by default in PF driver */
	adapter->vxlan.num_filters = 0;
	adapter->geneve.num_filters = 0;
	adapter->ipgre.num_filters = 0;
	if (is_vf) {
		adapter->vxlan.enable = true;
		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
		adapter->geneve.enable = true;
		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
					      ETH_TUNNEL_FILTER_IVLAN;
		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
		adapter->ipgre.enable = true;
		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
	} else {
		adapter->vxlan.enable = false;
		adapter->geneve.enable = false;
		adapter->ipgre.enable = false;
	}

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}
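
/* Teardown shared by PF and VF: close the device and release the resources
 * allocated in qede_common_dev_init(); runs only in the primary process.
 */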
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};

static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				    struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");

RTE_INIT(qede_init_log)
{
	qede_logtype_init = rte_log_register("pmd.net.qede.init");
	if (qede_logtype_init >= 0)
		rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
	qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
	if (qede_logtype_driver >= 0)
		rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
}