1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2016 - 2018 Cavium Inc. 3 * All rights reserved. 4 * www.cavium.com 5 */ 6 7 #include "qede_ethdev.h" 8 #include <rte_string_fns.h> 9 #include <rte_alarm.h> 10 #include <rte_version.h> 11 #include <rte_kvargs.h> 12 13 /* Globals */ 14 int qede_logtype_init; 15 int qede_logtype_driver; 16 17 static const struct qed_eth_ops *qed_ops; 18 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev); 19 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev); 20 21 #define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */ 22 23 struct rte_qede_xstats_name_off { 24 char name[RTE_ETH_XSTATS_NAME_SIZE]; 25 uint64_t offset; 26 }; 27 28 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = { 29 {"rx_unicast_bytes", 30 offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)}, 31 {"rx_multicast_bytes", 32 offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)}, 33 {"rx_broadcast_bytes", 34 offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)}, 35 {"rx_unicast_packets", 36 offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)}, 37 {"rx_multicast_packets", 38 offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)}, 39 {"rx_broadcast_packets", 40 offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)}, 41 42 {"tx_unicast_bytes", 43 offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)}, 44 {"tx_multicast_bytes", 45 offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)}, 46 {"tx_broadcast_bytes", 47 offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)}, 48 {"tx_unicast_packets", 49 offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)}, 50 {"tx_multicast_packets", 51 offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)}, 52 {"tx_broadcast_packets", 53 offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)}, 54 55 {"rx_64_byte_packets", 56 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)}, 57 {"rx_65_to_127_byte_packets", 58 offsetof(struct ecore_eth_stats_common, 59 rx_65_to_127_byte_packets)}, 60 {"rx_128_to_255_byte_packets", 61 offsetof(struct ecore_eth_stats_common, 62 rx_128_to_255_byte_packets)}, 63 {"rx_256_to_511_byte_packets", 64 offsetof(struct ecore_eth_stats_common, 65 rx_256_to_511_byte_packets)}, 66 {"rx_512_to_1023_byte_packets", 67 offsetof(struct ecore_eth_stats_common, 68 rx_512_to_1023_byte_packets)}, 69 {"rx_1024_to_1518_byte_packets", 70 offsetof(struct ecore_eth_stats_common, 71 rx_1024_to_1518_byte_packets)}, 72 {"tx_64_byte_packets", 73 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)}, 74 {"tx_65_to_127_byte_packets", 75 offsetof(struct ecore_eth_stats_common, 76 tx_65_to_127_byte_packets)}, 77 {"tx_128_to_255_byte_packets", 78 offsetof(struct ecore_eth_stats_common, 79 tx_128_to_255_byte_packets)}, 80 {"tx_256_to_511_byte_packets", 81 offsetof(struct ecore_eth_stats_common, 82 tx_256_to_511_byte_packets)}, 83 {"tx_512_to_1023_byte_packets", 84 offsetof(struct ecore_eth_stats_common, 85 tx_512_to_1023_byte_packets)}, 86 {"tx_1024_to_1518_byte_packets", 87 offsetof(struct ecore_eth_stats_common, 88 tx_1024_to_1518_byte_packets)}, 89 90 {"rx_mac_crtl_frames", 91 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)}, 92 {"tx_mac_control_frames", 93 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)}, 94 {"rx_pause_frames", 95 offsetof(struct ecore_eth_stats_common, rx_pause_frames)}, 96 {"tx_pause_frames", 97 offsetof(struct ecore_eth_stats_common, tx_pause_frames)}, 98 {"rx_priority_flow_control_frames", 99 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)}, 
100 {"tx_priority_flow_control_frames", 101 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)}, 102 103 {"rx_crc_errors", 104 offsetof(struct ecore_eth_stats_common, rx_crc_errors)}, 105 {"rx_align_errors", 106 offsetof(struct ecore_eth_stats_common, rx_align_errors)}, 107 {"rx_carrier_errors", 108 offsetof(struct ecore_eth_stats_common, rx_carrier_errors)}, 109 {"rx_oversize_packet_errors", 110 offsetof(struct ecore_eth_stats_common, rx_oversize_packets)}, 111 {"rx_jabber_errors", 112 offsetof(struct ecore_eth_stats_common, rx_jabbers)}, 113 {"rx_undersize_packet_errors", 114 offsetof(struct ecore_eth_stats_common, rx_undersize_packets)}, 115 {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)}, 116 {"rx_host_buffer_not_available", 117 offsetof(struct ecore_eth_stats_common, no_buff_discards)}, 118 /* Number of packets discarded because they are bigger than MTU */ 119 {"rx_packet_too_big_discards", 120 offsetof(struct ecore_eth_stats_common, 121 packet_too_big_discard)}, 122 {"rx_ttl_zero_discards", 123 offsetof(struct ecore_eth_stats_common, ttl0_discard)}, 124 {"rx_multi_function_tag_filter_discards", 125 offsetof(struct ecore_eth_stats_common, mftag_filter_discards)}, 126 {"rx_mac_filter_discards", 127 offsetof(struct ecore_eth_stats_common, mac_filter_discards)}, 128 {"rx_gft_filter_drop", 129 offsetof(struct ecore_eth_stats_common, gft_filter_drop)}, 130 {"rx_hw_buffer_truncates", 131 offsetof(struct ecore_eth_stats_common, brb_truncates)}, 132 {"rx_hw_buffer_discards", 133 offsetof(struct ecore_eth_stats_common, brb_discards)}, 134 {"tx_error_drop_packets", 135 offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)}, 136 137 {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)}, 138 {"rx_mac_unicast_packets", 139 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)}, 140 {"rx_mac_multicast_packets", 141 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)}, 142 {"rx_mac_broadcast_packets", 143 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)}, 144 {"rx_mac_frames_ok", 145 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)}, 146 {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)}, 147 {"tx_mac_unicast_packets", 148 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)}, 149 {"tx_mac_multicast_packets", 150 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)}, 151 {"tx_mac_broadcast_packets", 152 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)}, 153 154 {"lro_coalesced_packets", 155 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)}, 156 {"lro_coalesced_events", 157 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)}, 158 {"lro_aborts_num", 159 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)}, 160 {"lro_not_coalesced_packets", 161 offsetof(struct ecore_eth_stats_common, 162 tpa_not_coalesced_pkts)}, 163 {"lro_coalesced_bytes", 164 offsetof(struct ecore_eth_stats_common, 165 tpa_coalesced_bytes)}, 166 }; 167 168 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = { 169 {"rx_1519_to_1522_byte_packets", 170 offsetof(struct ecore_eth_stats, bb) + 171 offsetof(struct ecore_eth_stats_bb, 172 rx_1519_to_1522_byte_packets)}, 173 {"rx_1519_to_2047_byte_packets", 174 offsetof(struct ecore_eth_stats, bb) + 175 offsetof(struct ecore_eth_stats_bb, 176 rx_1519_to_2047_byte_packets)}, 177 {"rx_2048_to_4095_byte_packets", 178 offsetof(struct ecore_eth_stats, bb) + 179 offsetof(struct ecore_eth_stats_bb, 180 
rx_2048_to_4095_byte_packets)}, 181 {"rx_4096_to_9216_byte_packets", 182 offsetof(struct ecore_eth_stats, bb) + 183 offsetof(struct ecore_eth_stats_bb, 184 rx_4096_to_9216_byte_packets)}, 185 {"rx_9217_to_16383_byte_packets", 186 offsetof(struct ecore_eth_stats, bb) + 187 offsetof(struct ecore_eth_stats_bb, 188 rx_9217_to_16383_byte_packets)}, 189 190 {"tx_1519_to_2047_byte_packets", 191 offsetof(struct ecore_eth_stats, bb) + 192 offsetof(struct ecore_eth_stats_bb, 193 tx_1519_to_2047_byte_packets)}, 194 {"tx_2048_to_4095_byte_packets", 195 offsetof(struct ecore_eth_stats, bb) + 196 offsetof(struct ecore_eth_stats_bb, 197 tx_2048_to_4095_byte_packets)}, 198 {"tx_4096_to_9216_byte_packets", 199 offsetof(struct ecore_eth_stats, bb) + 200 offsetof(struct ecore_eth_stats_bb, 201 tx_4096_to_9216_byte_packets)}, 202 {"tx_9217_to_16383_byte_packets", 203 offsetof(struct ecore_eth_stats, bb) + 204 offsetof(struct ecore_eth_stats_bb, 205 tx_9217_to_16383_byte_packets)}, 206 207 {"tx_lpi_entry_count", 208 offsetof(struct ecore_eth_stats, bb) + 209 offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)}, 210 {"tx_total_collisions", 211 offsetof(struct ecore_eth_stats, bb) + 212 offsetof(struct ecore_eth_stats_bb, tx_total_collisions)}, 213 }; 214 215 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = { 216 {"rx_1519_to_max_byte_packets", 217 offsetof(struct ecore_eth_stats, ah) + 218 offsetof(struct ecore_eth_stats_ah, 219 rx_1519_to_max_byte_packets)}, 220 {"tx_1519_to_max_byte_packets", 221 offsetof(struct ecore_eth_stats, ah) + 222 offsetof(struct ecore_eth_stats_ah, 223 tx_1519_to_max_byte_packets)}, 224 }; 225 226 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = { 227 {"rx_q_segments", 228 offsetof(struct qede_rx_queue, rx_segs)}, 229 {"rx_q_hw_errors", 230 offsetof(struct qede_rx_queue, rx_hw_errors)}, 231 {"rx_q_allocation_errors", 232 offsetof(struct qede_rx_queue, rx_alloc_errors)} 233 }; 234 235 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) 236 { 237 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); 238 } 239 240 static void 241 qede_interrupt_handler_intx(void *param) 242 { 243 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 244 struct qede_dev *qdev = eth_dev->data->dev_private; 245 struct ecore_dev *edev = &qdev->edev; 246 u64 status; 247 248 /* Check if our device actually raised an interrupt */ 249 status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev)); 250 if (status & 0x1) { 251 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 252 253 if (rte_intr_ack(eth_dev->intr_handle)) 254 DP_ERR(edev, "rte_intr_ack failed\n"); 255 } 256 } 257 258 static void 259 qede_interrupt_handler(void *param) 260 { 261 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 262 struct qede_dev *qdev = eth_dev->data->dev_private; 263 struct ecore_dev *edev = &qdev->edev; 264 265 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 266 if (rte_intr_ack(eth_dev->intr_handle)) 267 DP_ERR(edev, "rte_intr_ack failed\n"); 268 } 269 270 static void 271 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info) 272 { 273 rte_memcpy(&qdev->dev_info, info, sizeof(*info)); 274 qdev->ops = qed_ops; 275 } 276 277 static void qede_print_adapter_info(struct qede_dev *qdev) 278 { 279 struct ecore_dev *edev = &qdev->edev; 280 struct qed_dev_info *info = &qdev->dev_info.common; 281 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; 282 283 DP_INFO(edev, "**************************************************\n"); 284 DP_INFO(edev, " DPDK 
version\t\t\t: %s\n", rte_version()); 285 DP_INFO(edev, " Chip details\t\t\t: %s %c%d\n", 286 ECORE_IS_BB(edev) ? "BB" : "AH", 287 'A' + edev->chip_rev, 288 (int)edev->chip_metal); 289 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", 290 QEDE_PMD_DRV_VERSION); 291 DP_INFO(edev, " Driver version\t\t\t: %s\n", ver_str); 292 293 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", 294 QEDE_PMD_BASE_VERSION); 295 DP_INFO(edev, " Base version\t\t\t: %s\n", ver_str); 296 297 if (!IS_VF(edev)) 298 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s", 299 QEDE_PMD_FW_VERSION); 300 else 301 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d", 302 info->fw_major, info->fw_minor, 303 info->fw_rev, info->fw_eng); 304 DP_INFO(edev, " Firmware version\t\t\t: %s\n", ver_str); 305 306 snprintf(ver_str, MCP_DRV_VER_STR_SIZE, 307 "%d.%d.%d.%d", 308 (info->mfw_rev & QED_MFW_VERSION_3_MASK) >> 309 QED_MFW_VERSION_3_OFFSET, 310 (info->mfw_rev & QED_MFW_VERSION_2_MASK) >> 311 QED_MFW_VERSION_2_OFFSET, 312 (info->mfw_rev & QED_MFW_VERSION_1_MASK) >> 313 QED_MFW_VERSION_1_OFFSET, 314 (info->mfw_rev & QED_MFW_VERSION_0_MASK) >> 315 QED_MFW_VERSION_0_OFFSET); 316 DP_INFO(edev, " Management Firmware version\t: %s\n", ver_str); 317 DP_INFO(edev, " Firmware file\t\t\t: %s\n", qede_fw_file); 318 DP_INFO(edev, "**************************************************\n"); 319 } 320 321 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats) 322 { 323 struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; 324 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 325 unsigned int i = 0, j = 0, qid; 326 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 327 struct qede_tx_queue *txq; 328 329 DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n"); 330 331 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev), 332 RTE_ETHDEV_QUEUE_STAT_CNTRS); 333 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev), 334 RTE_ETHDEV_QUEUE_STAT_CNTRS); 335 336 for (qid = 0; qid < qdev->num_rx_queues; qid++) { 337 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 338 offsetof(struct qede_rx_queue, rcv_pkts), 0, 339 sizeof(uint64_t)); 340 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 341 offsetof(struct qede_rx_queue, rx_hw_errors), 0, 342 sizeof(uint64_t)); 343 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 344 offsetof(struct qede_rx_queue, rx_alloc_errors), 0, 345 sizeof(uint64_t)); 346 347 if (xstats) 348 for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++) 349 OSAL_MEMSET((((char *) 350 (qdev->fp_array[qid].rxq)) + 351 qede_rxq_xstats_strings[j].offset), 352 0, 353 sizeof(uint64_t)); 354 355 i++; 356 if (i == rxq_stat_cntrs) 357 break; 358 } 359 360 i = 0; 361 362 for (qid = 0; qid < qdev->num_tx_queues; qid++) { 363 txq = qdev->fp_array[qid].txq; 364 365 OSAL_MEMSET((uint64_t *)(uintptr_t) 366 (((uint64_t)(uintptr_t)(txq)) + 367 offsetof(struct qede_tx_queue, xmit_pkts)), 0, 368 sizeof(uint64_t)); 369 370 i++; 371 if (i == txq_stat_cntrs) 372 break; 373 } 374 } 375 376 static int 377 qede_stop_vport(struct ecore_dev *edev) 378 { 379 struct ecore_hwfn *p_hwfn; 380 uint8_t vport_id; 381 int rc; 382 int i; 383 384 vport_id = 0; 385 for_each_hwfn(edev, i) { 386 p_hwfn = &edev->hwfns[i]; 387 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 388 vport_id); 389 if (rc != ECORE_SUCCESS) { 390 DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc); 391 return rc; 392 } 393 } 394 395 DP_INFO(edev, "vport stopped\n"); 396 397 return 0; 398 } 399 400 static int 401 qede_start_vport(struct qede_dev *qdev, uint16_t mtu) 402 { 403 struct 
ecore_dev *edev = QEDE_INIT_EDEV(qdev); 404 struct ecore_sp_vport_start_params params; 405 struct ecore_hwfn *p_hwfn; 406 int rc; 407 int i; 408 409 if (qdev->vport_started) 410 qede_stop_vport(edev); 411 412 memset(&params, 0, sizeof(params)); 413 params.vport_id = 0; 414 params.mtu = mtu; 415 /* @DPDK - Disable FW placement */ 416 params.zero_placement_offset = 1; 417 for_each_hwfn(edev, i) { 418 p_hwfn = &edev->hwfns[i]; 419 params.concrete_fid = p_hwfn->hw_info.concrete_fid; 420 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 421 rc = ecore_sp_vport_start(p_hwfn, &params); 422 if (rc != ECORE_SUCCESS) { 423 DP_ERR(edev, "Start V-PORT failed %d\n", rc); 424 return rc; 425 } 426 } 427 ecore_reset_vport_stats(edev); 428 qdev->vport_started = true; 429 DP_INFO(edev, "VPORT started with MTU = %u\n", mtu); 430 431 return 0; 432 } 433 434 #define QEDE_NPAR_TX_SWITCHING "npar_tx_switching" 435 #define QEDE_VF_TX_SWITCHING "vf_tx_switching" 436 437 /* Activate or deactivate vport via vport-update */ 438 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg) 439 { 440 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 441 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 442 struct ecore_sp_vport_update_params params; 443 struct ecore_hwfn *p_hwfn; 444 uint8_t i; 445 int rc = -1; 446 447 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params)); 448 params.vport_id = 0; 449 params.update_vport_active_rx_flg = 1; 450 params.update_vport_active_tx_flg = 1; 451 params.vport_active_rx_flg = flg; 452 params.vport_active_tx_flg = flg; 453 if (~qdev->enable_tx_switching & flg) { 454 params.update_tx_switching_flg = 1; 455 params.tx_switching_flg = !flg; 456 } 457 for_each_hwfn(edev, i) { 458 p_hwfn = &edev->hwfns[i]; 459 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 460 rc = ecore_sp_vport_update(p_hwfn, &params, 461 ECORE_SPQ_MODE_EBLOCK, NULL); 462 if (rc != ECORE_SUCCESS) { 463 DP_ERR(edev, "Failed to update vport\n"); 464 break; 465 } 466 } 467 DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated"); 468 469 return rc; 470 } 471 472 static void 473 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params, 474 uint16_t mtu, bool enable) 475 { 476 /* Enable LRO in split mode */ 477 sge_tpa_params->tpa_ipv4_en_flg = enable; 478 sge_tpa_params->tpa_ipv6_en_flg = enable; 479 sge_tpa_params->tpa_ipv4_tunn_en_flg = enable; 480 sge_tpa_params->tpa_ipv6_tunn_en_flg = enable; 481 /* set if tpa enable changes */ 482 sge_tpa_params->update_tpa_en_flg = 1; 483 /* set if tpa parameters should be handled */ 484 sge_tpa_params->update_tpa_param_flg = enable; 485 486 sge_tpa_params->max_buffers_per_cqe = 20; 487 /* Enable TPA in split mode. In this mode each TPA segment 488 * starts on the new BD, so there is one BD per segment.
489 */ 490 sge_tpa_params->tpa_pkt_split_flg = 1; 491 sge_tpa_params->tpa_hdr_data_split_flg = 0; 492 sge_tpa_params->tpa_gro_consistent_flg = 0; 493 sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 494 sge_tpa_params->tpa_max_size = 0x7FFF; 495 sge_tpa_params->tpa_min_size_to_start = mtu / 2; 496 sge_tpa_params->tpa_min_size_to_cont = mtu / 2; 497 } 498 499 /* Enable/disable LRO via vport-update */ 500 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg) 501 { 502 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 503 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 504 struct ecore_sp_vport_update_params params; 505 struct ecore_sge_tpa_params tpa_params; 506 struct ecore_hwfn *p_hwfn; 507 int rc; 508 int i; 509 510 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params)); 511 memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params)); 512 qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg); 513 params.vport_id = 0; 514 params.sge_tpa_params = &tpa_params; 515 for_each_hwfn(edev, i) { 516 p_hwfn = &edev->hwfns[i]; 517 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 518 rc = ecore_sp_vport_update(p_hwfn, &params, 519 ECORE_SPQ_MODE_EBLOCK, NULL); 520 if (rc != ECORE_SUCCESS) { 521 DP_ERR(edev, "Failed to update LRO\n"); 522 return -1; 523 } 524 } 525 qdev->enable_lro = flg; 526 eth_dev->data->lro = flg; 527 528 DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled"); 529 530 return 0; 531 } 532 533 static int 534 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev, 535 enum qed_filter_rx_mode_type type) 536 { 537 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 538 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 539 struct ecore_filter_accept_flags flags; 540 541 memset(&flags, 0, sizeof(flags)); 542 543 flags.update_rx_mode_config = 1; 544 flags.update_tx_mode_config = 1; 545 flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 546 ECORE_ACCEPT_MCAST_MATCHED | 547 ECORE_ACCEPT_BCAST; 548 549 flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 550 ECORE_ACCEPT_MCAST_MATCHED | 551 ECORE_ACCEPT_BCAST; 552 553 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { 554 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 555 if (IS_VF(edev)) { 556 flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 557 DP_INFO(edev, "Enabling Tx unmatched flag for VF\n"); 558 } 559 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { 560 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 561 } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC | 562 QED_FILTER_RX_MODE_TYPE_PROMISC)) { 563 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED | 564 ECORE_ACCEPT_MCAST_UNMATCHED; 565 } 566 567 return ecore_filter_accept_cmd(edev, 0, flags, false, false, 568 ECORE_SPQ_MODE_CB, NULL); 569 } 570 571 int 572 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 573 bool add) 574 { 575 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 576 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 577 struct qede_ucast_entry *tmp = NULL; 578 struct qede_ucast_entry *u; 579 struct rte_ether_addr *mac_addr; 580 581 mac_addr = (struct rte_ether_addr *)ucast->mac; 582 if (add) { 583 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 584 if ((memcmp(mac_addr, &tmp->mac, 585 RTE_ETHER_ADDR_LEN) == 0) && 586 ucast->vni == tmp->vni && 587 ucast->vlan == tmp->vlan) { 588 DP_INFO(edev, "Unicast MAC is already added" 589 " with vlan = %u, vni = %u\n", 590 ucast->vlan, ucast->vni); 591 return 0; 592 } 593 } 594 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), 595
RTE_CACHE_LINE_SIZE); 596 if (!u) { 597 DP_ERR(edev, "Did not allocate memory for ucast\n"); 598 return -ENOMEM; 599 } 600 rte_ether_addr_copy(mac_addr, &u->mac); 601 u->vlan = ucast->vlan; 602 u->vni = ucast->vni; 603 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list); 604 qdev->num_uc_addr++; 605 } else { 606 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 607 if ((memcmp(mac_addr, &tmp->mac, 608 RTE_ETHER_ADDR_LEN) == 0) && 609 ucast->vlan == tmp->vlan && 610 ucast->vni == tmp->vni) 611 break; 612 } 613 if (tmp == NULL) { 614 DP_INFO(edev, "Unicast MAC is not found\n"); 615 return -EINVAL; 616 } 617 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list); 618 qdev->num_uc_addr--; 619 } 620 621 return 0; 622 } 623 624 static int 625 qede_add_mcast_filters(struct rte_eth_dev *eth_dev, 626 struct rte_ether_addr *mc_addrs, 627 uint32_t mc_addrs_num) 628 { 629 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 630 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 631 struct ecore_filter_mcast mcast; 632 struct qede_mcast_entry *m = NULL; 633 uint8_t i; 634 int rc; 635 636 for (i = 0; i < mc_addrs_num; i++) { 637 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), 638 RTE_CACHE_LINE_SIZE); 639 if (!m) { 640 DP_ERR(edev, "Did not allocate memory for mcast\n"); 641 return -ENOMEM; 642 } 643 rte_ether_addr_copy(&mc_addrs[i], &m->mac); 644 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list); 645 } 646 memset(&mcast, 0, sizeof(mcast)); 647 mcast.num_mc_addrs = mc_addrs_num; 648 mcast.opcode = ECORE_FILTER_ADD; 649 for (i = 0; i < mc_addrs_num; i++) 650 rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *) 651 &mcast.mac[i]); 652 rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); 653 if (rc != ECORE_SUCCESS) { 654 DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc); 655 return -1; 656 } 657 658 return 0; 659 } 660 661 static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev) 662 { 663 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 664 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 665 struct qede_mcast_entry *tmp = NULL; 666 struct ecore_filter_mcast mcast; 667 int j; 668 int rc; 669 670 memset(&mcast, 0, sizeof(mcast)); 671 mcast.num_mc_addrs = qdev->num_mc_addr; 672 mcast.opcode = ECORE_FILTER_REMOVE; 673 j = 0; 674 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 675 rte_ether_addr_copy(&tmp->mac, 676 (struct rte_ether_addr *)&mcast.mac[j]); 677 j++; 678 } 679 rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); 680 if (rc != ECORE_SUCCESS) { 681 DP_ERR(edev, "Failed to delete multicast filter\n"); 682 return -1; 683 } 684 /* Init the list */ 685 while (!SLIST_EMPTY(&qdev->mc_list_head)) { 686 tmp = SLIST_FIRST(&qdev->mc_list_head); 687 SLIST_REMOVE_HEAD(&qdev->mc_list_head, list); 688 } 689 SLIST_INIT(&qdev->mc_list_head); 690 691 return 0; 692 } 693 694 enum _ecore_status_t 695 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 696 bool add) 697 { 698 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 699 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 700 enum _ecore_status_t rc = ECORE_INVAL; 701 702 if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) { 703 DP_ERR(edev, "Ucast filter table limit exceeded," 704 " Please enable promisc mode\n"); 705 return ECORE_INVAL; 706 } 707 708 rc = qede_ucast_filter(eth_dev, ucast, add); 709 if (rc == 0) 710 rc = ecore_filter_ucast_cmd(edev, ucast, 711 ECORE_SPQ_MODE_CB, NULL); 712 /* Indicate error only for add filter operation.
713 * Delete filter operations are not severe. 714 */ 715 if ((rc != ECORE_SUCCESS) && add) 716 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", 717 rc, add); 718 719 return rc; 720 } 721 722 static int 723 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr, 724 __rte_unused uint32_t index, __rte_unused uint32_t pool) 725 { 726 struct ecore_filter_ucast ucast; 727 int re; 728 729 if (!rte_is_valid_assigned_ether_addr(mac_addr)) 730 return -EINVAL; 731 732 qede_set_ucast_cmn_params(&ucast); 733 ucast.opcode = ECORE_FILTER_ADD; 734 ucast.type = ECORE_FILTER_MAC; 735 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac); 736 re = (int)qede_mac_int_ops(eth_dev, &ucast, 1); 737 return re; 738 } 739 740 static void 741 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) 742 { 743 struct qede_dev *qdev = eth_dev->data->dev_private; 744 struct ecore_dev *edev = &qdev->edev; 745 struct ecore_filter_ucast ucast; 746 747 PMD_INIT_FUNC_TRACE(edev); 748 749 if (index >= qdev->dev_info.num_mac_filters) { 750 DP_ERR(edev, "Index %u is above MAC filter limit %u\n", 751 index, qdev->dev_info.num_mac_filters); 752 return; 753 } 754 755 if (!rte_is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index])) 756 return; 757 758 qede_set_ucast_cmn_params(&ucast); 759 ucast.opcode = ECORE_FILTER_REMOVE; 760 ucast.type = ECORE_FILTER_MAC; 761 762 /* Use the index maintained by rte */ 763 rte_ether_addr_copy(&eth_dev->data->mac_addrs[index], 764 (struct rte_ether_addr *)&ucast.mac); 765 766 qede_mac_int_ops(eth_dev, &ucast, false); 767 } 768 769 static int 770 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr) 771 { 772 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 773 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 774 775 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), 776 mac_addr->addr_bytes)) { 777 DP_ERR(edev, "Setting MAC address is not allowed\n"); 778 return -EPERM; 779 } 780 781 qede_mac_addr_remove(eth_dev, 0); 782 783 return qede_mac_addr_add(eth_dev, mac_addr, 0, 0); 784 } 785 786 void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg) 787 { 788 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 789 struct ecore_sp_vport_update_params params; 790 struct ecore_hwfn *p_hwfn; 791 uint8_t i; 792 int rc; 793 794 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params)); 795 params.vport_id = 0; 796 params.update_accept_any_vlan_flg = 1; 797 params.accept_any_vlan = flg; 798 for_each_hwfn(edev, i) { 799 p_hwfn = &edev->hwfns[i]; 800 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 801 rc = ecore_sp_vport_update(p_hwfn, &params, 802 ECORE_SPQ_MODE_EBLOCK, NULL); 803 if (rc != ECORE_SUCCESS) { 804 DP_ERR(edev, "Failed to configure accept-any-vlan\n"); 805 return; 806 } 807 } 808 809 DP_INFO(edev, "%s accept-any-vlan\n", flg ?
"enabled" : "disabled"); 810 } 811 812 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg) 813 { 814 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 815 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 816 struct ecore_sp_vport_update_params params; 817 struct ecore_hwfn *p_hwfn; 818 uint8_t i; 819 int rc; 820 821 memset(&params, 0, sizeof(struct ecore_sp_vport_update_params)); 822 params.vport_id = 0; 823 params.update_inner_vlan_removal_flg = 1; 824 params.inner_vlan_removal_flg = flg; 825 for_each_hwfn(edev, i) { 826 p_hwfn = &edev->hwfns[i]; 827 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 828 rc = ecore_sp_vport_update(p_hwfn, &params, 829 ECORE_SPQ_MODE_EBLOCK, NULL); 830 if (rc != ECORE_SUCCESS) { 831 DP_ERR(edev, "Failed to update vport\n"); 832 return -1; 833 } 834 } 835 836 qdev->vlan_strip_flg = flg; 837 838 DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled"); 839 return 0; 840 } 841 842 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, 843 uint16_t vlan_id, int on) 844 { 845 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 846 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 847 struct qed_dev_eth_info *dev_info = &qdev->dev_info; 848 struct qede_vlan_entry *tmp = NULL; 849 struct qede_vlan_entry *vlan; 850 struct ecore_filter_ucast ucast; 851 int rc; 852 853 if (on) { 854 if (qdev->configured_vlans == dev_info->num_vlan_filters) { 855 DP_ERR(edev, "Reached max VLAN filter limit" 856 " enabling accept_any_vlan\n"); 857 qede_config_accept_any_vlan(qdev, true); 858 return 0; 859 } 860 861 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 862 if (tmp->vid == vlan_id) { 863 DP_INFO(edev, "VLAN %u already configured\n", 864 vlan_id); 865 return 0; 866 } 867 } 868 869 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry), 870 RTE_CACHE_LINE_SIZE); 871 872 if (!vlan) { 873 DP_ERR(edev, "Did not allocate memory for VLAN\n"); 874 return -ENOMEM; 875 } 876 877 qede_set_ucast_cmn_params(&ucast); 878 ucast.opcode = ECORE_FILTER_ADD; 879 ucast.type = ECORE_FILTER_VLAN; 880 ucast.vlan = vlan_id; 881 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 882 NULL); 883 if (rc != 0) { 884 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id, 885 rc); 886 rte_free(vlan); 887 } else { 888 vlan->vid = vlan_id; 889 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list); 890 qdev->configured_vlans++; 891 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n", 892 vlan_id, qdev->configured_vlans); 893 } 894 } else { 895 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 896 if (tmp->vid == vlan_id) 897 break; 898 } 899 900 if (!tmp) { 901 if (qdev->configured_vlans == 0) { 902 DP_INFO(edev, 903 "No VLAN filters configured yet\n"); 904 return 0; 905 } 906 907 DP_ERR(edev, "VLAN %u not configured\n", vlan_id); 908 return -EINVAL; 909 } 910 911 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list); 912 913 qede_set_ucast_cmn_params(&ucast); 914 ucast.opcode = ECORE_FILTER_REMOVE; 915 ucast.type = ECORE_FILTER_VLAN; 916 ucast.vlan = vlan_id; 917 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 918 NULL); 919 if (rc != 0) { 920 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", 921 vlan_id, rc); 922 } else { 923 qdev->configured_vlans--; 924 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n", 925 vlan_id, qdev->configured_vlans); 926 } 927 } 928 929 return rc; 930 } 931 932 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 933 { 934 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 935 struct ecore_dev *edev =
QEDE_INIT_EDEV(qdev); 936 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 937 938 if (mask & ETH_VLAN_STRIP_MASK) { 939 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 940 (void)qede_vlan_stripping(eth_dev, 1); 941 else 942 (void)qede_vlan_stripping(eth_dev, 0); 943 } 944 945 if (mask & ETH_VLAN_FILTER_MASK) { 946 /* VLAN filtering kicks in when a VLAN is added */ 947 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 948 qede_vlan_filter_set(eth_dev, 0, 1); 949 } else { 950 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ 951 DP_ERR(edev, 952 " Please remove existing VLAN filters" 953 " before disabling VLAN filtering\n"); 954 /* Signal app that VLAN filtering is still 955 * enabled 956 */ 957 eth_dev->data->dev_conf.rxmode.offloads |= 958 DEV_RX_OFFLOAD_VLAN_FILTER; 959 } else { 960 qede_vlan_filter_set(eth_dev, 0, 0); 961 } 962 } 963 } 964 965 if (mask & ETH_VLAN_EXTEND_MASK) 966 DP_ERR(edev, "Extend VLAN not supported\n"); 967 968 qdev->vlan_offload_mask = mask; 969 970 DP_INFO(edev, "VLAN offload mask %d\n", mask); 971 972 return 0; 973 } 974 975 static void qede_prandom_bytes(uint32_t *buff) 976 { 977 uint8_t i; 978 979 srand((unsigned int)time(NULL)); 980 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 981 buff[i] = rand(); 982 } 983 984 int qede_config_rss(struct rte_eth_dev *eth_dev) 985 { 986 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 987 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 988 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE]; 989 struct rte_eth_rss_reta_entry64 reta_conf[2]; 990 struct rte_eth_rss_conf rss_conf; 991 uint32_t i, id, pos, q; 992 993 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 994 if (!rss_conf.rss_key) { 995 DP_INFO(edev, "Applying driver default key\n"); 996 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 997 qede_prandom_bytes(&def_rss_key[0]); 998 rss_conf.rss_key = (uint8_t *)&def_rss_key[0]; 999 } 1000 1001 /* Configure RSS hash */ 1002 if (qede_rss_hash_update(eth_dev, &rss_conf)) 1003 return -EINVAL; 1004 1005 /* Configure default RETA */ 1006 memset(reta_conf, 0, sizeof(reta_conf)); 1007 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) 1008 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX; 1009 1010 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 1011 id = i / RTE_RETA_GROUP_SIZE; 1012 pos = i % RTE_RETA_GROUP_SIZE; 1013 q = i % QEDE_RSS_COUNT(eth_dev); 1014 reta_conf[id].reta[pos] = q; 1015 } 1016 if (qede_rss_reta_update(eth_dev, &reta_conf[0], 1017 ECORE_RSS_IND_TABLE_SIZE)) 1018 return -EINVAL; 1019 1020 return 0; 1021 } 1022 1023 static void qede_fastpath_start(struct ecore_dev *edev) 1024 { 1025 struct ecore_hwfn *p_hwfn; 1026 int i; 1027 1028 for_each_hwfn(edev, i) { 1029 p_hwfn = &edev->hwfns[i]; 1030 ecore_hw_start_fastpath(p_hwfn); 1031 } 1032 } 1033 1034 static int qede_dev_start(struct rte_eth_dev *eth_dev) 1035 { 1036 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1037 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1038 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode; 1039 1040 PMD_INIT_FUNC_TRACE(edev); 1041 1042 /* Update MTU only if it has changed */ 1043 if (qdev->new_mtu && qdev->new_mtu != qdev->mtu) { 1044 if (qede_update_mtu(eth_dev, qdev->new_mtu)) 1045 goto err; 1046 qdev->mtu = qdev->new_mtu; 1047 qdev->new_mtu = 0; 1048 } 1049 1050 /* Configure TPA parameters */ 1051 if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) { 1052 if (qede_enable_tpa(eth_dev, true)) 1053 return -EINVAL; 1054 /* Enable scatter mode for LRO */ 1055 if (!eth_dev->data->scattered_rx) 1056
rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER; 1057 } 1058 1059 /* Start queues */ 1060 if (qede_start_queues(eth_dev)) 1061 goto err; 1062 1063 if (IS_PF(edev)) 1064 qede_reset_queue_stats(qdev, true); 1065 1066 /* Newer SR-IOV PF driver expects RX/TX queues to be started before 1067 * enabling RSS. Hence RSS configuration is deferred up to this point. 1068 * Also, we would like to retain similar behavior in PF case, so we 1069 * don't do PF/VF specific check here. 1070 */ 1071 if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) 1072 if (qede_config_rss(eth_dev)) 1073 goto err; 1074 1075 /* Enable vport */ 1076 if (qede_activate_vport(eth_dev, true)) 1077 goto err; 1078 1079 /* Update link status */ 1080 qede_link_update(eth_dev, 0); 1081 1082 /* Start/resume traffic */ 1083 qede_fastpath_start(edev); 1084 1085 DP_INFO(edev, "Device started\n"); 1086 1087 return 0; 1088 err: 1089 DP_ERR(edev, "Device start fails\n"); 1090 return -1; /* common error code is < 0 */ 1091 } 1092 1093 static void qede_dev_stop(struct rte_eth_dev *eth_dev) 1094 { 1095 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1096 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1097 1098 PMD_INIT_FUNC_TRACE(edev); 1099 1100 /* Disable vport */ 1101 if (qede_activate_vport(eth_dev, false)) 1102 return; 1103 1104 if (qdev->enable_lro) 1105 qede_enable_tpa(eth_dev, false); 1106 1107 /* Stop queues */ 1108 qede_stop_queues(eth_dev); 1109 1110 /* Disable traffic */ 1111 ecore_hw_stop_fastpath(edev); /* TBD - loop */ 1112 1113 DP_INFO(edev, "Device is stopped\n"); 1114 } 1115 1116 static const char * const valid_args[] = { 1117 QEDE_NPAR_TX_SWITCHING, 1118 QEDE_VF_TX_SWITCHING, 1119 NULL, 1120 }; 1121 1122 static int qede_args_check(const char *key, const char *val, void *opaque) 1123 { 1124 unsigned long tmp; 1125 int ret = 0; 1126 struct rte_eth_dev *eth_dev = opaque; 1127 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1128 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1129 1130 errno = 0; 1131 tmp = strtoul(val, NULL, 0); 1132 if (errno) { 1133 DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val); 1134 return errno; 1135 } 1136 1137 if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) || 1138 ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) { 1139 qdev->enable_tx_switching = !!tmp; 1140 DP_INFO(edev, "Disabling %s tx-switching\n", 1141 strcmp(QEDE_NPAR_TX_SWITCHING, key) ? 1142 "VF" : "NPAR"); 1143 } 1144 1145 return ret; 1146 } 1147 1148 static int qede_args(struct rte_eth_dev *eth_dev) 1149 { 1150 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 1151 struct rte_kvargs *kvlist; 1152 struct rte_devargs *devargs; 1153 int ret; 1154 int i; 1155 1156 devargs = pci_dev->device.devargs; 1157 if (!devargs) 1158 return 0; /* return success */ 1159 1160 kvlist = rte_kvargs_parse(devargs->args, valid_args); 1161 if (kvlist == NULL) 1162 return -EINVAL; 1163 1164 /* Process parameters.
*/ 1165 for (i = 0; (valid_args[i] != NULL); ++i) { 1166 if (rte_kvargs_count(kvlist, valid_args[i])) { 1167 ret = rte_kvargs_process(kvlist, valid_args[i], 1168 qede_args_check, eth_dev); 1169 if (ret != ECORE_SUCCESS) { 1170 rte_kvargs_free(kvlist); 1171 return ret; 1172 } 1173 } 1174 } 1175 rte_kvargs_free(kvlist); 1176 1177 return 0; 1178 } 1179 1180 static int qede_dev_configure(struct rte_eth_dev *eth_dev) 1181 { 1182 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1183 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1184 struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode; 1185 int ret; 1186 1187 PMD_INIT_FUNC_TRACE(edev); 1188 1189 if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG) 1190 rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH; 1191 1192 /* We need to have min 1 RX queue. There is no min check in 1193 * rte_eth_dev_configure(), so we are checking it here. 1194 */ 1195 if (eth_dev->data->nb_rx_queues == 0) { 1196 DP_ERR(edev, "Minimum one RX queue is required\n"); 1197 return -EINVAL; 1198 } 1199 1200 /* Enable Tx switching by default */ 1201 qdev->enable_tx_switching = 1; 1202 1203 /* Parse devargs and fix up rxmode */ 1204 if (qede_args(eth_dev)) 1205 DP_NOTICE(edev, false, 1206 "Invalid devargs supplied, requested change will not take effect\n"); 1207 1208 if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || 1209 rxmode->mq_mode == ETH_MQ_RX_RSS)) { 1210 DP_ERR(edev, "Unsupported multi-queue mode\n"); 1211 return -ENOTSUP; 1212 } 1213 /* Flow director mode check */ 1214 if (qede_check_fdir_support(eth_dev)) 1215 return -ENOTSUP; 1216 1217 qede_dealloc_fp_resc(eth_dev); 1218 qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns; 1219 qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns; 1220 1221 if (qede_alloc_fp_resc(qdev)) 1222 return -ENOMEM; 1223 1224 /* If jumbo enabled adjust MTU */ 1225 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 1226 eth_dev->data->mtu = 1227 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - 1228 RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD; 1229 1230 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) 1231 eth_dev->data->scattered_rx = 1; 1232 1233 if (qede_start_vport(qdev, eth_dev->data->mtu)) 1234 return -1; 1235 1236 qdev->mtu = eth_dev->data->mtu; 1237 1238 /* Enable VLAN offloads by default */ 1239 ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | 1240 ETH_VLAN_FILTER_MASK); 1241 if (ret) 1242 return ret; 1243 1244 DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n", 1245 QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev)); 1246 1247 if (ECORE_IS_CMT(edev)) 1248 DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n", 1249 qdev->num_rx_queues, qdev->num_tx_queues); 1250 1251 1252 return 0; 1253 } 1254 1255 /* Info about HW descriptor ring limitations */ 1256 static const struct rte_eth_desc_lim qede_rx_desc_lim = { 1257 .nb_max = 0x8000, /* 32K */ 1258 .nb_min = 128, 1259 .nb_align = 128 /* lowest common multiple */ 1260 }; 1261 1262 static const struct rte_eth_desc_lim qede_tx_desc_lim = { 1263 .nb_max = 0x8000, /* 32K */ 1264 .nb_min = 256, 1265 .nb_align = 256, 1266 .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET, 1267 .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 1268 }; 1269 1270 static int 1271 qede_dev_info_get(struct rte_eth_dev *eth_dev, 1272 struct rte_eth_dev_info *dev_info) 1273 { 1274 struct qede_dev *qdev = eth_dev->data->dev_private; 1275 struct ecore_dev *edev = &qdev->edev; 1276 struct qed_link_output link; 1277 uint32_t speed_cap = 0; 1278 1279 PMD_INIT_FUNC_TRACE(edev); 1280 1281
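/* Descriptive note (added): the limits reported below come from the
 * qed_dev_eth_info captured at probe time. For CMT (two-engine) devices
 * the usable per-port queue count is halved a few lines further down,
 * since CMT internally doubles the number of queues per port.
 */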
dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; 1282 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; 1283 dev_info->rx_desc_lim = qede_rx_desc_lim; 1284 dev_info->tx_desc_lim = qede_tx_desc_lim; 1285 1286 if (IS_PF(edev)) 1287 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1288 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2); 1289 else 1290 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1291 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF); 1292 /* Since CMT mode internally doubles the number of queues */ 1293 if (ECORE_IS_CMT(edev)) 1294 dev_info->max_rx_queues = dev_info->max_rx_queues / 2; 1295 1296 dev_info->max_tx_queues = dev_info->max_rx_queues; 1297 1298 dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters; 1299 dev_info->max_vfs = 0; 1300 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; 1301 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 1302 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; 1303 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 1304 DEV_RX_OFFLOAD_UDP_CKSUM | 1305 DEV_RX_OFFLOAD_TCP_CKSUM | 1306 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1307 DEV_RX_OFFLOAD_TCP_LRO | 1308 DEV_RX_OFFLOAD_KEEP_CRC | 1309 DEV_RX_OFFLOAD_SCATTER | 1310 DEV_RX_OFFLOAD_JUMBO_FRAME | 1311 DEV_RX_OFFLOAD_VLAN_FILTER | 1312 DEV_RX_OFFLOAD_VLAN_STRIP | 1313 DEV_RX_OFFLOAD_RSS_HASH); 1314 dev_info->rx_queue_offload_capa = 0; 1315 1316 /* TX offloads are on a per-packet basis, so it is applicable 1317 * to both at port and queue levels. 1318 */ 1319 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | 1320 DEV_TX_OFFLOAD_IPV4_CKSUM | 1321 DEV_TX_OFFLOAD_UDP_CKSUM | 1322 DEV_TX_OFFLOAD_TCP_CKSUM | 1323 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 1324 DEV_TX_OFFLOAD_MULTI_SEGS | 1325 DEV_TX_OFFLOAD_TCP_TSO | 1326 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 1327 DEV_TX_OFFLOAD_GENEVE_TNL_TSO); 1328 dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa; 1329 1330 dev_info->default_txconf = (struct rte_eth_txconf) { 1331 .offloads = DEV_TX_OFFLOAD_MULTI_SEGS, 1332 }; 1333 1334 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1335 /* Packets are always dropped if no descriptors are available */ 1336 .rx_drop_en = 1, 1337 .offloads = 0, 1338 }; 1339 1340 memset(&link, 0, sizeof(struct qed_link_output)); 1341 qdev->ops->common->get_link(edev, &link); 1342 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1343 speed_cap |= ETH_LINK_SPEED_1G; 1344 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1345 speed_cap |= ETH_LINK_SPEED_10G; 1346 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1347 speed_cap |= ETH_LINK_SPEED_25G; 1348 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1349 speed_cap |= ETH_LINK_SPEED_40G; 1350 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1351 speed_cap |= ETH_LINK_SPEED_50G; 1352 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1353 speed_cap |= ETH_LINK_SPEED_100G; 1354 dev_info->speed_capa = speed_cap; 1355 1356 return 0; 1357 } 1358 1359 /* return 0 means link status changed, -1 means not changed */ 1360 int 1361 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) 1362 { 1363 struct qede_dev *qdev = eth_dev->data->dev_private; 1364 struct ecore_dev *edev = &qdev->edev; 1365 struct qed_link_output q_link; 1366 struct rte_eth_link link; 1367 uint16_t link_duplex; 1368 1369 memset(&q_link, 0, sizeof(q_link)); 1370 memset(&link, 0, sizeof(link)); 1371 1372 qdev->ops->common->get_link(edev, 
&q_link); 1373 1374 /* Link Speed */ 1375 link.link_speed = q_link.speed; 1376 1377 /* Link Mode */ 1378 switch (q_link.duplex) { 1379 case QEDE_DUPLEX_HALF: 1380 link_duplex = ETH_LINK_HALF_DUPLEX; 1381 break; 1382 case QEDE_DUPLEX_FULL: 1383 link_duplex = ETH_LINK_FULL_DUPLEX; 1384 break; 1385 case QEDE_DUPLEX_UNKNOWN: 1386 default: 1387 link_duplex = -1; 1388 } 1389 link.link_duplex = link_duplex; 1390 1391 /* Link Status */ 1392 link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN; 1393 1394 /* AN */ 1395 link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? 1396 ETH_LINK_AUTONEG : ETH_LINK_FIXED; 1397 1398 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", 1399 link.link_speed, link.link_duplex, 1400 link.link_autoneg, link.link_status); 1401 1402 return rte_eth_linkstatus_set(eth_dev, &link); 1403 } 1404 1405 static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev) 1406 { 1407 struct qede_dev *qdev = eth_dev->data->dev_private; 1408 struct ecore_dev *edev = &qdev->edev; 1409 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; 1410 enum _ecore_status_t ecore_status; 1411 1412 PMD_INIT_FUNC_TRACE(edev); 1413 1414 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1415 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1416 1417 ecore_status = qed_configure_filter_rx_mode(eth_dev, type); 1418 1419 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1420 } 1421 1422 static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev) 1423 { 1424 struct qede_dev *qdev = eth_dev->data->dev_private; 1425 struct ecore_dev *edev = &qdev->edev; 1426 enum _ecore_status_t ecore_status; 1427 1428 PMD_INIT_FUNC_TRACE(edev); 1429 1430 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1431 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1432 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC); 1433 else 1434 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1435 QED_FILTER_RX_MODE_TYPE_REGULAR); 1436 1437 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1438 } 1439 1440 static void qede_poll_sp_sb_cb(void *param) 1441 { 1442 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 1443 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1444 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1445 int rc; 1446 1447 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 1448 qede_interrupt_action(&edev->hwfns[1]); 1449 1450 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, 1451 qede_poll_sp_sb_cb, 1452 (void *)eth_dev); 1453 if (rc != 0) { 1454 DP_ERR(edev, "Unable to start periodic" 1455 " timer rc %d\n", rc); 1456 } 1457 } 1458 1459 static void qede_dev_close(struct rte_eth_dev *eth_dev) 1460 { 1461 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1462 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1463 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1464 1465 PMD_INIT_FUNC_TRACE(edev); 1466 1467 /* dev_stop() shall cleanup fp resources in hw but without releasing 1468 * dma memories and sw structures so that dev_start() can be called 1469 * by the app without reconfiguration. 
However, in dev_close() we 1470 * can release all the resources and device can be brought up newly 1471 */ 1472 if (eth_dev->data->dev_started) 1473 qede_dev_stop(eth_dev); 1474 1475 qede_stop_vport(edev); 1476 qdev->vport_started = false; 1477 qede_fdir_dealloc_resc(eth_dev); 1478 qede_dealloc_fp_resc(eth_dev); 1479 1480 eth_dev->data->nb_rx_queues = 0; 1481 eth_dev->data->nb_tx_queues = 0; 1482 1483 /* Bring the link down */ 1484 qede_dev_set_link_state(eth_dev, false); 1485 qdev->ops->common->slowpath_stop(edev); 1486 qdev->ops->common->remove(edev); 1487 rte_intr_disable(&pci_dev->intr_handle); 1488 1489 switch (pci_dev->intr_handle.type) { 1490 case RTE_INTR_HANDLE_UIO_INTX: 1491 case RTE_INTR_HANDLE_VFIO_LEGACY: 1492 rte_intr_callback_unregister(&pci_dev->intr_handle, 1493 qede_interrupt_handler_intx, 1494 (void *)eth_dev); 1495 break; 1496 default: 1497 rte_intr_callback_unregister(&pci_dev->intr_handle, 1498 qede_interrupt_handler, 1499 (void *)eth_dev); 1500 } 1501 1502 if (ECORE_IS_CMT(edev)) 1503 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); 1504 } 1505 1506 static int 1507 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) 1508 { 1509 struct qede_dev *qdev = eth_dev->data->dev_private; 1510 struct ecore_dev *edev = &qdev->edev; 1511 struct ecore_eth_stats stats; 1512 unsigned int i = 0, j = 0, qid, idx, hw_fn; 1513 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 1514 struct qede_tx_queue *txq; 1515 1516 ecore_get_vport_stats(edev, &stats); 1517 1518 /* RX Stats */ 1519 eth_stats->ipackets = stats.common.rx_ucast_pkts + 1520 stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts; 1521 1522 eth_stats->ibytes = stats.common.rx_ucast_bytes + 1523 stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes; 1524 1525 eth_stats->ierrors = stats.common.rx_crc_errors + 1526 stats.common.rx_align_errors + 1527 stats.common.rx_carrier_errors + 1528 stats.common.rx_oversize_packets + 1529 stats.common.rx_jabbers + stats.common.rx_undersize_packets; 1530 1531 eth_stats->rx_nombuf = stats.common.no_buff_discards; 1532 1533 eth_stats->imissed = stats.common.mftag_filter_discards + 1534 stats.common.mac_filter_discards + 1535 stats.common.no_buff_discards + 1536 stats.common.brb_truncates + stats.common.brb_discards; 1537 1538 /* TX stats */ 1539 eth_stats->opackets = stats.common.tx_ucast_pkts + 1540 stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts; 1541 1542 eth_stats->obytes = stats.common.tx_ucast_bytes + 1543 stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes; 1544 1545 eth_stats->oerrors = stats.common.tx_err_drop_pkts; 1546 1547 /* Queue stats */ 1548 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev), 1549 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1550 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev), 1551 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1552 if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) || 1553 txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev)) 1554 DP_VERBOSE(edev, ECORE_MSG_DEBUG, 1555 "Not all the queue stats will be displayed. 
Set" 1556 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" 1557 " appropriately and retry.\n"); 1558 1559 for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) { 1560 eth_stats->q_ipackets[i] = 0; 1561 eth_stats->q_errors[i] = 0; 1562 1563 for_each_hwfn(edev, hw_fn) { 1564 idx = qid * edev->num_hwfns + hw_fn; 1565 1566 eth_stats->q_ipackets[i] += 1567 *(uint64_t *) 1568 (((char *)(qdev->fp_array[idx].rxq)) + 1569 offsetof(struct qede_rx_queue, 1570 rcv_pkts)); 1571 eth_stats->q_errors[i] += 1572 *(uint64_t *) 1573 (((char *)(qdev->fp_array[idx].rxq)) + 1574 offsetof(struct qede_rx_queue, 1575 rx_hw_errors)) + 1576 *(uint64_t *) 1577 (((char *)(qdev->fp_array[idx].rxq)) + 1578 offsetof(struct qede_rx_queue, 1579 rx_alloc_errors)); 1580 } 1581 1582 i++; 1583 if (i == rxq_stat_cntrs) 1584 break; 1585 } 1586 1587 for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) { 1588 eth_stats->q_opackets[j] = 0; 1589 1590 for_each_hwfn(edev, hw_fn) { 1591 idx = qid * edev->num_hwfns + hw_fn; 1592 1593 txq = qdev->fp_array[idx].txq; 1594 eth_stats->q_opackets[j] += 1595 *((uint64_t *)(uintptr_t) 1596 (((uint64_t)(uintptr_t)(txq)) + 1597 offsetof(struct qede_tx_queue, 1598 xmit_pkts))); 1599 } 1600 1601 j++; 1602 if (j == txq_stat_cntrs) 1603 break; 1604 } 1605 1606 return 0; 1607 } 1608 1609 static unsigned 1610 qede_get_xstats_count(struct qede_dev *qdev) { 1611 struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; 1612 1613 if (ECORE_IS_BB(&qdev->edev)) 1614 return RTE_DIM(qede_xstats_strings) + 1615 RTE_DIM(qede_bb_xstats_strings) + 1616 (RTE_DIM(qede_rxq_xstats_strings) * 1617 QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns); 1618 else 1619 return RTE_DIM(qede_xstats_strings) + 1620 RTE_DIM(qede_ah_xstats_strings) + 1621 (RTE_DIM(qede_rxq_xstats_strings) * 1622 QEDE_RSS_COUNT(dev)); 1623 } 1624 1625 static int 1626 qede_get_xstats_names(struct rte_eth_dev *dev, 1627 struct rte_eth_xstat_name *xstats_names, 1628 __rte_unused unsigned int limit) 1629 { 1630 struct qede_dev *qdev = dev->data->dev_private; 1631 struct ecore_dev *edev = &qdev->edev; 1632 const unsigned int stat_cnt = qede_get_xstats_count(qdev); 1633 unsigned int i, qid, hw_fn, stat_idx = 0; 1634 1635 if (xstats_names == NULL) 1636 return stat_cnt; 1637 1638 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1639 strlcpy(xstats_names[stat_idx].name, 1640 qede_xstats_strings[i].name, 1641 sizeof(xstats_names[stat_idx].name)); 1642 stat_idx++; 1643 } 1644 1645 if (ECORE_IS_BB(edev)) { 1646 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { 1647 strlcpy(xstats_names[stat_idx].name, 1648 qede_bb_xstats_strings[i].name, 1649 sizeof(xstats_names[stat_idx].name)); 1650 stat_idx++; 1651 } 1652 } else { 1653 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { 1654 strlcpy(xstats_names[stat_idx].name, 1655 qede_ah_xstats_strings[i].name, 1656 sizeof(xstats_names[stat_idx].name)); 1657 stat_idx++; 1658 } 1659 } 1660 1661 for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) { 1662 for_each_hwfn(edev, hw_fn) { 1663 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1664 snprintf(xstats_names[stat_idx].name, 1665 RTE_ETH_XSTATS_NAME_SIZE, 1666 "%.4s%d.%d%s", 1667 qede_rxq_xstats_strings[i].name, 1668 hw_fn, qid, 1669 qede_rxq_xstats_strings[i].name + 4); 1670 stat_idx++; 1671 } 1672 } 1673 } 1674 1675 return stat_cnt; 1676 } 1677 1678 static int 1679 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1680 unsigned int n) 1681 { 1682 struct qede_dev *qdev = dev->data->dev_private; 1683 struct ecore_dev *edev = 
&qdev->edev; 1684 struct ecore_eth_stats stats; 1685 const unsigned int num = qede_get_xstats_count(qdev); 1686 unsigned int i, qid, hw_fn, fpidx, stat_idx = 0; 1687 1688 if (n < num) 1689 return num; 1690 1691 ecore_get_vport_stats(edev, &stats); 1692 1693 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1694 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) + 1695 qede_xstats_strings[i].offset); 1696 xstats[stat_idx].id = stat_idx; 1697 stat_idx++; 1698 } 1699 1700 if (ECORE_IS_BB(edev)) { 1701 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { 1702 xstats[stat_idx].value = 1703 *(uint64_t *)(((char *)&stats) + 1704 qede_bb_xstats_strings[i].offset); 1705 xstats[stat_idx].id = stat_idx; 1706 stat_idx++; 1707 } 1708 } else { 1709 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { 1710 xstats[stat_idx].value = 1711 *(uint64_t *)(((char *)&stats) + 1712 qede_ah_xstats_strings[i].offset); 1713 xstats[stat_idx].id = stat_idx; 1714 stat_idx++; 1715 } 1716 } 1717 1718 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 1719 for_each_hwfn(edev, hw_fn) { 1720 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1721 fpidx = qid * edev->num_hwfns + hw_fn; 1722 xstats[stat_idx].value = *(uint64_t *) 1723 (((char *)(qdev->fp_array[fpidx].rxq)) + 1724 qede_rxq_xstats_strings[i].offset); 1725 xstats[stat_idx].id = stat_idx; 1726 stat_idx++; 1727 } 1728 1729 } 1730 } 1731 1732 return stat_idx; 1733 } 1734 1735 static int 1736 qede_reset_xstats(struct rte_eth_dev *dev) 1737 { 1738 struct qede_dev *qdev = dev->data->dev_private; 1739 struct ecore_dev *edev = &qdev->edev; 1740 1741 ecore_reset_vport_stats(edev); 1742 qede_reset_queue_stats(qdev, true); 1743 1744 return 0; 1745 } 1746 1747 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) 1748 { 1749 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1750 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1751 struct qed_link_params link_params; 1752 int rc; 1753 1754 DP_INFO(edev, "setting link state %d\n", link_up); 1755 memset(&link_params, 0, sizeof(link_params)); 1756 link_params.link_up = link_up; 1757 rc = qdev->ops->common->set_link(edev, &link_params); 1758 if (rc != ECORE_SUCCESS) 1759 DP_ERR(edev, "Unable to set link state %d\n", link_up); 1760 1761 return rc; 1762 } 1763 1764 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev) 1765 { 1766 return qede_dev_set_link_state(eth_dev, true); 1767 } 1768 1769 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev) 1770 { 1771 return qede_dev_set_link_state(eth_dev, false); 1772 } 1773 1774 static int qede_reset_stats(struct rte_eth_dev *eth_dev) 1775 { 1776 struct qede_dev *qdev = eth_dev->data->dev_private; 1777 struct ecore_dev *edev = &qdev->edev; 1778 1779 ecore_reset_vport_stats(edev); 1780 qede_reset_queue_stats(qdev, false); 1781 1782 return 0; 1783 } 1784 1785 static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev) 1786 { 1787 enum qed_filter_rx_mode_type type = 1788 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1789 enum _ecore_status_t ecore_status; 1790 1791 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1792 type |= QED_FILTER_RX_MODE_TYPE_PROMISC; 1793 1794 ecore_status = qed_configure_filter_rx_mode(eth_dev, type); 1795 1796 return ecore_status >= ECORE_SUCCESS ? 
static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	enum _ecore_status_t ecore_status;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	enum _ecore_status_t ecore_status;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static int
qede_set_mc_addr_list(struct rte_eth_dev *eth_dev,
		      struct rte_ether_addr *mc_addrs,
		      uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint8_t i;

	if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
		DP_ERR(edev, "Reached max multicast filters limit, "
			     "please enable multicast promisc mode\n");
		return -ENOSPC;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
			DP_ERR(edev, "Not a valid multicast MAC\n");
			return -EINVAL;
		}
	}

	/* Flush all existing entries */
	if (qede_del_mcast_filters(eth_dev))
		return -1;

	/* Set new mcast list */
	return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
}

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (IS_PF(edev)) {
		struct ecore_sp_vport_update_params params;

		memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
		params.vport_id = 0;
		params.mtu = mtu;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			params.opaque_fid = p_hwfn->hw_info.opaque_fid;
			rc = ecore_sp_vport_update(p_hwfn, &params,
						   ECORE_SPQ_MODE_EBLOCK, NULL);
			if (rc != ECORE_SUCCESS)
				goto err;
		}
	} else {
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
			if (rc == ECORE_INVAL) {
				DP_INFO(edev, "VF MTU Update TLV not supported\n");
				/* Recreate vport */
				rc = qede_start_vport(qdev, mtu);
				if (rc != ECORE_SUCCESS)
					goto err;

				/* Restore config lost due to vport stop */
				if (eth_dev->data->promiscuous)
					qede_promiscuous_enable(eth_dev);
				else
					qede_promiscuous_disable(eth_dev);

				if (eth_dev->data->all_multicast)
					qede_allmulticast_enable(eth_dev);
				else
					qede_allmulticast_disable(eth_dev);

				qede_vlan_offload_set(eth_dev,
						      qdev->vlan_offload_mask);
			} else if (rc != ECORE_SUCCESS) {
				goto err;
			}
		}
	}
	DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);

	return 0;

err:
	DP_ERR(edev, "Failed to update MTU\n");
	return -1;
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_TUNNEL_GRE,
		/* Inner */
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts ||
	    eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)
		return ptypes;

	return NULL;
}

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
	*rss_caps = 0;
	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}

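/* Queue indexing note: the code in this file addresses fp_array with
 * (queue * edev->num_hwfns + engine), i.e. with two engines (CMT mode)
 * the per-queue fastpath structures are interleaved across engines. The
 * RSS indirection table filled below and the per-queue statistics above
 * both rely on this layout.
 */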
int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params rss_params;
	struct ecore_hwfn *p_hwfn;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	uint8_t len = rss_conf->rss_key_len;
	uint8_t idx, i, j, fpidx;
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&rss_params, 0, sizeof(rss_params));

	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
		(unsigned long)hf, len, key);

	if (hf != 0) {
		/* Enabling RSS */
		DP_INFO(edev, "Enabling rss\n");

		/* RSS caps */
		qede_init_rss_caps(&rss_params.rss_caps, hf);
		rss_params.update_rss_capabilities = 1;

		/* RSS hash key */
		if (key) {
			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
				DP_ERR(edev, "RSS key length exceeds limit\n");
				return -EINVAL;
			}
			DP_INFO(edev, "Applying user supplied hash key\n");
			rss_params.update_rss_key = 1;
			memcpy(&rss_params.rss_key, key, len);
		}
		rss_params.rss_enable = 1;
	}

	rss_params.update_rss_config = 1;
	/* tbl_size has to be set with capabilities */
	rss_params.rss_table_size_log = 7;
	vport_update_params.vport_id = 0;

	for_each_hwfn(edev, i) {
		/* pass the L2 handles instead of qids */
		for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
			idx = j % QEDE_RSS_COUNT(eth_dev);
			fpidx = idx * edev->num_hwfns + i;
			rss_params.rss_ind_table[j] =
				qdev->fp_array[fpidx].rxq->handle;
		}

		vport_update_params.rss_params = &rss_params;

		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			return rc;
		}
	}
	qdev->rss_enable = rss_params.rss_enable;

	/* Update local structure for hash query */
	qdev->rss_conf.rss_hf = hf;
	qdev->rss_conf.rss_key_len = len;
	if (qdev->rss_enable) {
		if (qdev->rss_conf.rss_key == NULL) {
			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
			if (qdev->rss_conf.rss_key == NULL) {
				DP_ERR(edev, "No memory to store RSS key\n");
				return -ENOMEM;
			}
		}
		if (key && len) {
			DP_INFO(edev, "Storing RSS key\n");
			memcpy(qdev->rss_conf.rss_key, key, len);
		}
	} else if (!qdev->rss_enable && len == 0) {
		if (qdev->rss_conf.rss_key) {
			free(qdev->rss_conf.rss_key);
			qdev->rss_conf.rss_key = NULL;
			DP_INFO(edev, "Free RSS key\n");
		}
	}

	return 0;
}

static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);
	return 0;
}

int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params *params;
	uint16_t i, j, idx, fid, shift;
	struct ecore_hwfn *p_hwfn;
	uint8_t entry;
	int rc = 0;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE);
	if (params == NULL) {
		DP_ERR(edev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	params->update_rss_ind_table = 1;
	params->rss_table_size_log = 7;
	params->update_rss_config = 1;

	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params->rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = params;

	for_each_hwfn(edev, i) {
		for (j = 0; j < reta_size; j++) {
			idx = j / RTE_RETA_GROUP_SIZE;
			shift = j % RTE_RETA_GROUP_SIZE;
			if (reta_conf[idx].mask & (1ULL << shift)) {
				entry = reta_conf[idx].reta[shift];
				fid = entry * edev->num_hwfns + i;
				/* Pass rxq handles to ecore */
				params->rss_ind_table[j] =
					qdev->fp_array[fid].rxq->handle;
				/* Update the local copy for RETA query cmd */
				qdev->rss_ind_table[j] = entry;
			}
		}

		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			goto out;
		}
	}

out:
	rte_free(params);
	return rc;
}

static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}

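/* ethdev mtu_set handler. The flow implemented below is: validate the new
 * frame size against the limits reported by qede_dev_info_get(), install
 * dummy RX/TX burst handlers, stop the port if it was running, recompute
 * the RX buffer size of every queue for the new frame size, then restart
 * the port and restore the real burst functions.
 */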
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_dev_info dev_info = {0};
	struct qede_fastpath *fp;
	uint32_t max_rx_pkt_len;
	uint32_t frame_size;
	uint16_t bufsz;
	bool restart = false;
	int i, rc;

	PMD_INIT_FUNC_TRACE(edev);
	rc = qede_dev_info_get(dev, &dev_info);
	if (rc != 0) {
		DP_ERR(edev, "Error during getting ethernet device info\n");
		return rc;
	}
	max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
	frame_size = max_rx_pkt_len;
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
		       mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
		       QEDE_ETH_OVERHEAD);
		return -EINVAL;
	}
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
			dev->data->min_rx_buf_size);
		return -EINVAL;
	}
	/* Temporarily replace I/O functions with dummy ones. They cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
	if (dev->data->dev_started) {
		dev->data->dev_started = 0;
		qede_dev_stop(dev);
		restart = true;
	}
	rte_delay_ms(1000);
	qdev->new_mtu = mtu;

	/* Fix up RX buf size for all queues of the port */
	for (i = 0; i < qdev->num_rx_queues; i++) {
		fp = &qdev->fp_array[i];
		if (fp->rxq != NULL) {
			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
			/* cache align the mbuf size to simplify rx_buf_size
			 * calculation
			 */
			bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
			rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
			if (rc < 0)
				return rc;

			fp->rxq->rx_buf_size = rc;
		}
	}
	if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (!dev->data->dev_started && restart) {
		qede_dev_start(dev);
		dev->data->dev_started = 1;
	}

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
	/* Reassign back */
	if (ECORE_IS_CMT(edev)) {
		dev->rx_pkt_burst = qede_recv_pkts_cmt;
		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
	} else {
		dev->rx_pkt_burst = qede_recv_pkts;
		dev->tx_pkt_burst = qede_xmit_pkts;
	}
	return 0;
}

static int
qede_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = qede_eth_dev_uninit(dev);
	if (ret)
		return ret;

	return qede_eth_dev_init(dev);
}

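/* PF ethdev callback table. The VF table that follows is almost identical;
 * it only omits the flow-control and filter_ctrl callbacks.
 */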
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};

static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}

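/* Common PF/VF probe-time initialization. In outline: probe the ecore
 * device, register an INTx or MSI-X interrupt handler, start the slowpath,
 * query the device info, allocate the MAC address table, pick the PF or VF
 * ops table, and set the initial link state and tunnel defaults.
 */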
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t int_mode;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		rc = -EINVAL;
		goto err;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		rc = -ENODEV;
		goto err;
	}
	qede_update_pf_params(edev);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		int_mode = ECORE_INT_MODE_INTA;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler_intx,
					   (void *)eth_dev);
		break;
	default:
		int_mode = ECORE_INT_MODE_MSIX;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler,
					   (void *)eth_dev);
	}

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		rc = -ENODEV;
		goto err;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));

	params.int_mode = int_mode;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	if (ECORE_IS_CMT(edev)) {
		eth_dev->rx_pkt_burst = qede_recv_pkts_cmt;
		eth_dev->tx_pkt_burst = qede_xmit_pkts_cmt;
	} else {
		eth_dev->rx_pkt_burst = qede_recv_pkts;
		eth_dev->tx_pkt_burst = qede_xmit_pkts;
	}

	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			rc = -EINVAL;
			goto err;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		rc = -ENODEV;
		goto err;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		rc = -ENODEV;
		goto err;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
				(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(RTE_ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0].
				    hw_info.hw_mac_addr,
				    &eth_dev->data->mac_addrs[0]);
		rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
				    &adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				rte_ether_addr_copy(
					(struct rte_ether_addr *)&vf_mac,
					&eth_dev->data->mac_addrs[0]);
				rte_ether_addr_copy(
					&eth_dev->data->mac_addrs[0],
					&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->arfs_info.arfs_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	SLIST_INIT(&adapter->mc_list_head);
	adapter->mtu = RTE_ETHER_MTU;
	adapter->vport_started = false;

	/* VF tunnel offloads are enabled by default in the PF driver */
	adapter->vxlan.num_filters = 0;
	adapter->geneve.num_filters = 0;
	adapter->ipgre.num_filters = 0;
	if (is_vf) {
		adapter->vxlan.enable = true;
		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
		adapter->geneve.enable = true;
		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
					      ETH_TUNNEL_FILTER_IVLAN;
		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
		adapter->ipgre.enable = true;
		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
	} else {
		adapter->vxlan.enable = false;
		adapter->geneve.enable = false;
		adapter->ipgre.enable = false;
	}

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;

err:
	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}
	return rc;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

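/* PCI device IDs handled by the VF and PF drivers respectively; they are
 * referenced by the rte_pci_driver id_table fields and the PCI table
 * registrations at the end of the file.
 */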
static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};

static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				    struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");

RTE_INIT(qede_init_log)
{
	qede_logtype_init = rte_log_register("pmd.net.qede.init");
	if (qede_logtype_init >= 0)
		rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
	qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
	if (qede_logtype_driver >= 0)
		rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
}