1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (c) 2016 - 2018 Cavium Inc. 3 * All rights reserved. 4 * www.cavium.com 5 */ 6 7 #include "qede_ethdev.h" 8 #include <rte_string_fns.h> 9 #include <rte_alarm.h> 10 #include <rte_version.h> 11 #include <rte_kvargs.h> 12 13 /* Globals */ 14 int qede_logtype_init; 15 int qede_logtype_driver; 16 17 static const struct qed_eth_ops *qed_ops; 18 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev); 19 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev); 20 21 #define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */ 22 23 struct rte_qede_xstats_name_off { 24 char name[RTE_ETH_XSTATS_NAME_SIZE]; 25 uint64_t offset; 26 }; 27 28 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = { 29 {"rx_unicast_bytes", 30 offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)}, 31 {"rx_multicast_bytes", 32 offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)}, 33 {"rx_broadcast_bytes", 34 offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)}, 35 {"rx_unicast_packets", 36 offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)}, 37 {"rx_multicast_packets", 38 offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)}, 39 {"rx_broadcast_packets", 40 offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)}, 41 42 {"tx_unicast_bytes", 43 offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)}, 44 {"tx_multicast_bytes", 45 offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)}, 46 {"tx_broadcast_bytes", 47 offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)}, 48 {"tx_unicast_packets", 49 offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)}, 50 {"tx_multicast_packets", 51 offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)}, 52 {"tx_broadcast_packets", 53 offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)}, 54 55 {"rx_64_byte_packets", 56 offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)}, 57 {"rx_65_to_127_byte_packets", 58 offsetof(struct ecore_eth_stats_common, 59 rx_65_to_127_byte_packets)}, 60 {"rx_128_to_255_byte_packets", 61 offsetof(struct ecore_eth_stats_common, 62 rx_128_to_255_byte_packets)}, 63 {"rx_256_to_511_byte_packets", 64 offsetof(struct ecore_eth_stats_common, 65 rx_256_to_511_byte_packets)}, 66 {"rx_512_to_1023_byte_packets", 67 offsetof(struct ecore_eth_stats_common, 68 rx_512_to_1023_byte_packets)}, 69 {"rx_1024_to_1518_byte_packets", 70 offsetof(struct ecore_eth_stats_common, 71 rx_1024_to_1518_byte_packets)}, 72 {"tx_64_byte_packets", 73 offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)}, 74 {"tx_65_to_127_byte_packets", 75 offsetof(struct ecore_eth_stats_common, 76 tx_65_to_127_byte_packets)}, 77 {"tx_128_to_255_byte_packets", 78 offsetof(struct ecore_eth_stats_common, 79 tx_128_to_255_byte_packets)}, 80 {"tx_256_to_511_byte_packets", 81 offsetof(struct ecore_eth_stats_common, 82 tx_256_to_511_byte_packets)}, 83 {"tx_512_to_1023_byte_packets", 84 offsetof(struct ecore_eth_stats_common, 85 tx_512_to_1023_byte_packets)}, 86 {"tx_1024_to_1518_byte_packets", 87 offsetof(struct ecore_eth_stats_common, 88 tx_1024_to_1518_byte_packets)}, 89 90 {"rx_mac_crtl_frames", 91 offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)}, 92 {"tx_mac_control_frames", 93 offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)}, 94 {"rx_pause_frames", 95 offsetof(struct ecore_eth_stats_common, rx_pause_frames)}, 96 {"tx_pause_frames", 97 offsetof(struct ecore_eth_stats_common, tx_pause_frames)}, 98 {"rx_priority_flow_control_frames", 99 offsetof(struct ecore_eth_stats_common, rx_pfc_frames)}, 100 {"tx_priority_flow_control_frames", 101 offsetof(struct ecore_eth_stats_common, tx_pfc_frames)}, 102 103 {"rx_crc_errors", 104 offsetof(struct ecore_eth_stats_common, rx_crc_errors)}, 105 {"rx_align_errors", 106 offsetof(struct ecore_eth_stats_common, rx_align_errors)}, 107 {"rx_carrier_errors", 108 offsetof(struct ecore_eth_stats_common, rx_carrier_errors)}, 109 {"rx_oversize_packet_errors", 110 offsetof(struct ecore_eth_stats_common, rx_oversize_packets)}, 111 {"rx_jabber_errors", 112 offsetof(struct ecore_eth_stats_common, rx_jabbers)}, 113 {"rx_undersize_packet_errors", 114 offsetof(struct ecore_eth_stats_common, rx_undersize_packets)}, 115 {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)}, 116 {"rx_host_buffer_not_available", 117 offsetof(struct ecore_eth_stats_common, no_buff_discards)}, 118 /* Number of packets discarded because they are bigger than MTU */ 119 {"rx_packet_too_big_discards", 120 offsetof(struct ecore_eth_stats_common, 121 packet_too_big_discard)}, 122 {"rx_ttl_zero_discards", 123 offsetof(struct ecore_eth_stats_common, ttl0_discard)}, 124 {"rx_multi_function_tag_filter_discards", 125 offsetof(struct ecore_eth_stats_common, mftag_filter_discards)}, 126 {"rx_mac_filter_discards", 127 offsetof(struct ecore_eth_stats_common, mac_filter_discards)}, 128 {"rx_gft_filter_drop", 129 offsetof(struct ecore_eth_stats_common, gft_filter_drop)}, 130 {"rx_hw_buffer_truncates", 131 offsetof(struct ecore_eth_stats_common, brb_truncates)}, 132 {"rx_hw_buffer_discards", 133 offsetof(struct ecore_eth_stats_common, brb_discards)}, 134 {"tx_error_drop_packets", 135 offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)}, 136 137 {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)}, 138 {"rx_mac_unicast_packets", 139 offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)}, 140 {"rx_mac_multicast_packets", 141 offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)}, 142 {"rx_mac_broadcast_packets", 143 offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)}, 144 {"rx_mac_frames_ok", 145 offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)}, 146 {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)}, 147 {"tx_mac_unicast_packets", 148 offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)}, 149 {"tx_mac_multicast_packets", 150 offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)}, 151 {"tx_mac_broadcast_packets", 152 offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)}, 153 154 {"lro_coalesced_packets", 155 offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)}, 156 {"lro_coalesced_events", 157 offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)}, 158 {"lro_aborts_num", 159 offsetof(struct ecore_eth_stats_common, tpa_aborts_num)}, 160 {"lro_not_coalesced_packets", 161 offsetof(struct ecore_eth_stats_common, 162 tpa_not_coalesced_pkts)}, 163 {"lro_coalesced_bytes", 164 offsetof(struct ecore_eth_stats_common, 165 tpa_coalesced_bytes)}, 166 }; 167 168 static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = { 169 {"rx_1519_to_1522_byte_packets", 170 offsetof(struct ecore_eth_stats, bb) + 171 offsetof(struct ecore_eth_stats_bb, 172 rx_1519_to_1522_byte_packets)}, 173 {"rx_1519_to_2047_byte_packets", 174 offsetof(struct ecore_eth_stats, bb) + 175 offsetof(struct ecore_eth_stats_bb, 176 rx_1519_to_2047_byte_packets)}, 177 {"rx_2048_to_4095_byte_packets", 178 offsetof(struct ecore_eth_stats, bb) + 179 offsetof(struct ecore_eth_stats_bb, 180 rx_2048_to_4095_byte_packets)}, 181 {"rx_4096_to_9216_byte_packets", 182 offsetof(struct ecore_eth_stats, bb) + 183 offsetof(struct ecore_eth_stats_bb, 184 rx_4096_to_9216_byte_packets)}, 185 {"rx_9217_to_16383_byte_packets", 186 offsetof(struct ecore_eth_stats, bb) + 187 offsetof(struct ecore_eth_stats_bb, 188 rx_9217_to_16383_byte_packets)}, 189 190 {"tx_1519_to_2047_byte_packets", 191 offsetof(struct ecore_eth_stats, bb) + 192 offsetof(struct ecore_eth_stats_bb, 193 tx_1519_to_2047_byte_packets)}, 194 {"tx_2048_to_4095_byte_packets", 195 offsetof(struct ecore_eth_stats, bb) + 196 offsetof(struct ecore_eth_stats_bb, 197 tx_2048_to_4095_byte_packets)}, 198 {"tx_4096_to_9216_byte_packets", 199 offsetof(struct ecore_eth_stats, bb) + 200 offsetof(struct ecore_eth_stats_bb, 201 tx_4096_to_9216_byte_packets)}, 202 {"tx_9217_to_16383_byte_packets", 203 offsetof(struct ecore_eth_stats, bb) + 204 offsetof(struct ecore_eth_stats_bb, 205 tx_9217_to_16383_byte_packets)}, 206 207 {"tx_lpi_entry_count", 208 offsetof(struct ecore_eth_stats, bb) + 209 offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)}, 210 {"tx_total_collisions", 211 offsetof(struct ecore_eth_stats, bb) + 212 offsetof(struct ecore_eth_stats_bb, tx_total_collisions)}, 213 }; 214 215 static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = { 216 {"rx_1519_to_max_byte_packets", 217 offsetof(struct ecore_eth_stats, ah) + 218 offsetof(struct ecore_eth_stats_ah, 219 rx_1519_to_max_byte_packets)}, 220 {"tx_1519_to_max_byte_packets", 221 offsetof(struct ecore_eth_stats, ah) + 222 offsetof(struct ecore_eth_stats_ah, 223 tx_1519_to_max_byte_packets)}, 224 }; 225 226 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = { 227 {"rx_q_segments", 228 offsetof(struct qede_rx_queue, rx_segs)}, 229 {"rx_q_hw_errors", 230 offsetof(struct qede_rx_queue, rx_hw_errors)}, 231 {"rx_q_allocation_errors", 232 offsetof(struct qede_rx_queue, rx_alloc_errors)} 233 }; 234 235 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) 236 { 237 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); 238 } 239 240 static void 241 qede_interrupt_handler_intx(void *param) 242 { 243 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 244 struct qede_dev *qdev = eth_dev->data->dev_private; 245 struct ecore_dev *edev = &qdev->edev; 246 u64 status; 247 248 /* Check if our device actually raised an interrupt */ 249 status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev)); 250 if (status & 0x1) { 251 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 252 253 if (rte_intr_ack(eth_dev->intr_handle)) 254 DP_ERR(edev, "rte_intr_ack failed\n"); 255 } 256 } 257 258 static void 259 qede_interrupt_handler(void *param) 260 { 261 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 262 struct qede_dev *qdev = eth_dev->data->dev_private; 263 struct ecore_dev *edev = &qdev->edev; 264 265 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 266 if (rte_intr_ack(eth_dev->intr_handle)) 267 DP_ERR(edev, "rte_intr_ack failed\n"); 268 } 269 270 static void 271 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info) 272 { 273 rte_memcpy(&qdev->dev_info, info, sizeof(*info)); 274 qdev->ops = qed_ops; 275 } 276 277 static void qede_print_adapter_info(struct qede_dev *qdev) 278 { 279 struct ecore_dev *edev = &qdev->edev; 280 struct qed_dev_info *info = &qdev->dev_info.common; 281 static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE]; 282 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; 283 284 DP_INFO(edev, "*********************************\n"); 285 DP_INFO(edev, " DPDK version:%s\n", rte_version()); 286 DP_INFO(edev, " Chip details : %s %c%d\n", 287 ECORE_IS_BB(edev) ? "BB" : "AH", 288 'A' + edev->chip_rev, 289 (int)edev->chip_metal); 290 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d", 291 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng); 292 snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s", 293 ver_str, QEDE_PMD_VERSION); 294 DP_INFO(edev, " Driver version : %s\n", drv_ver); 295 DP_INFO(edev, " Firmware version : %s\n", ver_str); 296 297 snprintf(ver_str, MCP_DRV_VER_STR_SIZE, 298 "%d.%d.%d.%d", 299 (info->mfw_rev >> 24) & 0xff, 300 (info->mfw_rev >> 16) & 0xff, 301 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff); 302 DP_INFO(edev, " Management Firmware version : %s\n", ver_str); 303 DP_INFO(edev, " Firmware file : %s\n", qede_fw_file); 304 DP_INFO(edev, "*********************************\n"); 305 } 306 307 static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats) 308 { 309 struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; 310 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 311 unsigned int i = 0, j = 0, qid; 312 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 313 struct qede_tx_queue *txq; 314 315 DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n"); 316 317 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev), 318 RTE_ETHDEV_QUEUE_STAT_CNTRS); 319 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev), 320 RTE_ETHDEV_QUEUE_STAT_CNTRS); 321 322 for (qid = 0; qid < qdev->num_rx_queues; qid++) { 323 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 324 offsetof(struct qede_rx_queue, rcv_pkts), 0, 325 sizeof(uint64_t)); 326 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 327 offsetof(struct qede_rx_queue, rx_hw_errors), 0, 328 sizeof(uint64_t)); 329 OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) + 330 offsetof(struct qede_rx_queue, rx_alloc_errors), 0, 331 sizeof(uint64_t)); 332 333 if (xstats) 334 for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++) 335 OSAL_MEMSET((((char *) 336 (qdev->fp_array[qid].rxq)) + 337 qede_rxq_xstats_strings[j].offset), 338 0, 339 sizeof(uint64_t)); 340 341 i++; 342 if (i == rxq_stat_cntrs) 343 break; 344 } 345 346 i = 0; 347 348 for (qid = 0; qid < qdev->num_tx_queues; qid++) { 349 txq = qdev->fp_array[qid].txq; 350 351 OSAL_MEMSET((uint64_t *)(uintptr_t) 352 (((uint64_t)(uintptr_t)(txq)) + 353 offsetof(struct qede_tx_queue, xmit_pkts)), 0, 354 sizeof(uint64_t)); 355 356 i++; 357 if (i == txq_stat_cntrs) 358 break; 359 } 360 } 361 362 static int 363 qede_stop_vport(struct ecore_dev *edev) 364 { 365 struct ecore_hwfn *p_hwfn; 366 uint8_t vport_id; 367 int rc; 368 int i; 369 370 vport_id = 0; 371 for_each_hwfn(edev, i) { 372 p_hwfn = &edev->hwfns[i]; 373 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 374 vport_id); 375 if (rc != ECORE_SUCCESS) { 376 DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc); 377 return rc; 378 } 379 } 380 381 DP_INFO(edev, "vport stopped\n"); 382 383 return 0; 384 } 385 386 static int 387 qede_start_vport(struct qede_dev *qdev, uint16_t mtu) 388 { 389 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 390 struct ecore_sp_vport_start_params params; 391 struct ecore_hwfn *p_hwfn; 392 int rc; 393 int i; 394 395 if (qdev->vport_started) 396 qede_stop_vport(edev); 397 398 memset(¶ms, 0, sizeof(params)); 399 params.vport_id = 0; 400 params.mtu = mtu; 401 /* @DPDK - Disable FW placement */ 402 params.zero_placement_offset = 1; 403 for_each_hwfn(edev, i) { 404 p_hwfn = &edev->hwfns[i]; 405 params.concrete_fid = p_hwfn->hw_info.concrete_fid; 406 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 407 rc = ecore_sp_vport_start(p_hwfn, ¶ms); 408 if (rc != ECORE_SUCCESS) { 409 DP_ERR(edev, "Start V-PORT failed %d\n", rc); 410 return rc; 411 } 412 } 413 ecore_reset_vport_stats(edev); 414 qdev->vport_started = true; 415 DP_INFO(edev, "VPORT started with MTU = %u\n", mtu); 416 417 return 0; 418 } 419 420 #define QEDE_NPAR_TX_SWITCHING "npar_tx_switching" 421 #define QEDE_VF_TX_SWITCHING "vf_tx_switching" 422 423 /* Activate or deactivate vport via vport-update */ 424 int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg) 425 { 426 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 427 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 428 struct ecore_sp_vport_update_params params; 429 struct ecore_hwfn *p_hwfn; 430 uint8_t i; 431 int rc = -1; 432 433 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 434 params.vport_id = 0; 435 params.update_vport_active_rx_flg = 1; 436 params.update_vport_active_tx_flg = 1; 437 params.vport_active_rx_flg = flg; 438 params.vport_active_tx_flg = flg; 439 if (~qdev->enable_tx_switching & flg) { 440 params.update_tx_switching_flg = 1; 441 params.tx_switching_flg = !flg; 442 } 443 for_each_hwfn(edev, i) { 444 p_hwfn = &edev->hwfns[i]; 445 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 446 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 447 ECORE_SPQ_MODE_EBLOCK, NULL); 448 if (rc != ECORE_SUCCESS) { 449 DP_ERR(edev, "Failed to update vport\n"); 450 break; 451 } 452 } 453 DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated"); 454 455 return rc; 456 } 457 458 static void 459 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params, 460 uint16_t mtu, bool enable) 461 { 462 /* Enable LRO in split mode */ 463 sge_tpa_params->tpa_ipv4_en_flg = enable; 464 sge_tpa_params->tpa_ipv6_en_flg = enable; 465 sge_tpa_params->tpa_ipv4_tunn_en_flg = enable; 466 sge_tpa_params->tpa_ipv6_tunn_en_flg = enable; 467 /* set if tpa enable changes */ 468 sge_tpa_params->update_tpa_en_flg = 1; 469 /* set if tpa parameters should be handled */ 470 sge_tpa_params->update_tpa_param_flg = enable; 471 472 sge_tpa_params->max_buffers_per_cqe = 20; 473 /* Enable TPA in split mode. In this mode each TPA segment 474 * starts on the new BD, so there is one BD per segment. 475 */ 476 sge_tpa_params->tpa_pkt_split_flg = 1; 477 sge_tpa_params->tpa_hdr_data_split_flg = 0; 478 sge_tpa_params->tpa_gro_consistent_flg = 0; 479 sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 480 sge_tpa_params->tpa_max_size = 0x7FFF; 481 sge_tpa_params->tpa_min_size_to_start = mtu / 2; 482 sge_tpa_params->tpa_min_size_to_cont = mtu / 2; 483 } 484 485 /* Enable/disable LRO via vport-update */ 486 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg) 487 { 488 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 489 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 490 struct ecore_sp_vport_update_params params; 491 struct ecore_sge_tpa_params tpa_params; 492 struct ecore_hwfn *p_hwfn; 493 int rc; 494 int i; 495 496 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 497 memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params)); 498 qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg); 499 params.vport_id = 0; 500 params.sge_tpa_params = &tpa_params; 501 for_each_hwfn(edev, i) { 502 p_hwfn = &edev->hwfns[i]; 503 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 504 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 505 ECORE_SPQ_MODE_EBLOCK, NULL); 506 if (rc != ECORE_SUCCESS) { 507 DP_ERR(edev, "Failed to update LRO\n"); 508 return -1; 509 } 510 } 511 qdev->enable_lro = flg; 512 eth_dev->data->lro = flg; 513 514 DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled"); 515 516 return 0; 517 } 518 519 static int 520 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev, 521 enum qed_filter_rx_mode_type type) 522 { 523 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 524 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 525 struct ecore_filter_accept_flags flags; 526 527 memset(&flags, 0, sizeof(flags)); 528 529 flags.update_rx_mode_config = 1; 530 flags.update_tx_mode_config = 1; 531 flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 532 ECORE_ACCEPT_MCAST_MATCHED | 533 ECORE_ACCEPT_BCAST; 534 535 flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 536 ECORE_ACCEPT_MCAST_MATCHED | 537 ECORE_ACCEPT_BCAST; 538 539 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { 540 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 541 if (IS_VF(edev)) { 542 flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 543 DP_INFO(edev, "Enabling Tx unmatched flag for VF\n"); 544 } 545 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { 546 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 547 } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC | 548 QED_FILTER_RX_MODE_TYPE_PROMISC)) { 549 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED | 550 ECORE_ACCEPT_MCAST_UNMATCHED; 551 } 552 553 return ecore_filter_accept_cmd(edev, 0, flags, false, false, 554 ECORE_SPQ_MODE_CB, NULL); 555 } 556 557 int 558 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 559 bool add) 560 { 561 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 562 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 563 struct qede_ucast_entry *tmp = NULL; 564 struct qede_ucast_entry *u; 565 struct rte_ether_addr *mac_addr; 566 567 mac_addr = (struct rte_ether_addr *)ucast->mac; 568 if (add) { 569 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 570 if ((memcmp(mac_addr, &tmp->mac, 571 RTE_ETHER_ADDR_LEN) == 0) && 572 ucast->vni == tmp->vni && 573 ucast->vlan == tmp->vlan) { 574 DP_INFO(edev, "Unicast MAC is already added" 575 " with vlan = %u, vni = %u\n", 576 ucast->vlan, ucast->vni); 577 return 0; 578 } 579 } 580 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), 581 RTE_CACHE_LINE_SIZE); 582 if (!u) { 583 DP_ERR(edev, "Did not allocate memory for ucast\n"); 584 return -ENOMEM; 585 } 586 rte_ether_addr_copy(mac_addr, &u->mac); 587 u->vlan = ucast->vlan; 588 u->vni = ucast->vni; 589 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list); 590 qdev->num_uc_addr++; 591 } else { 592 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 593 if ((memcmp(mac_addr, &tmp->mac, 594 RTE_ETHER_ADDR_LEN) == 0) && 595 ucast->vlan == tmp->vlan && 596 ucast->vni == tmp->vni) 597 break; 598 } 599 if (tmp == NULL) { 600 DP_INFO(edev, "Unicast MAC is not found\n"); 601 return -EINVAL; 602 } 603 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list); 604 qdev->num_uc_addr--; 605 } 606 607 return 0; 608 } 609 610 static int 611 qede_add_mcast_filters(struct rte_eth_dev *eth_dev, 612 struct rte_ether_addr *mc_addrs, 613 uint32_t mc_addrs_num) 614 { 615 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 616 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 617 struct ecore_filter_mcast mcast; 618 struct qede_mcast_entry *m = NULL; 619 uint8_t i; 620 int rc; 621 622 for (i = 0; i < mc_addrs_num; i++) { 623 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), 624 RTE_CACHE_LINE_SIZE); 625 if (!m) { 626 DP_ERR(edev, "Did not allocate memory for mcast\n"); 627 return -ENOMEM; 628 } 629 rte_ether_addr_copy(&mc_addrs[i], &m->mac); 630 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list); 631 } 632 memset(&mcast, 0, sizeof(mcast)); 633 mcast.num_mc_addrs = mc_addrs_num; 634 mcast.opcode = ECORE_FILTER_ADD; 635 for (i = 0; i < mc_addrs_num; i++) 636 rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *) 637 &mcast.mac[i]); 638 rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); 639 if (rc != ECORE_SUCCESS) { 640 DP_ERR(edev, "Failed to add multicast filter (rc = %d\n)", rc); 641 return -1; 642 } 643 644 return 0; 645 } 646 647 static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev) 648 { 649 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 650 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 651 struct qede_mcast_entry *tmp = NULL; 652 struct ecore_filter_mcast mcast; 653 int j; 654 int rc; 655 656 memset(&mcast, 0, sizeof(mcast)); 657 mcast.num_mc_addrs = qdev->num_mc_addr; 658 mcast.opcode = ECORE_FILTER_REMOVE; 659 j = 0; 660 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 661 rte_ether_addr_copy(&tmp->mac, 662 (struct rte_ether_addr *)&mcast.mac[j]); 663 j++; 664 } 665 rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); 666 if (rc != ECORE_SUCCESS) { 667 DP_ERR(edev, "Failed to delete multicast filter\n"); 668 return -1; 669 } 670 /* Init the list */ 671 while (!SLIST_EMPTY(&qdev->mc_list_head)) { 672 tmp = SLIST_FIRST(&qdev->mc_list_head); 673 SLIST_REMOVE_HEAD(&qdev->mc_list_head, list); 674 } 675 SLIST_INIT(&qdev->mc_list_head); 676 677 return 0; 678 } 679 680 enum _ecore_status_t 681 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 682 bool add) 683 { 684 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 685 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 686 enum _ecore_status_t rc = ECORE_INVAL; 687 688 if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) { 689 DP_ERR(edev, "Ucast filter table limit exceeded," 690 " Please enable promisc mode\n"); 691 return ECORE_INVAL; 692 } 693 694 rc = qede_ucast_filter(eth_dev, ucast, add); 695 if (rc == 0) 696 rc = ecore_filter_ucast_cmd(edev, ucast, 697 ECORE_SPQ_MODE_CB, NULL); 698 /* Indicate error only for add filter operation. 699 * Delete filter operations are not severe. 700 */ 701 if ((rc != ECORE_SUCCESS) && add) 702 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", 703 rc, add); 704 705 return rc; 706 } 707 708 static int 709 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr, 710 __rte_unused uint32_t index, __rte_unused uint32_t pool) 711 { 712 struct ecore_filter_ucast ucast; 713 int re; 714 715 if (!rte_is_valid_assigned_ether_addr(mac_addr)) 716 return -EINVAL; 717 718 qede_set_ucast_cmn_params(&ucast); 719 ucast.opcode = ECORE_FILTER_ADD; 720 ucast.type = ECORE_FILTER_MAC; 721 rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac); 722 re = (int)qede_mac_int_ops(eth_dev, &ucast, 1); 723 return re; 724 } 725 726 static void 727 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) 728 { 729 struct qede_dev *qdev = eth_dev->data->dev_private; 730 struct ecore_dev *edev = &qdev->edev; 731 struct ecore_filter_ucast ucast; 732 733 PMD_INIT_FUNC_TRACE(edev); 734 735 if (index >= qdev->dev_info.num_mac_filters) { 736 DP_ERR(edev, "Index %u is above MAC filter limit %u\n", 737 index, qdev->dev_info.num_mac_filters); 738 return; 739 } 740 741 if (!rte_is_valid_assigned_ether_addr(ð_dev->data->mac_addrs[index])) 742 return; 743 744 qede_set_ucast_cmn_params(&ucast); 745 ucast.opcode = ECORE_FILTER_REMOVE; 746 ucast.type = ECORE_FILTER_MAC; 747 748 /* Use the index maintained by rte */ 749 rte_ether_addr_copy(ð_dev->data->mac_addrs[index], 750 (struct rte_ether_addr *)&ucast.mac); 751 752 qede_mac_int_ops(eth_dev, &ucast, false); 753 } 754 755 static int 756 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr) 757 { 758 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 759 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 760 761 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), 762 mac_addr->addr_bytes)) { 763 DP_ERR(edev, "Setting MAC address is not allowed\n"); 764 return -EPERM; 765 } 766 767 qede_mac_addr_remove(eth_dev, 0); 768 769 return qede_mac_addr_add(eth_dev, mac_addr, 0, 0); 770 } 771 772 void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg) 773 { 774 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 775 struct ecore_sp_vport_update_params params; 776 struct ecore_hwfn *p_hwfn; 777 uint8_t i; 778 int rc; 779 780 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 781 params.vport_id = 0; 782 params.update_accept_any_vlan_flg = 1; 783 params.accept_any_vlan = flg; 784 for_each_hwfn(edev, i) { 785 p_hwfn = &edev->hwfns[i]; 786 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 787 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 788 ECORE_SPQ_MODE_EBLOCK, NULL); 789 if (rc != ECORE_SUCCESS) { 790 DP_ERR(edev, "Failed to configure accept-any-vlan\n"); 791 return; 792 } 793 } 794 795 DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled"); 796 } 797 798 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg) 799 { 800 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 801 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 802 struct ecore_sp_vport_update_params params; 803 struct ecore_hwfn *p_hwfn; 804 uint8_t i; 805 int rc; 806 807 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 808 params.vport_id = 0; 809 params.update_inner_vlan_removal_flg = 1; 810 params.inner_vlan_removal_flg = flg; 811 for_each_hwfn(edev, i) { 812 p_hwfn = &edev->hwfns[i]; 813 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 814 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 815 ECORE_SPQ_MODE_EBLOCK, NULL); 816 if (rc != ECORE_SUCCESS) { 817 DP_ERR(edev, "Failed to update vport\n"); 818 return -1; 819 } 820 } 821 822 DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled"); 823 return 0; 824 } 825 826 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, 827 uint16_t vlan_id, int on) 828 { 829 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 830 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 831 struct qed_dev_eth_info *dev_info = &qdev->dev_info; 832 struct qede_vlan_entry *tmp = NULL; 833 struct qede_vlan_entry *vlan; 834 struct ecore_filter_ucast ucast; 835 int rc; 836 837 if (on) { 838 if (qdev->configured_vlans == dev_info->num_vlan_filters) { 839 DP_ERR(edev, "Reached max VLAN filter limit" 840 " enabling accept_any_vlan\n"); 841 qede_config_accept_any_vlan(qdev, true); 842 return 0; 843 } 844 845 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 846 if (tmp->vid == vlan_id) { 847 DP_INFO(edev, "VLAN %u already configured\n", 848 vlan_id); 849 return 0; 850 } 851 } 852 853 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry), 854 RTE_CACHE_LINE_SIZE); 855 856 if (!vlan) { 857 DP_ERR(edev, "Did not allocate memory for VLAN\n"); 858 return -ENOMEM; 859 } 860 861 qede_set_ucast_cmn_params(&ucast); 862 ucast.opcode = ECORE_FILTER_ADD; 863 ucast.type = ECORE_FILTER_VLAN; 864 ucast.vlan = vlan_id; 865 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 866 NULL); 867 if (rc != 0) { 868 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id, 869 rc); 870 rte_free(vlan); 871 } else { 872 vlan->vid = vlan_id; 873 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list); 874 qdev->configured_vlans++; 875 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n", 876 vlan_id, qdev->configured_vlans); 877 } 878 } else { 879 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 880 if (tmp->vid == vlan_id) 881 break; 882 } 883 884 if (!tmp) { 885 if (qdev->configured_vlans == 0) { 886 DP_INFO(edev, 887 "No VLAN filters configured yet\n"); 888 return 0; 889 } 890 891 DP_ERR(edev, "VLAN %u not configured\n", vlan_id); 892 return -EINVAL; 893 } 894 895 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list); 896 897 qede_set_ucast_cmn_params(&ucast); 898 ucast.opcode = ECORE_FILTER_REMOVE; 899 ucast.type = ECORE_FILTER_VLAN; 900 ucast.vlan = vlan_id; 901 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 902 NULL); 903 if (rc != 0) { 904 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", 905 vlan_id, rc); 906 } else { 907 qdev->configured_vlans--; 908 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n", 909 vlan_id, qdev->configured_vlans); 910 } 911 } 912 913 return rc; 914 } 915 916 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 917 { 918 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 919 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 920 uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; 921 922 if (mask & ETH_VLAN_STRIP_MASK) { 923 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 924 (void)qede_vlan_stripping(eth_dev, 1); 925 else 926 (void)qede_vlan_stripping(eth_dev, 0); 927 } 928 929 if (mask & ETH_VLAN_FILTER_MASK) { 930 /* VLAN filtering kicks in when a VLAN is added */ 931 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) { 932 qede_vlan_filter_set(eth_dev, 0, 1); 933 } else { 934 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ 935 DP_ERR(edev, 936 " Please remove existing VLAN filters" 937 " before disabling VLAN filtering\n"); 938 /* Signal app that VLAN filtering is still 939 * enabled 940 */ 941 eth_dev->data->dev_conf.rxmode.offloads |= 942 DEV_RX_OFFLOAD_VLAN_FILTER; 943 } else { 944 qede_vlan_filter_set(eth_dev, 0, 0); 945 } 946 } 947 } 948 949 if (mask & ETH_VLAN_EXTEND_MASK) 950 DP_ERR(edev, "Extend VLAN not supported\n"); 951 952 qdev->vlan_offload_mask = mask; 953 954 DP_INFO(edev, "VLAN offload mask %d\n", mask); 955 956 return 0; 957 } 958 959 static void qede_prandom_bytes(uint32_t *buff) 960 { 961 uint8_t i; 962 963 srand((unsigned int)time(NULL)); 964 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 965 buff[i] = rand(); 966 } 967 968 int qede_config_rss(struct rte_eth_dev *eth_dev) 969 { 970 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 971 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 972 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE]; 973 struct rte_eth_rss_reta_entry64 reta_conf[2]; 974 struct rte_eth_rss_conf rss_conf; 975 uint32_t i, id, pos, q; 976 977 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 978 if (!rss_conf.rss_key) { 979 DP_INFO(edev, "Applying driver default key\n"); 980 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 981 qede_prandom_bytes(&def_rss_key[0]); 982 rss_conf.rss_key = (uint8_t *)&def_rss_key[0]; 983 } 984 985 /* Configure RSS hash */ 986 if (qede_rss_hash_update(eth_dev, &rss_conf)) 987 return -EINVAL; 988 989 /* Configure default RETA */ 990 memset(reta_conf, 0, sizeof(reta_conf)); 991 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) 992 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX; 993 994 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 995 id = i / RTE_RETA_GROUP_SIZE; 996 pos = i % RTE_RETA_GROUP_SIZE; 997 q = i % QEDE_RSS_COUNT(eth_dev); 998 reta_conf[id].reta[pos] = q; 999 } 1000 if (qede_rss_reta_update(eth_dev, &reta_conf[0], 1001 ECORE_RSS_IND_TABLE_SIZE)) 1002 return -EINVAL; 1003 1004 return 0; 1005 } 1006 1007 static void qede_fastpath_start(struct ecore_dev *edev) 1008 { 1009 struct ecore_hwfn *p_hwfn; 1010 int i; 1011 1012 for_each_hwfn(edev, i) { 1013 p_hwfn = &edev->hwfns[i]; 1014 ecore_hw_start_fastpath(p_hwfn); 1015 } 1016 } 1017 1018 static int qede_dev_start(struct rte_eth_dev *eth_dev) 1019 { 1020 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1021 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1022 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 1023 1024 PMD_INIT_FUNC_TRACE(edev); 1025 1026 /* Update MTU only if it has changed */ 1027 if (eth_dev->data->mtu != qdev->mtu) { 1028 if (qede_update_mtu(eth_dev, qdev->mtu)) 1029 goto err; 1030 } 1031 1032 /* Configure TPA parameters */ 1033 if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) { 1034 if (qede_enable_tpa(eth_dev, true)) 1035 return -EINVAL; 1036 /* Enable scatter mode for LRO */ 1037 if (!eth_dev->data->scattered_rx) 1038 rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER; 1039 } 1040 1041 /* Start queues */ 1042 if (qede_start_queues(eth_dev)) 1043 goto err; 1044 1045 if (IS_PF(edev)) 1046 qede_reset_queue_stats(qdev, true); 1047 1048 /* Newer SR-IOV PF driver expects RX/TX queues to be started before 1049 * enabling RSS. Hence RSS configuration is deferred upto this point. 1050 * Also, we would like to retain similar behavior in PF case, so we 1051 * don't do PF/VF specific check here. 1052 */ 1053 if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) 1054 if (qede_config_rss(eth_dev)) 1055 goto err; 1056 1057 /* Enable vport*/ 1058 if (qede_activate_vport(eth_dev, true)) 1059 goto err; 1060 1061 /* Update link status */ 1062 qede_link_update(eth_dev, 0); 1063 1064 /* Start/resume traffic */ 1065 qede_fastpath_start(edev); 1066 1067 DP_INFO(edev, "Device started\n"); 1068 1069 return 0; 1070 err: 1071 DP_ERR(edev, "Device start fails\n"); 1072 return -1; /* common error code is < 0 */ 1073 } 1074 1075 static void qede_dev_stop(struct rte_eth_dev *eth_dev) 1076 { 1077 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1078 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1079 1080 PMD_INIT_FUNC_TRACE(edev); 1081 1082 /* Disable vport */ 1083 if (qede_activate_vport(eth_dev, false)) 1084 return; 1085 1086 if (qdev->enable_lro) 1087 qede_enable_tpa(eth_dev, false); 1088 1089 /* Stop queues */ 1090 qede_stop_queues(eth_dev); 1091 1092 /* Disable traffic */ 1093 ecore_hw_stop_fastpath(edev); /* TBD - loop */ 1094 1095 DP_INFO(edev, "Device is stopped\n"); 1096 } 1097 1098 static const char * const valid_args[] = { 1099 QEDE_NPAR_TX_SWITCHING, 1100 QEDE_VF_TX_SWITCHING, 1101 NULL, 1102 }; 1103 1104 static int qede_args_check(const char *key, const char *val, void *opaque) 1105 { 1106 unsigned long tmp; 1107 int ret = 0; 1108 struct rte_eth_dev *eth_dev = opaque; 1109 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1110 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1111 1112 errno = 0; 1113 tmp = strtoul(val, NULL, 0); 1114 if (errno) { 1115 DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val); 1116 return errno; 1117 } 1118 1119 if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) || 1120 ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) { 1121 qdev->enable_tx_switching = !!tmp; 1122 DP_INFO(edev, "Disabling %s tx-switching\n", 1123 strcmp(QEDE_NPAR_TX_SWITCHING, key) ? 1124 "VF" : "NPAR"); 1125 } 1126 1127 return ret; 1128 } 1129 1130 static int qede_args(struct rte_eth_dev *eth_dev) 1131 { 1132 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 1133 struct rte_kvargs *kvlist; 1134 struct rte_devargs *devargs; 1135 int ret; 1136 int i; 1137 1138 devargs = pci_dev->device.devargs; 1139 if (!devargs) 1140 return 0; /* return success */ 1141 1142 kvlist = rte_kvargs_parse(devargs->args, valid_args); 1143 if (kvlist == NULL) 1144 return -EINVAL; 1145 1146 /* Process parameters. */ 1147 for (i = 0; (valid_args[i] != NULL); ++i) { 1148 if (rte_kvargs_count(kvlist, valid_args[i])) { 1149 ret = rte_kvargs_process(kvlist, valid_args[i], 1150 qede_args_check, eth_dev); 1151 if (ret != ECORE_SUCCESS) { 1152 rte_kvargs_free(kvlist); 1153 return ret; 1154 } 1155 } 1156 } 1157 rte_kvargs_free(kvlist); 1158 1159 return 0; 1160 } 1161 1162 static int qede_dev_configure(struct rte_eth_dev *eth_dev) 1163 { 1164 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1165 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1166 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 1167 int ret; 1168 1169 PMD_INIT_FUNC_TRACE(edev); 1170 1171 /* We need to have min 1 RX queue.There is no min check in 1172 * rte_eth_dev_configure(), so we are checking it here. 1173 */ 1174 if (eth_dev->data->nb_rx_queues == 0) { 1175 DP_ERR(edev, "Minimum one RX queue is required\n"); 1176 return -EINVAL; 1177 } 1178 1179 /* Enable Tx switching by default */ 1180 qdev->enable_tx_switching = 1; 1181 1182 /* Parse devargs and fix up rxmode */ 1183 if (qede_args(eth_dev)) 1184 DP_NOTICE(edev, false, 1185 "Invalid devargs supplied, requested change will not take effect\n"); 1186 1187 if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || 1188 rxmode->mq_mode == ETH_MQ_RX_RSS)) { 1189 DP_ERR(edev, "Unsupported multi-queue mode\n"); 1190 return -ENOTSUP; 1191 } 1192 /* Flow director mode check */ 1193 if (qede_check_fdir_support(eth_dev)) 1194 return -ENOTSUP; 1195 1196 qede_dealloc_fp_resc(eth_dev); 1197 qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns; 1198 qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns; 1199 1200 if (qede_alloc_fp_resc(qdev)) 1201 return -ENOMEM; 1202 1203 /* If jumbo enabled adjust MTU */ 1204 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) 1205 eth_dev->data->mtu = 1206 eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - 1207 RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD; 1208 1209 if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) 1210 eth_dev->data->scattered_rx = 1; 1211 1212 if (qede_start_vport(qdev, eth_dev->data->mtu)) 1213 return -1; 1214 1215 qdev->mtu = eth_dev->data->mtu; 1216 1217 /* Enable VLAN offloads by default */ 1218 ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | 1219 ETH_VLAN_FILTER_MASK); 1220 if (ret) 1221 return ret; 1222 1223 DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n", 1224 QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev)); 1225 1226 if (ECORE_IS_CMT(edev)) 1227 DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n", 1228 qdev->num_rx_queues, qdev->num_tx_queues); 1229 1230 1231 return 0; 1232 } 1233 1234 /* Info about HW descriptor ring limitations */ 1235 static const struct rte_eth_desc_lim qede_rx_desc_lim = { 1236 .nb_max = 0x8000, /* 32K */ 1237 .nb_min = 128, 1238 .nb_align = 128 /* lowest common multiple */ 1239 }; 1240 1241 static const struct rte_eth_desc_lim qede_tx_desc_lim = { 1242 .nb_max = 0x8000, /* 32K */ 1243 .nb_min = 256, 1244 .nb_align = 256, 1245 .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET, 1246 .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 1247 }; 1248 1249 static int 1250 qede_dev_info_get(struct rte_eth_dev *eth_dev, 1251 struct rte_eth_dev_info *dev_info) 1252 { 1253 struct qede_dev *qdev = eth_dev->data->dev_private; 1254 struct ecore_dev *edev = &qdev->edev; 1255 struct qed_link_output link; 1256 uint32_t speed_cap = 0; 1257 1258 PMD_INIT_FUNC_TRACE(edev); 1259 1260 dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; 1261 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; 1262 dev_info->rx_desc_lim = qede_rx_desc_lim; 1263 dev_info->tx_desc_lim = qede_tx_desc_lim; 1264 1265 if (IS_PF(edev)) 1266 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1267 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2); 1268 else 1269 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 1270 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF); 1271 /* Since CMT mode internally doubles the number of queues */ 1272 if (ECORE_IS_CMT(edev)) 1273 dev_info->max_rx_queues = dev_info->max_rx_queues / 2; 1274 1275 dev_info->max_tx_queues = dev_info->max_rx_queues; 1276 1277 dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters; 1278 dev_info->max_vfs = 0; 1279 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; 1280 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 1281 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; 1282 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM | 1283 DEV_RX_OFFLOAD_UDP_CKSUM | 1284 DEV_RX_OFFLOAD_TCP_CKSUM | 1285 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1286 DEV_RX_OFFLOAD_TCP_LRO | 1287 DEV_RX_OFFLOAD_KEEP_CRC | 1288 DEV_RX_OFFLOAD_SCATTER | 1289 DEV_RX_OFFLOAD_JUMBO_FRAME | 1290 DEV_RX_OFFLOAD_VLAN_FILTER | 1291 DEV_RX_OFFLOAD_VLAN_STRIP); 1292 dev_info->rx_queue_offload_capa = 0; 1293 1294 /* TX offloads are on a per-packet basis, so it is applicable 1295 * to both at port and queue levels. 1296 */ 1297 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | 1298 DEV_TX_OFFLOAD_IPV4_CKSUM | 1299 DEV_TX_OFFLOAD_UDP_CKSUM | 1300 DEV_TX_OFFLOAD_TCP_CKSUM | 1301 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 1302 DEV_TX_OFFLOAD_MULTI_SEGS | 1303 DEV_TX_OFFLOAD_TCP_TSO | 1304 DEV_TX_OFFLOAD_VXLAN_TNL_TSO | 1305 DEV_TX_OFFLOAD_GENEVE_TNL_TSO); 1306 dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa; 1307 1308 dev_info->default_txconf = (struct rte_eth_txconf) { 1309 .offloads = DEV_TX_OFFLOAD_MULTI_SEGS, 1310 }; 1311 1312 dev_info->default_rxconf = (struct rte_eth_rxconf) { 1313 /* Packets are always dropped if no descriptors are available */ 1314 .rx_drop_en = 1, 1315 .offloads = 0, 1316 }; 1317 1318 memset(&link, 0, sizeof(struct qed_link_output)); 1319 qdev->ops->common->get_link(edev, &link); 1320 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1321 speed_cap |= ETH_LINK_SPEED_1G; 1322 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1323 speed_cap |= ETH_LINK_SPEED_10G; 1324 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1325 speed_cap |= ETH_LINK_SPEED_25G; 1326 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1327 speed_cap |= ETH_LINK_SPEED_40G; 1328 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1329 speed_cap |= ETH_LINK_SPEED_50G; 1330 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1331 speed_cap |= ETH_LINK_SPEED_100G; 1332 dev_info->speed_capa = speed_cap; 1333 1334 return 0; 1335 } 1336 1337 /* return 0 means link status changed, -1 means not changed */ 1338 int 1339 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) 1340 { 1341 struct qede_dev *qdev = eth_dev->data->dev_private; 1342 struct ecore_dev *edev = &qdev->edev; 1343 struct qed_link_output q_link; 1344 struct rte_eth_link link; 1345 uint16_t link_duplex; 1346 1347 memset(&q_link, 0, sizeof(q_link)); 1348 memset(&link, 0, sizeof(link)); 1349 1350 qdev->ops->common->get_link(edev, &q_link); 1351 1352 /* Link Speed */ 1353 link.link_speed = q_link.speed; 1354 1355 /* Link Mode */ 1356 switch (q_link.duplex) { 1357 case QEDE_DUPLEX_HALF: 1358 link_duplex = ETH_LINK_HALF_DUPLEX; 1359 break; 1360 case QEDE_DUPLEX_FULL: 1361 link_duplex = ETH_LINK_FULL_DUPLEX; 1362 break; 1363 case QEDE_DUPLEX_UNKNOWN: 1364 default: 1365 link_duplex = -1; 1366 } 1367 link.link_duplex = link_duplex; 1368 1369 /* Link Status */ 1370 link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN; 1371 1372 /* AN */ 1373 link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? 1374 ETH_LINK_AUTONEG : ETH_LINK_FIXED; 1375 1376 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", 1377 link.link_speed, link.link_duplex, 1378 link.link_autoneg, link.link_status); 1379 1380 return rte_eth_linkstatus_set(eth_dev, &link); 1381 } 1382 1383 static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev) 1384 { 1385 struct qede_dev *qdev = eth_dev->data->dev_private; 1386 struct ecore_dev *edev = &qdev->edev; 1387 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; 1388 enum _ecore_status_t ecore_status; 1389 1390 PMD_INIT_FUNC_TRACE(edev); 1391 1392 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1393 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1394 1395 ecore_status = qed_configure_filter_rx_mode(eth_dev, type); 1396 1397 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1398 } 1399 1400 static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev) 1401 { 1402 struct qede_dev *qdev = eth_dev->data->dev_private; 1403 struct ecore_dev *edev = &qdev->edev; 1404 enum _ecore_status_t ecore_status; 1405 1406 PMD_INIT_FUNC_TRACE(edev); 1407 1408 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1409 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1410 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC); 1411 else 1412 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1413 QED_FILTER_RX_MODE_TYPE_REGULAR); 1414 1415 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1416 } 1417 1418 static void qede_poll_sp_sb_cb(void *param) 1419 { 1420 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 1421 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1422 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1423 int rc; 1424 1425 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 1426 qede_interrupt_action(&edev->hwfns[1]); 1427 1428 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, 1429 qede_poll_sp_sb_cb, 1430 (void *)eth_dev); 1431 if (rc != 0) { 1432 DP_ERR(edev, "Unable to start periodic" 1433 " timer rc %d\n", rc); 1434 } 1435 } 1436 1437 static void qede_dev_close(struct rte_eth_dev *eth_dev) 1438 { 1439 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 1440 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1441 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1442 1443 PMD_INIT_FUNC_TRACE(edev); 1444 1445 /* dev_stop() shall cleanup fp resources in hw but without releasing 1446 * dma memories and sw structures so that dev_start() can be called 1447 * by the app without reconfiguration. However, in dev_close() we 1448 * can release all the resources and device can be brought up newly 1449 */ 1450 if (eth_dev->data->dev_started) 1451 qede_dev_stop(eth_dev); 1452 1453 qede_stop_vport(edev); 1454 qdev->vport_started = false; 1455 qede_fdir_dealloc_resc(eth_dev); 1456 qede_dealloc_fp_resc(eth_dev); 1457 1458 eth_dev->data->nb_rx_queues = 0; 1459 eth_dev->data->nb_tx_queues = 0; 1460 1461 /* Bring the link down */ 1462 qede_dev_set_link_state(eth_dev, false); 1463 qdev->ops->common->slowpath_stop(edev); 1464 qdev->ops->common->remove(edev); 1465 rte_intr_disable(&pci_dev->intr_handle); 1466 1467 switch (pci_dev->intr_handle.type) { 1468 case RTE_INTR_HANDLE_UIO_INTX: 1469 case RTE_INTR_HANDLE_VFIO_LEGACY: 1470 rte_intr_callback_unregister(&pci_dev->intr_handle, 1471 qede_interrupt_handler_intx, 1472 (void *)eth_dev); 1473 break; 1474 default: 1475 rte_intr_callback_unregister(&pci_dev->intr_handle, 1476 qede_interrupt_handler, 1477 (void *)eth_dev); 1478 } 1479 1480 if (ECORE_IS_CMT(edev)) 1481 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); 1482 } 1483 1484 static int 1485 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) 1486 { 1487 struct qede_dev *qdev = eth_dev->data->dev_private; 1488 struct ecore_dev *edev = &qdev->edev; 1489 struct ecore_eth_stats stats; 1490 unsigned int i = 0, j = 0, qid, idx, hw_fn; 1491 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 1492 struct qede_tx_queue *txq; 1493 1494 ecore_get_vport_stats(edev, &stats); 1495 1496 /* RX Stats */ 1497 eth_stats->ipackets = stats.common.rx_ucast_pkts + 1498 stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts; 1499 1500 eth_stats->ibytes = stats.common.rx_ucast_bytes + 1501 stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes; 1502 1503 eth_stats->ierrors = stats.common.rx_crc_errors + 1504 stats.common.rx_align_errors + 1505 stats.common.rx_carrier_errors + 1506 stats.common.rx_oversize_packets + 1507 stats.common.rx_jabbers + stats.common.rx_undersize_packets; 1508 1509 eth_stats->rx_nombuf = stats.common.no_buff_discards; 1510 1511 eth_stats->imissed = stats.common.mftag_filter_discards + 1512 stats.common.mac_filter_discards + 1513 stats.common.no_buff_discards + 1514 stats.common.brb_truncates + stats.common.brb_discards; 1515 1516 /* TX stats */ 1517 eth_stats->opackets = stats.common.tx_ucast_pkts + 1518 stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts; 1519 1520 eth_stats->obytes = stats.common.tx_ucast_bytes + 1521 stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes; 1522 1523 eth_stats->oerrors = stats.common.tx_err_drop_pkts; 1524 1525 /* Queue stats */ 1526 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev), 1527 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1528 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev), 1529 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1530 if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) || 1531 txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev)) 1532 DP_VERBOSE(edev, ECORE_MSG_DEBUG, 1533 "Not all the queue stats will be displayed. Set" 1534 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" 1535 " appropriately and retry.\n"); 1536 1537 for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) { 1538 eth_stats->q_ipackets[i] = 0; 1539 eth_stats->q_errors[i] = 0; 1540 1541 for_each_hwfn(edev, hw_fn) { 1542 idx = qid * edev->num_hwfns + hw_fn; 1543 1544 eth_stats->q_ipackets[i] += 1545 *(uint64_t *) 1546 (((char *)(qdev->fp_array[idx].rxq)) + 1547 offsetof(struct qede_rx_queue, 1548 rcv_pkts)); 1549 eth_stats->q_errors[i] += 1550 *(uint64_t *) 1551 (((char *)(qdev->fp_array[idx].rxq)) + 1552 offsetof(struct qede_rx_queue, 1553 rx_hw_errors)) + 1554 *(uint64_t *) 1555 (((char *)(qdev->fp_array[idx].rxq)) + 1556 offsetof(struct qede_rx_queue, 1557 rx_alloc_errors)); 1558 } 1559 1560 i++; 1561 if (i == rxq_stat_cntrs) 1562 break; 1563 } 1564 1565 for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) { 1566 eth_stats->q_opackets[j] = 0; 1567 1568 for_each_hwfn(edev, hw_fn) { 1569 idx = qid * edev->num_hwfns + hw_fn; 1570 1571 txq = qdev->fp_array[idx].txq; 1572 eth_stats->q_opackets[j] += 1573 *((uint64_t *)(uintptr_t) 1574 (((uint64_t)(uintptr_t)(txq)) + 1575 offsetof(struct qede_tx_queue, 1576 xmit_pkts))); 1577 } 1578 1579 j++; 1580 if (j == txq_stat_cntrs) 1581 break; 1582 } 1583 1584 return 0; 1585 } 1586 1587 static unsigned 1588 qede_get_xstats_count(struct qede_dev *qdev) { 1589 struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; 1590 1591 if (ECORE_IS_BB(&qdev->edev)) 1592 return RTE_DIM(qede_xstats_strings) + 1593 RTE_DIM(qede_bb_xstats_strings) + 1594 (RTE_DIM(qede_rxq_xstats_strings) * 1595 QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns); 1596 else 1597 return RTE_DIM(qede_xstats_strings) + 1598 RTE_DIM(qede_ah_xstats_strings) + 1599 (RTE_DIM(qede_rxq_xstats_strings) * 1600 QEDE_RSS_COUNT(dev)); 1601 } 1602 1603 static int 1604 qede_get_xstats_names(struct rte_eth_dev *dev, 1605 struct rte_eth_xstat_name *xstats_names, 1606 __rte_unused unsigned int limit) 1607 { 1608 struct qede_dev *qdev = dev->data->dev_private; 1609 struct ecore_dev *edev = &qdev->edev; 1610 const unsigned int stat_cnt = qede_get_xstats_count(qdev); 1611 unsigned int i, qid, hw_fn, stat_idx = 0; 1612 1613 if (xstats_names == NULL) 1614 return stat_cnt; 1615 1616 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1617 strlcpy(xstats_names[stat_idx].name, 1618 qede_xstats_strings[i].name, 1619 sizeof(xstats_names[stat_idx].name)); 1620 stat_idx++; 1621 } 1622 1623 if (ECORE_IS_BB(edev)) { 1624 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { 1625 strlcpy(xstats_names[stat_idx].name, 1626 qede_bb_xstats_strings[i].name, 1627 sizeof(xstats_names[stat_idx].name)); 1628 stat_idx++; 1629 } 1630 } else { 1631 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { 1632 strlcpy(xstats_names[stat_idx].name, 1633 qede_ah_xstats_strings[i].name, 1634 sizeof(xstats_names[stat_idx].name)); 1635 stat_idx++; 1636 } 1637 } 1638 1639 for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) { 1640 for_each_hwfn(edev, hw_fn) { 1641 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1642 snprintf(xstats_names[stat_idx].name, 1643 RTE_ETH_XSTATS_NAME_SIZE, 1644 "%.4s%d.%d%s", 1645 qede_rxq_xstats_strings[i].name, 1646 hw_fn, qid, 1647 qede_rxq_xstats_strings[i].name + 4); 1648 stat_idx++; 1649 } 1650 } 1651 } 1652 1653 return stat_cnt; 1654 } 1655 1656 static int 1657 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1658 unsigned int n) 1659 { 1660 struct qede_dev *qdev = dev->data->dev_private; 1661 struct ecore_dev *edev = &qdev->edev; 1662 struct ecore_eth_stats stats; 1663 const unsigned int num = qede_get_xstats_count(qdev); 1664 unsigned int i, qid, hw_fn, fpidx, stat_idx = 0; 1665 1666 if (n < num) 1667 return num; 1668 1669 ecore_get_vport_stats(edev, &stats); 1670 1671 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1672 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) + 1673 qede_xstats_strings[i].offset); 1674 xstats[stat_idx].id = stat_idx; 1675 stat_idx++; 1676 } 1677 1678 if (ECORE_IS_BB(edev)) { 1679 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { 1680 xstats[stat_idx].value = 1681 *(uint64_t *)(((char *)&stats) + 1682 qede_bb_xstats_strings[i].offset); 1683 xstats[stat_idx].id = stat_idx; 1684 stat_idx++; 1685 } 1686 } else { 1687 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { 1688 xstats[stat_idx].value = 1689 *(uint64_t *)(((char *)&stats) + 1690 qede_ah_xstats_strings[i].offset); 1691 xstats[stat_idx].id = stat_idx; 1692 stat_idx++; 1693 } 1694 } 1695 1696 for (qid = 0; qid < dev->data->nb_rx_queues; qid++) { 1697 for_each_hwfn(edev, hw_fn) { 1698 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1699 fpidx = qid * edev->num_hwfns + hw_fn; 1700 xstats[stat_idx].value = *(uint64_t *) 1701 (((char *)(qdev->fp_array[fpidx].rxq)) + 1702 qede_rxq_xstats_strings[i].offset); 1703 xstats[stat_idx].id = stat_idx; 1704 stat_idx++; 1705 } 1706 1707 } 1708 } 1709 1710 return stat_idx; 1711 } 1712 1713 static int 1714 qede_reset_xstats(struct rte_eth_dev *dev) 1715 { 1716 struct qede_dev *qdev = dev->data->dev_private; 1717 struct ecore_dev *edev = &qdev->edev; 1718 1719 ecore_reset_vport_stats(edev); 1720 qede_reset_queue_stats(qdev, true); 1721 1722 return 0; 1723 } 1724 1725 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) 1726 { 1727 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1728 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1729 struct qed_link_params link_params; 1730 int rc; 1731 1732 DP_INFO(edev, "setting link state %d\n", link_up); 1733 memset(&link_params, 0, sizeof(link_params)); 1734 link_params.link_up = link_up; 1735 rc = qdev->ops->common->set_link(edev, &link_params); 1736 if (rc != ECORE_SUCCESS) 1737 DP_ERR(edev, "Unable to set link state %d\n", link_up); 1738 1739 return rc; 1740 } 1741 1742 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev) 1743 { 1744 return qede_dev_set_link_state(eth_dev, true); 1745 } 1746 1747 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev) 1748 { 1749 return qede_dev_set_link_state(eth_dev, false); 1750 } 1751 1752 static int qede_reset_stats(struct rte_eth_dev *eth_dev) 1753 { 1754 struct qede_dev *qdev = eth_dev->data->dev_private; 1755 struct ecore_dev *edev = &qdev->edev; 1756 1757 ecore_reset_vport_stats(edev); 1758 qede_reset_queue_stats(qdev, false); 1759 1760 return 0; 1761 } 1762 1763 static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev) 1764 { 1765 enum qed_filter_rx_mode_type type = 1766 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1767 enum _ecore_status_t ecore_status; 1768 1769 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1770 type |= QED_FILTER_RX_MODE_TYPE_PROMISC; 1771 1772 ecore_status = qed_configure_filter_rx_mode(eth_dev, type); 1773 1774 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1775 } 1776 1777 static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev) 1778 { 1779 enum _ecore_status_t ecore_status; 1780 1781 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1782 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1783 QED_FILTER_RX_MODE_TYPE_PROMISC); 1784 else 1785 ecore_status = qed_configure_filter_rx_mode(eth_dev, 1786 QED_FILTER_RX_MODE_TYPE_REGULAR); 1787 1788 return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN; 1789 } 1790 1791 static int 1792 qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, 1793 struct rte_ether_addr *mc_addrs, 1794 uint32_t mc_addrs_num) 1795 { 1796 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1797 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1798 uint8_t i; 1799 1800 if (mc_addrs_num > ECORE_MAX_MC_ADDRS) { 1801 DP_ERR(edev, "Reached max multicast filters limit," 1802 "Please enable multicast promisc mode\n"); 1803 return -ENOSPC; 1804 } 1805 1806 for (i = 0; i < mc_addrs_num; i++) { 1807 if (!rte_is_multicast_ether_addr(&mc_addrs[i])) { 1808 DP_ERR(edev, "Not a valid multicast MAC\n"); 1809 return -EINVAL; 1810 } 1811 } 1812 1813 /* Flush all existing entries */ 1814 if (qede_del_mcast_filters(eth_dev)) 1815 return -1; 1816 1817 /* Set new mcast list */ 1818 return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num); 1819 } 1820 1821 /* Update MTU via vport-update without doing port restart. 1822 * The vport must be deactivated before calling this API. 1823 */ 1824 int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu) 1825 { 1826 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1827 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1828 struct ecore_hwfn *p_hwfn; 1829 int rc; 1830 int i; 1831 1832 if (IS_PF(edev)) { 1833 struct ecore_sp_vport_update_params params; 1834 1835 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 1836 params.vport_id = 0; 1837 params.mtu = mtu; 1838 params.vport_id = 0; 1839 for_each_hwfn(edev, i) { 1840 p_hwfn = &edev->hwfns[i]; 1841 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 1842 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 1843 ECORE_SPQ_MODE_EBLOCK, NULL); 1844 if (rc != ECORE_SUCCESS) 1845 goto err; 1846 } 1847 } else { 1848 for_each_hwfn(edev, i) { 1849 p_hwfn = &edev->hwfns[i]; 1850 rc = ecore_vf_pf_update_mtu(p_hwfn, mtu); 1851 if (rc == ECORE_INVAL) { 1852 DP_INFO(edev, "VF MTU Update TLV not supported\n"); 1853 /* Recreate vport */ 1854 rc = qede_start_vport(qdev, mtu); 1855 if (rc != ECORE_SUCCESS) 1856 goto err; 1857 1858 /* Restore config lost due to vport stop */ 1859 if (eth_dev->data->promiscuous) 1860 qede_promiscuous_enable(eth_dev); 1861 else 1862 qede_promiscuous_disable(eth_dev); 1863 1864 if (eth_dev->data->all_multicast) 1865 qede_allmulticast_enable(eth_dev); 1866 else 1867 qede_allmulticast_disable(eth_dev); 1868 1869 qede_vlan_offload_set(eth_dev, 1870 qdev->vlan_offload_mask); 1871 } else if (rc != ECORE_SUCCESS) { 1872 goto err; 1873 } 1874 } 1875 } 1876 DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu); 1877 1878 return 0; 1879 1880 err: 1881 DP_ERR(edev, "Failed to update MTU\n"); 1882 return -1; 1883 } 1884 1885 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, 1886 struct rte_eth_fc_conf *fc_conf) 1887 { 1888 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1889 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1890 struct qed_link_output current_link; 1891 struct qed_link_params params; 1892 1893 memset(¤t_link, 0, sizeof(current_link)); 1894 qdev->ops->common->get_link(edev, ¤t_link); 1895 1896 memset(¶ms, 0, sizeof(params)); 1897 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; 1898 if (fc_conf->autoneg) { 1899 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) { 1900 DP_ERR(edev, "Autoneg not supported\n"); 1901 return -EINVAL; 1902 } 1903 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 1904 } 1905 1906 /* Pause is assumed to be supported (SUPPORTED_Pause) */ 1907 if (fc_conf->mode == RTE_FC_FULL) 1908 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE | 1909 QED_LINK_PAUSE_RX_ENABLE); 1910 if (fc_conf->mode == RTE_FC_TX_PAUSE) 1911 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; 1912 if (fc_conf->mode == RTE_FC_RX_PAUSE) 1913 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; 1914 1915 params.link_up = true; 1916 (void)qdev->ops->common->set_link(edev, ¶ms); 1917 1918 return 0; 1919 } 1920 1921 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev, 1922 struct rte_eth_fc_conf *fc_conf) 1923 { 1924 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1925 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1926 struct qed_link_output current_link; 1927 1928 memset(¤t_link, 0, sizeof(current_link)); 1929 qdev->ops->common->get_link(edev, ¤t_link); 1930 1931 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1932 fc_conf->autoneg = true; 1933 1934 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE | 1935 QED_LINK_PAUSE_TX_ENABLE)) 1936 fc_conf->mode = RTE_FC_FULL; 1937 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) 1938 fc_conf->mode = RTE_FC_RX_PAUSE; 1939 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) 1940 fc_conf->mode = RTE_FC_TX_PAUSE; 1941 else 1942 fc_conf->mode = RTE_FC_NONE; 1943 1944 return 0; 1945 } 1946 1947 static const uint32_t * 1948 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) 1949 { 1950 static const uint32_t ptypes[] = { 1951 RTE_PTYPE_L2_ETHER, 1952 RTE_PTYPE_L2_ETHER_VLAN, 1953 RTE_PTYPE_L3_IPV4, 1954 RTE_PTYPE_L3_IPV6, 1955 RTE_PTYPE_L4_TCP, 1956 RTE_PTYPE_L4_UDP, 1957 RTE_PTYPE_TUNNEL_VXLAN, 1958 RTE_PTYPE_L4_FRAG, 1959 RTE_PTYPE_TUNNEL_GENEVE, 1960 RTE_PTYPE_TUNNEL_GRE, 1961 /* Inner */ 1962 RTE_PTYPE_INNER_L2_ETHER, 1963 RTE_PTYPE_INNER_L2_ETHER_VLAN, 1964 RTE_PTYPE_INNER_L3_IPV4, 1965 RTE_PTYPE_INNER_L3_IPV6, 1966 RTE_PTYPE_INNER_L4_TCP, 1967 RTE_PTYPE_INNER_L4_UDP, 1968 RTE_PTYPE_INNER_L4_FRAG, 1969 RTE_PTYPE_UNKNOWN 1970 }; 1971 1972 if (eth_dev->rx_pkt_burst == qede_recv_pkts || 1973 eth_dev->rx_pkt_burst == qede_recv_pkts_cmt) 1974 return ptypes; 1975 1976 return NULL; 1977 } 1978 1979 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf) 1980 { 1981 *rss_caps = 0; 1982 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0; 1983 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0; 1984 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0; 1985 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; 1986 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0; 1987 *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0; 1988 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0; 1989 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0; 1990 } 1991 1992 int qede_rss_hash_update(struct rte_eth_dev *eth_dev, 1993 struct rte_eth_rss_conf *rss_conf) 1994 { 1995 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1996 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1997 struct ecore_sp_vport_update_params vport_update_params; 1998 struct ecore_rss_params rss_params; 1999 struct ecore_hwfn *p_hwfn; 2000 uint32_t *key = (uint32_t *)rss_conf->rss_key; 2001 uint64_t hf = rss_conf->rss_hf; 2002 uint8_t len = rss_conf->rss_key_len; 2003 uint8_t idx, i, j, fpidx; 2004 int rc; 2005 2006 memset(&vport_update_params, 0, sizeof(vport_update_params)); 2007 memset(&rss_params, 0, sizeof(rss_params)); 2008 2009 DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n", 2010 (unsigned long)hf, len, key); 2011 2012 if (hf != 0) { 2013 /* Enabling RSS */ 2014 DP_INFO(edev, "Enabling rss\n"); 2015 2016 /* RSS caps */ 2017 qede_init_rss_caps(&rss_params.rss_caps, hf); 2018 rss_params.update_rss_capabilities = 1; 2019 2020 /* RSS hash key */ 2021 if (key) { 2022 if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) { 2023 DP_ERR(edev, "RSS key length exceeds limit\n"); 2024 return -EINVAL; 2025 } 2026 DP_INFO(edev, "Applying user supplied hash key\n"); 2027 rss_params.update_rss_key = 1; 2028 memcpy(&rss_params.rss_key, key, len); 2029 } 2030 rss_params.rss_enable = 1; 2031 } 2032 2033 rss_params.update_rss_config = 1; 2034 /* tbl_size has to be set with capabilities */ 2035 rss_params.rss_table_size_log = 7; 2036 vport_update_params.vport_id = 0; 2037 2038 for_each_hwfn(edev, i) { 2039 /* pass the L2 handles instead of qids */ 2040 for (j = 0 ; j < ECORE_RSS_IND_TABLE_SIZE ; j++) { 2041 idx = j % QEDE_RSS_COUNT(eth_dev); 2042 fpidx = idx * edev->num_hwfns + i; 2043 rss_params.rss_ind_table[j] = 2044 qdev->fp_array[fpidx].rxq->handle; 2045 } 2046 2047 vport_update_params.rss_params = &rss_params; 2048 2049 p_hwfn = &edev->hwfns[i]; 2050 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 2051 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 2052 ECORE_SPQ_MODE_EBLOCK, NULL); 2053 if (rc) { 2054 DP_ERR(edev, "vport-update for RSS failed\n"); 2055 return rc; 2056 } 2057 } 2058 qdev->rss_enable = rss_params.rss_enable; 2059 2060 /* Update local structure for hash query */ 2061 qdev->rss_conf.rss_hf = hf; 2062 qdev->rss_conf.rss_key_len = len; 2063 if (qdev->rss_enable) { 2064 if (qdev->rss_conf.rss_key == NULL) { 2065 qdev->rss_conf.rss_key = (uint8_t *)malloc(len); 2066 if (qdev->rss_conf.rss_key == NULL) { 2067 DP_ERR(edev, "No memory to store RSS key\n"); 2068 return -ENOMEM; 2069 } 2070 } 2071 if (key && len) { 2072 DP_INFO(edev, "Storing RSS key\n"); 2073 memcpy(qdev->rss_conf.rss_key, key, len); 2074 } 2075 } else if (!qdev->rss_enable && len == 0) { 2076 if (qdev->rss_conf.rss_key) { 2077 free(qdev->rss_conf.rss_key); 2078 qdev->rss_conf.rss_key = NULL; 2079 DP_INFO(edev, "Free RSS key\n"); 2080 } 2081 } 2082 2083 return 0; 2084 } 2085 2086 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev, 2087 struct rte_eth_rss_conf *rss_conf) 2088 { 2089 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2090 2091 rss_conf->rss_hf = qdev->rss_conf.rss_hf; 2092 rss_conf->rss_key_len = qdev->rss_conf.rss_key_len; 2093 2094 if (rss_conf->rss_key && qdev->rss_conf.rss_key) 2095 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key, 2096 rss_conf->rss_key_len); 2097 return 0; 2098 } 2099 2100 int qede_rss_reta_update(struct rte_eth_dev *eth_dev, 2101 struct rte_eth_rss_reta_entry64 *reta_conf, 2102 uint16_t reta_size) 2103 { 2104 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2105 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2106 struct ecore_sp_vport_update_params vport_update_params; 2107 struct ecore_rss_params *params; 2108 uint16_t i, j, idx, fid, shift; 2109 struct ecore_hwfn *p_hwfn; 2110 uint8_t entry; 2111 int rc = 0; 2112 2113 if (reta_size > ETH_RSS_RETA_SIZE_128) { 2114 DP_ERR(edev, "reta_size %d is not supported by hardware\n", 2115 reta_size); 2116 return -EINVAL; 2117 } 2118 2119 memset(&vport_update_params, 0, sizeof(vport_update_params)); 2120 params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE); 2121 if (params == NULL) { 2122 DP_ERR(edev, "failed to allocate memory\n"); 2123 return -ENOMEM; 2124 } 2125 2126 params->update_rss_ind_table = 1; 2127 params->rss_table_size_log = 7; 2128 params->update_rss_config = 1; 2129 2130 vport_update_params.vport_id = 0; 2131 /* Use the current value of rss_enable */ 2132 params->rss_enable = qdev->rss_enable; 2133 vport_update_params.rss_params = params; 2134 2135 for_each_hwfn(edev, i) { 2136 for (j = 0; j < reta_size; j++) { 2137 idx = j / RTE_RETA_GROUP_SIZE; 2138 shift = j % RTE_RETA_GROUP_SIZE; 2139 if (reta_conf[idx].mask & (1ULL << shift)) { 2140 entry = reta_conf[idx].reta[shift]; 2141 fid = entry * edev->num_hwfns + i; 2142 /* Pass rxq handles to ecore */ 2143 params->rss_ind_table[j] = 2144 qdev->fp_array[fid].rxq->handle; 2145 /* Update the local copy for RETA query cmd */ 2146 qdev->rss_ind_table[j] = entry; 2147 } 2148 } 2149 2150 p_hwfn = &edev->hwfns[i]; 2151 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 2152 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 2153 ECORE_SPQ_MODE_EBLOCK, NULL); 2154 if (rc) { 2155 DP_ERR(edev, "vport-update for RSS failed\n"); 2156 goto out; 2157 } 2158 } 2159 2160 out: 2161 rte_free(params); 2162 return rc; 2163 } 2164 2165 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev, 2166 struct rte_eth_rss_reta_entry64 *reta_conf, 2167 uint16_t reta_size) 2168 { 2169 struct qede_dev *qdev = eth_dev->data->dev_private; 2170 struct ecore_dev *edev = &qdev->edev; 2171 uint16_t i, idx, shift; 2172 uint8_t entry; 2173 2174 if (reta_size > ETH_RSS_RETA_SIZE_128) { 2175 DP_ERR(edev, "reta_size %d is not supported\n", 2176 reta_size); 2177 return -EINVAL; 2178 } 2179 2180 for (i = 0; i < reta_size; i++) { 2181 idx = i / RTE_RETA_GROUP_SIZE; 2182 shift = i % RTE_RETA_GROUP_SIZE; 2183 if (reta_conf[idx].mask & (1ULL << shift)) { 2184 entry = qdev->rss_ind_table[i]; 2185 reta_conf[idx].reta[shift] = entry; 2186 } 2187 } 2188 2189 return 0; 2190 } 2191 2192 2193 2194 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 2195 { 2196 struct qede_dev *qdev = QEDE_INIT_QDEV(dev); 2197 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2198 struct rte_eth_dev_info dev_info = {0}; 2199 struct qede_fastpath *fp; 2200 uint32_t max_rx_pkt_len; 2201 uint32_t frame_size; 2202 uint16_t bufsz; 2203 bool restart = false; 2204 int i, rc; 2205 2206 PMD_INIT_FUNC_TRACE(edev); 2207 rc = qede_dev_info_get(dev, &dev_info); 2208 if (rc != 0) { 2209 DP_ERR(edev, "Error during getting ethernet device info\n"); 2210 return rc; 2211 } 2212 max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN; 2213 frame_size = max_rx_pkt_len; 2214 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) { 2215 DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n", 2216 mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN - 2217 QEDE_ETH_OVERHEAD); 2218 return -EINVAL; 2219 } 2220 if (!dev->data->scattered_rx && 2221 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 2222 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n", 2223 dev->data->min_rx_buf_size); 2224 return -EINVAL; 2225 } 2226 /* Temporarily replace I/O functions with dummy ones. It cannot 2227 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. 2228 */ 2229 dev->rx_pkt_burst = qede_rxtx_pkts_dummy; 2230 dev->tx_pkt_burst = qede_rxtx_pkts_dummy; 2231 if (dev->data->dev_started) { 2232 dev->data->dev_started = 0; 2233 qede_dev_stop(dev); 2234 restart = true; 2235 } 2236 rte_delay_ms(1000); 2237 qdev->mtu = mtu; 2238 2239 /* Fix up RX buf size for all queues of the port */ 2240 for (i = 0; i < qdev->num_rx_queues; i++) { 2241 fp = &qdev->fp_array[i]; 2242 if (fp->rxq != NULL) { 2243 bufsz = (uint16_t)rte_pktmbuf_data_room_size( 2244 fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; 2245 /* cache align the mbuf size to simplfy rx_buf_size 2246 * calculation 2247 */ 2248 bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz); 2249 rc = qede_calc_rx_buf_size(dev, bufsz, frame_size); 2250 if (rc < 0) 2251 return rc; 2252 2253 fp->rxq->rx_buf_size = rc; 2254 } 2255 } 2256 if (max_rx_pkt_len > RTE_ETHER_MAX_LEN) 2257 dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; 2258 else 2259 dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; 2260 2261 if (!dev->data->dev_started && restart) { 2262 qede_dev_start(dev); 2263 dev->data->dev_started = 1; 2264 } 2265 2266 /* update max frame size */ 2267 dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len; 2268 /* Reassign back */ 2269 if (ECORE_IS_CMT(edev)) { 2270 dev->rx_pkt_burst = qede_recv_pkts_cmt; 2271 dev->tx_pkt_burst = qede_xmit_pkts_cmt; 2272 } else { 2273 dev->rx_pkt_burst = qede_recv_pkts; 2274 dev->tx_pkt_burst = qede_xmit_pkts; 2275 } 2276 return 0; 2277 } 2278 2279 static int 2280 qede_dev_reset(struct rte_eth_dev *dev) 2281 { 2282 int ret; 2283 2284 ret = qede_eth_dev_uninit(dev); 2285 if (ret) 2286 return ret; 2287 2288 return qede_eth_dev_init(dev); 2289 } 2290 2291 static const struct eth_dev_ops qede_eth_dev_ops = { 2292 .dev_configure = qede_dev_configure, 2293 .dev_infos_get = qede_dev_info_get, 2294 .rx_queue_setup = qede_rx_queue_setup, 2295 .rx_queue_release = qede_rx_queue_release, 2296 .rx_descriptor_status = qede_rx_descriptor_status, 2297 .tx_queue_setup = qede_tx_queue_setup, 2298 .tx_queue_release = qede_tx_queue_release, 2299 .dev_start = qede_dev_start, 2300 .dev_reset = qede_dev_reset, 2301 .dev_set_link_up = qede_dev_set_link_up, 2302 .dev_set_link_down = qede_dev_set_link_down, 2303 .link_update = qede_link_update, 2304 .promiscuous_enable = qede_promiscuous_enable, 2305 .promiscuous_disable = qede_promiscuous_disable, 2306 .allmulticast_enable = qede_allmulticast_enable, 2307 .allmulticast_disable = qede_allmulticast_disable, 2308 .set_mc_addr_list = qede_set_mc_addr_list, 2309 .dev_stop = qede_dev_stop, 2310 .dev_close = qede_dev_close, 2311 .stats_get = qede_get_stats, 2312 .stats_reset = qede_reset_stats, 2313 .xstats_get = qede_get_xstats, 2314 .xstats_reset = qede_reset_xstats, 2315 .xstats_get_names = qede_get_xstats_names, 2316 .mac_addr_add = qede_mac_addr_add, 2317 .mac_addr_remove = qede_mac_addr_remove, 2318 .mac_addr_set = qede_mac_addr_set, 2319 .vlan_offload_set = qede_vlan_offload_set, 2320 .vlan_filter_set = qede_vlan_filter_set, 2321 .flow_ctrl_set = qede_flow_ctrl_set, 2322 .flow_ctrl_get = qede_flow_ctrl_get, 2323 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, 2324 .rss_hash_update = qede_rss_hash_update, 2325 .rss_hash_conf_get = qede_rss_hash_conf_get, 2326 .reta_update = qede_rss_reta_update, 2327 .reta_query = qede_rss_reta_query, 2328 .mtu_set = qede_set_mtu, 2329 .filter_ctrl = qede_dev_filter_ctrl, 2330 .udp_tunnel_port_add = qede_udp_dst_port_add, 2331 .udp_tunnel_port_del = qede_udp_dst_port_del, 2332 }; 2333 2334 static const struct eth_dev_ops qede_eth_vf_dev_ops = { 2335 .dev_configure = qede_dev_configure, 2336 .dev_infos_get = qede_dev_info_get, 2337 .rx_queue_setup = qede_rx_queue_setup, 2338 .rx_queue_release = qede_rx_queue_release, 2339 .rx_descriptor_status = qede_rx_descriptor_status, 2340 .tx_queue_setup = qede_tx_queue_setup, 2341 .tx_queue_release = qede_tx_queue_release, 2342 .dev_start = qede_dev_start, 2343 .dev_reset = qede_dev_reset, 2344 .dev_set_link_up = qede_dev_set_link_up, 2345 .dev_set_link_down = qede_dev_set_link_down, 2346 .link_update = qede_link_update, 2347 .promiscuous_enable = qede_promiscuous_enable, 2348 .promiscuous_disable = qede_promiscuous_disable, 2349 .allmulticast_enable = qede_allmulticast_enable, 2350 .allmulticast_disable = qede_allmulticast_disable, 2351 .set_mc_addr_list = qede_set_mc_addr_list, 2352 .dev_stop = qede_dev_stop, 2353 .dev_close = qede_dev_close, 2354 .stats_get = qede_get_stats, 2355 .stats_reset = qede_reset_stats, 2356 .xstats_get = qede_get_xstats, 2357 .xstats_reset = qede_reset_xstats, 2358 .xstats_get_names = qede_get_xstats_names, 2359 .vlan_offload_set = qede_vlan_offload_set, 2360 .vlan_filter_set = qede_vlan_filter_set, 2361 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, 2362 .rss_hash_update = qede_rss_hash_update, 2363 .rss_hash_conf_get = qede_rss_hash_conf_get, 2364 .reta_update = qede_rss_reta_update, 2365 .reta_query = qede_rss_reta_query, 2366 .mtu_set = qede_set_mtu, 2367 .udp_tunnel_port_add = qede_udp_dst_port_add, 2368 .udp_tunnel_port_del = qede_udp_dst_port_del, 2369 .mac_addr_add = qede_mac_addr_add, 2370 .mac_addr_remove = qede_mac_addr_remove, 2371 .mac_addr_set = qede_mac_addr_set, 2372 }; 2373 2374 static void qede_update_pf_params(struct ecore_dev *edev) 2375 { 2376 struct ecore_pf_params pf_params; 2377 2378 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 2379 pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS; 2380 pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; 2381 qed_ops->common->update_pf_params(edev, &pf_params); 2382 } 2383 2384 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) 2385 { 2386 struct rte_pci_device *pci_dev; 2387 struct rte_pci_addr pci_addr; 2388 struct qede_dev *adapter; 2389 struct ecore_dev *edev; 2390 struct qed_dev_eth_info dev_info; 2391 struct qed_slowpath_params params; 2392 static bool do_once = true; 2393 uint8_t bulletin_change; 2394 uint8_t vf_mac[RTE_ETHER_ADDR_LEN]; 2395 uint8_t is_mac_forced; 2396 bool is_mac_exist; 2397 /* Fix up ecore debug level */ 2398 uint32_t dp_module = ~0 & ~ECORE_MSG_HW; 2399 uint8_t dp_level = ECORE_LEVEL_VERBOSE; 2400 uint32_t int_mode; 2401 int rc; 2402 2403 /* Extract key data structures */ 2404 adapter = eth_dev->data->dev_private; 2405 adapter->ethdev = eth_dev; 2406 edev = &adapter->edev; 2407 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); 2408 pci_addr = pci_dev->addr; 2409 2410 PMD_INIT_FUNC_TRACE(edev); 2411 2412 snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u", 2413 pci_addr.bus, pci_addr.devid, pci_addr.function, 2414 eth_dev->data->port_id); 2415 2416 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2417 DP_ERR(edev, "Skipping device init from secondary process\n"); 2418 return 0; 2419 } 2420 2421 rte_eth_copy_pci_info(eth_dev, pci_dev); 2422 2423 /* @DPDK */ 2424 edev->vendor_id = pci_dev->id.vendor_id; 2425 edev->device_id = pci_dev->id.device_id; 2426 2427 qed_ops = qed_get_eth_ops(); 2428 if (!qed_ops) { 2429 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n"); 2430 return -EINVAL; 2431 } 2432 2433 DP_INFO(edev, "Starting qede probe\n"); 2434 rc = qed_ops->common->probe(edev, pci_dev, dp_module, 2435 dp_level, is_vf); 2436 if (rc != 0) { 2437 DP_ERR(edev, "qede probe failed rc %d\n", rc); 2438 return -ENODEV; 2439 } 2440 qede_update_pf_params(edev); 2441 2442 switch (pci_dev->intr_handle.type) { 2443 case RTE_INTR_HANDLE_UIO_INTX: 2444 case RTE_INTR_HANDLE_VFIO_LEGACY: 2445 int_mode = ECORE_INT_MODE_INTA; 2446 rte_intr_callback_register(&pci_dev->intr_handle, 2447 qede_interrupt_handler_intx, 2448 (void *)eth_dev); 2449 break; 2450 default: 2451 int_mode = ECORE_INT_MODE_MSIX; 2452 rte_intr_callback_register(&pci_dev->intr_handle, 2453 qede_interrupt_handler, 2454 (void *)eth_dev); 2455 } 2456 2457 if (rte_intr_enable(&pci_dev->intr_handle)) { 2458 DP_ERR(edev, "rte_intr_enable() failed\n"); 2459 return -ENODEV; 2460 } 2461 2462 /* Start the Slowpath-process */ 2463 memset(¶ms, 0, sizeof(struct qed_slowpath_params)); 2464 2465 params.int_mode = int_mode; 2466 params.drv_major = QEDE_PMD_VERSION_MAJOR; 2467 params.drv_minor = QEDE_PMD_VERSION_MINOR; 2468 params.drv_rev = QEDE_PMD_VERSION_REVISION; 2469 params.drv_eng = QEDE_PMD_VERSION_PATCH; 2470 strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, 2471 QEDE_PMD_DRV_VER_STR_SIZE); 2472 2473 if (ECORE_IS_CMT(edev)) { 2474 eth_dev->rx_pkt_burst = qede_recv_pkts_cmt; 2475 eth_dev->tx_pkt_burst = qede_xmit_pkts_cmt; 2476 } else { 2477 eth_dev->rx_pkt_burst = qede_recv_pkts; 2478 eth_dev->tx_pkt_burst = qede_xmit_pkts; 2479 } 2480 2481 eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; 2482 2483 /* For CMT mode device do periodic polling for slowpath events. 2484 * This is required since uio device uses only one MSI-x 2485 * interrupt vector but we need one for each engine. 2486 */ 2487 if (ECORE_IS_CMT(edev) && IS_PF(edev)) { 2488 rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD, 2489 qede_poll_sp_sb_cb, 2490 (void *)eth_dev); 2491 if (rc != 0) { 2492 DP_ERR(edev, "Unable to start periodic" 2493 " timer rc %d\n", rc); 2494 return -EINVAL; 2495 } 2496 } 2497 2498 rc = qed_ops->common->slowpath_start(edev, ¶ms); 2499 if (rc) { 2500 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc); 2501 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2502 (void *)eth_dev); 2503 return -ENODEV; 2504 } 2505 2506 rc = qed_ops->fill_dev_info(edev, &dev_info); 2507 if (rc) { 2508 DP_ERR(edev, "Cannot get device_info rc %d\n", rc); 2509 qed_ops->common->slowpath_stop(edev); 2510 qed_ops->common->remove(edev); 2511 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2512 (void *)eth_dev); 2513 return -ENODEV; 2514 } 2515 2516 qede_alloc_etherdev(adapter, &dev_info); 2517 2518 adapter->ops->common->set_name(edev, edev->name); 2519 2520 if (!is_vf) 2521 adapter->dev_info.num_mac_filters = 2522 (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev), 2523 ECORE_MAC); 2524 else 2525 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev), 2526 (uint32_t *)&adapter->dev_info.num_mac_filters); 2527 2528 /* Allocate memory for storing MAC addr */ 2529 eth_dev->data->mac_addrs = rte_zmalloc(edev->name, 2530 (RTE_ETHER_ADDR_LEN * 2531 adapter->dev_info.num_mac_filters), 2532 RTE_CACHE_LINE_SIZE); 2533 2534 if (eth_dev->data->mac_addrs == NULL) { 2535 DP_ERR(edev, "Failed to allocate MAC address\n"); 2536 qed_ops->common->slowpath_stop(edev); 2537 qed_ops->common->remove(edev); 2538 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2539 (void *)eth_dev); 2540 return -ENOMEM; 2541 } 2542 2543 if (!is_vf) { 2544 rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0]. 2545 hw_info.hw_mac_addr, 2546 ð_dev->data->mac_addrs[0]); 2547 rte_ether_addr_copy(ð_dev->data->mac_addrs[0], 2548 &adapter->primary_mac); 2549 } else { 2550 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev), 2551 &bulletin_change); 2552 if (bulletin_change) { 2553 is_mac_exist = 2554 ecore_vf_bulletin_get_forced_mac( 2555 ECORE_LEADING_HWFN(edev), 2556 vf_mac, 2557 &is_mac_forced); 2558 if (is_mac_exist) { 2559 DP_INFO(edev, "VF macaddr received from PF\n"); 2560 rte_ether_addr_copy( 2561 (struct rte_ether_addr *)&vf_mac, 2562 ð_dev->data->mac_addrs[0]); 2563 rte_ether_addr_copy( 2564 ð_dev->data->mac_addrs[0], 2565 &adapter->primary_mac); 2566 } else { 2567 DP_ERR(edev, "No VF macaddr assigned\n"); 2568 } 2569 } 2570 } 2571 2572 eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops; 2573 2574 if (do_once) { 2575 qede_print_adapter_info(adapter); 2576 do_once = false; 2577 } 2578 2579 /* Bring-up the link */ 2580 qede_dev_set_link_state(eth_dev, true); 2581 2582 adapter->num_tx_queues = 0; 2583 adapter->num_rx_queues = 0; 2584 SLIST_INIT(&adapter->arfs_info.arfs_list_head); 2585 SLIST_INIT(&adapter->vlan_list_head); 2586 SLIST_INIT(&adapter->uc_list_head); 2587 SLIST_INIT(&adapter->mc_list_head); 2588 adapter->mtu = RTE_ETHER_MTU; 2589 adapter->vport_started = false; 2590 2591 /* VF tunnel offloads is enabled by default in PF driver */ 2592 adapter->vxlan.num_filters = 0; 2593 adapter->geneve.num_filters = 0; 2594 adapter->ipgre.num_filters = 0; 2595 if (is_vf) { 2596 adapter->vxlan.enable = true; 2597 adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC | 2598 ETH_TUNNEL_FILTER_IVLAN; 2599 adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT; 2600 adapter->geneve.enable = true; 2601 adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC | 2602 ETH_TUNNEL_FILTER_IVLAN; 2603 adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT; 2604 adapter->ipgre.enable = true; 2605 adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC | 2606 ETH_TUNNEL_FILTER_IVLAN; 2607 } else { 2608 adapter->vxlan.enable = false; 2609 adapter->geneve.enable = false; 2610 adapter->ipgre.enable = false; 2611 } 2612 2613 DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", 2614 adapter->primary_mac.addr_bytes[0], 2615 adapter->primary_mac.addr_bytes[1], 2616 adapter->primary_mac.addr_bytes[2], 2617 adapter->primary_mac.addr_bytes[3], 2618 adapter->primary_mac.addr_bytes[4], 2619 adapter->primary_mac.addr_bytes[5]); 2620 2621 DP_INFO(edev, "Device initialized\n"); 2622 2623 return 0; 2624 } 2625 2626 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev) 2627 { 2628 return qede_common_dev_init(eth_dev, 1); 2629 } 2630 2631 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev) 2632 { 2633 return qede_common_dev_init(eth_dev, 0); 2634 } 2635 2636 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev) 2637 { 2638 struct qede_dev *qdev = eth_dev->data->dev_private; 2639 struct ecore_dev *edev = &qdev->edev; 2640 2641 PMD_INIT_FUNC_TRACE(edev); 2642 2643 /* only uninitialize in the primary process */ 2644 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2645 return 0; 2646 2647 /* safe to close dev here */ 2648 qede_dev_close(eth_dev); 2649 2650 eth_dev->dev_ops = NULL; 2651 eth_dev->rx_pkt_burst = NULL; 2652 eth_dev->tx_pkt_burst = NULL; 2653 2654 return 0; 2655 } 2656 2657 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2658 { 2659 return qede_dev_common_uninit(eth_dev); 2660 } 2661 2662 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2663 { 2664 return qede_dev_common_uninit(eth_dev); 2665 } 2666 2667 static const struct rte_pci_id pci_id_qedevf_map[] = { 2668 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2669 { 2670 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF) 2671 }, 2672 { 2673 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV) 2674 }, 2675 { 2676 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV) 2677 }, 2678 {.vendor_id = 0,} 2679 }; 2680 2681 static const struct rte_pci_id pci_id_qede_map[] = { 2682 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2683 { 2684 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E) 2685 }, 2686 { 2687 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S) 2688 }, 2689 { 2690 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40) 2691 }, 2692 { 2693 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25) 2694 }, 2695 { 2696 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100) 2697 }, 2698 { 2699 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50) 2700 }, 2701 { 2702 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G) 2703 }, 2704 { 2705 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G) 2706 }, 2707 { 2708 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G) 2709 }, 2710 { 2711 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G) 2712 }, 2713 {.vendor_id = 0,} 2714 }; 2715 2716 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2717 struct rte_pci_device *pci_dev) 2718 { 2719 return rte_eth_dev_pci_generic_probe(pci_dev, 2720 sizeof(struct qede_dev), qedevf_eth_dev_init); 2721 } 2722 2723 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2724 { 2725 return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit); 2726 } 2727 2728 static struct rte_pci_driver rte_qedevf_pmd = { 2729 .id_table = pci_id_qedevf_map, 2730 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2731 .probe = qedevf_eth_dev_pci_probe, 2732 .remove = qedevf_eth_dev_pci_remove, 2733 }; 2734 2735 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 2736 struct rte_pci_device *pci_dev) 2737 { 2738 return rte_eth_dev_pci_generic_probe(pci_dev, 2739 sizeof(struct qede_dev), qede_eth_dev_init); 2740 } 2741 2742 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 2743 { 2744 return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit); 2745 } 2746 2747 static struct rte_pci_driver rte_qede_pmd = { 2748 .id_table = pci_id_qede_map, 2749 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2750 .probe = qede_eth_dev_pci_probe, 2751 .remove = qede_eth_dev_pci_remove, 2752 }; 2753 2754 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd); 2755 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map); 2756 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci"); 2757 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd); 2758 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map); 2759 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci"); 2760 2761 RTE_INIT(qede_init_log) 2762 { 2763 qede_logtype_init = rte_log_register("pmd.net.qede.init"); 2764 if (qede_logtype_init >= 0) 2765 rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE); 2766 qede_logtype_driver = rte_log_register("pmd.net.qede.driver"); 2767 if (qede_logtype_driver >= 0) 2768 rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE); 2769 } 2770