/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "qede_ethdev.h"
#include <rte_string_fns.h>
#include <rte_alarm.h>
#include <rte_version.h>
#include <rte_kvargs.h>

/* Globals */
int qede_logtype_init;
int qede_logtype_driver;

static const struct qed_eth_ops *qed_ops;
static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);

#define QEDE_SP_TIMER_PERIOD	10000 /* 100ms */

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
	{"rx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},

	{"tx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
	{"tx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_1024_to_1518_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_1024_to_1518_byte_packets)},

	{"rx_mac_crtl_frames",
		offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
	{"tx_mac_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
	{"rx_pause_frames",
		offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
	{"tx_pause_frames",
		offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},

	{"rx_crc_errors",
		offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
	{"rx_align_errors",
		offsetof(struct ecore_eth_stats_common, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
	{"rx_jabber_errors",
		offsetof(struct ecore_eth_stats_common, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats_common, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats_common,
			 packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats_common, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
	{"rx_gft_filter_drop",
		offsetof(struct ecore_eth_stats_common, gft_filter_drop)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats_common, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats_common, brb_discards)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats_common,
			 tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats_common,
			 tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_9217_to_16383_byte_packets)},

	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_9217_to_16383_byte_packets)},

	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
};

static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
	{"rx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 rx_1519_to_max_byte_packets)},
	{"tx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 tx_1519_to_max_byte_packets)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
};

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler_intx(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	u64 status;

	/* Check if our device actually raised an interrupt */
	status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
	if (status & 0x1) {
		qede_interrupt_action(ECORE_LEADING_HWFN(edev));

		if (rte_intr_ack(eth_dev->intr_handle))
			DP_ERR(edev, "rte_intr_ack failed\n");
	}
}

static void
qede_interrupt_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_ack(eth_dev->intr_handle))
		DP_ERR(edev, "rte_intr_ack failed\n");
}

static void
qede_assign_rxtx_handlers(struct rte_eth_dev *dev)
{
	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	bool use_tx_offload = false;

	if (ECORE_IS_CMT(edev)) {
		dev->rx_pkt_burst = qede_recv_pkts_cmt;
		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
		return;
	}

	if (dev->data->lro || dev->data->scattered_rx) {
		DP_INFO(edev, "Assigning qede_recv_pkts\n");
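		/* LRO and scattered Rx produce multi-BD frames, so the
		 * segment-aware receive handler is used for them.
		 */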
		dev->rx_pkt_burst = qede_recv_pkts;
	} else {
		DP_INFO(edev, "Assigning qede_recv_pkts_regular\n");
		dev->rx_pkt_burst = qede_recv_pkts_regular;
	}

	use_tx_offload = !!(tx_offloads &
			    (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | /* tunnel */
			     DEV_TX_OFFLOAD_TCP_TSO | /* tso */
			     DEV_TX_OFFLOAD_VLAN_INSERT)); /* vlan insert */

	if (use_tx_offload) {
		DP_INFO(edev, "Assigning qede_xmit_pkts\n");
		dev->tx_pkt_burst = qede_xmit_pkts;
	} else {
		DP_INFO(edev, "Assigning qede_xmit_pkts_regular\n");
		dev->tx_pkt_burst = qede_xmit_pkts_regular;
	}
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->ops = qed_ops;
}

static void qede_print_adapter_info(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "**************************************************\n");
	DP_INFO(edev, " DPDK version\t\t\t: %s\n", rte_version());
	DP_INFO(edev, " Chip details\t\t\t: %s %c%d\n",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		'A' + edev->chip_rev,
		(int)edev->chip_metal);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
		 QEDE_PMD_DRV_VERSION);
	DP_INFO(edev, " Driver version\t\t\t: %s\n", ver_str);

	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
		 QEDE_PMD_BASE_VERSION);
	DP_INFO(edev, " Base version\t\t\t: %s\n", ver_str);

	if (!IS_VF(edev))
		snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%s",
			 QEDE_PMD_FW_VERSION);
	else
		snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
			 info->fw_major, info->fw_minor,
			 info->fw_rev, info->fw_eng);
	DP_INFO(edev, " Firmware version\t\t\t: %s\n", ver_str);

	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
		 "%d.%d.%d.%d",
		 (info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
		 QED_MFW_VERSION_3_OFFSET,
		 (info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
		 QED_MFW_VERSION_2_OFFSET,
		 (info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
		 QED_MFW_VERSION_1_OFFSET,
		 (info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
		 QED_MFW_VERSION_0_OFFSET);
	DP_INFO(edev, " Management Firmware version\t: %s\n", ver_str);
	DP_INFO(edev, " Firmware file\t\t\t: %s\n", qede_fw_file);
	DP_INFO(edev, "**************************************************\n");
}

static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for (qid = 0; qid < qdev->num_rx_queues; qid++) {
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rcv_pkts), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rx_hw_errors), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
			    sizeof(uint64_t));

		if (xstats)
			for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
				OSAL_MEMSET((((char *)
					      (qdev->fp_array[qid].rxq)) +
					     qede_rxq_xstats_strings[j].offset),
					    0,
					    sizeof(uint64_t));

		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	i = 0;

	for (qid = 0; qid < qdev->num_tx_queues; qid++) {
		txq = qdev->fp_array[qid].txq;

		OSAL_MEMSET((uint64_t *)(uintptr_t)
			    (((uint64_t)(uintptr_t)(txq)) +
			     offsetof(struct qede_tx_queue, xmit_pkts)), 0,
			    sizeof(uint64_t));

		i++;
		if (i == txq_stat_cntrs)
			break;
	}
}

static int
qede_stop_vport(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	uint8_t vport_id;
	int rc;
	int i;

	vport_id = 0;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
					 vport_id);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
			return rc;
		}
	}

	DP_INFO(edev, "vport stopped\n");

	return 0;
}

static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_start_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (qdev->vport_started)
		qede_stop_vport(edev);

	memset(&params, 0, sizeof(params));
	params.vport_id = 0;
	params.mtu = mtu;
	/* @DPDK - Disable FW placement */
	params.zero_placement_offset = 1;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_start(p_hwfn, &params);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Start V-PORT failed %d\n", rc);
			return rc;
		}
	}
	ecore_reset_vport_stats(edev);
	qdev->vport_started = true;
	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

	return 0;
}

#define QEDE_NPAR_TX_SWITCHING		"npar_tx_switching"
#define QEDE_VF_TX_SWITCHING		"vf_tx_switching"

/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc = -1;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_rx_flg = flg;
	params.vport_active_tx_flg = flg;
	if ((qdev->enable_tx_switching == false) && (flg == true)) {
		params.update_tx_switching_flg = 1;
		params.tx_switching_flg = !flg;
	}
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			break;
		}
	}
	DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");
"activated" : "deactivated"); 504 505 return rc; 506 } 507 508 static void 509 qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params, 510 uint16_t mtu, bool enable) 511 { 512 /* Enable LRO in split mode */ 513 sge_tpa_params->tpa_ipv4_en_flg = enable; 514 sge_tpa_params->tpa_ipv6_en_flg = enable; 515 sge_tpa_params->tpa_ipv4_tunn_en_flg = enable; 516 sge_tpa_params->tpa_ipv6_tunn_en_flg = enable; 517 /* set if tpa enable changes */ 518 sge_tpa_params->update_tpa_en_flg = 1; 519 /* set if tpa parameters should be handled */ 520 sge_tpa_params->update_tpa_param_flg = enable; 521 522 sge_tpa_params->max_buffers_per_cqe = 20; 523 /* Enable TPA in split mode. In this mode each TPA segment 524 * starts on the new BD, so there is one BD per segment. 525 */ 526 sge_tpa_params->tpa_pkt_split_flg = 1; 527 sge_tpa_params->tpa_hdr_data_split_flg = 0; 528 sge_tpa_params->tpa_gro_consistent_flg = 0; 529 sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 530 sge_tpa_params->tpa_max_size = 0x7FFF; 531 sge_tpa_params->tpa_min_size_to_start = mtu / 2; 532 sge_tpa_params->tpa_min_size_to_cont = mtu / 2; 533 } 534 535 /* Enable/disable LRO via vport-update */ 536 int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg) 537 { 538 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 539 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 540 struct ecore_sp_vport_update_params params; 541 struct ecore_sge_tpa_params tpa_params; 542 struct ecore_hwfn *p_hwfn; 543 int rc; 544 int i; 545 546 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 547 memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params)); 548 qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg); 549 params.vport_id = 0; 550 params.sge_tpa_params = &tpa_params; 551 for_each_hwfn(edev, i) { 552 p_hwfn = &edev->hwfns[i]; 553 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 554 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 555 ECORE_SPQ_MODE_EBLOCK, NULL); 556 if (rc != ECORE_SUCCESS) { 557 DP_ERR(edev, "Failed to update LRO\n"); 558 return -1; 559 } 560 } 561 qdev->enable_lro = flg; 562 eth_dev->data->lro = flg; 563 564 DP_INFO(edev, "LRO is %s\n", flg ? 
"enabled" : "disabled"); 565 566 return 0; 567 } 568 569 static int 570 qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev, 571 enum qed_filter_rx_mode_type type) 572 { 573 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 574 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 575 struct ecore_filter_accept_flags flags; 576 577 memset(&flags, 0, sizeof(flags)); 578 579 flags.update_rx_mode_config = 1; 580 flags.update_tx_mode_config = 1; 581 flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 582 ECORE_ACCEPT_MCAST_MATCHED | 583 ECORE_ACCEPT_BCAST; 584 585 flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 586 ECORE_ACCEPT_MCAST_MATCHED | 587 ECORE_ACCEPT_BCAST; 588 589 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { 590 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 591 if (IS_VF(edev)) { 592 flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 593 DP_INFO(edev, "Enabling Tx unmatched flag for VF\n"); 594 } 595 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { 596 flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 597 } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC | 598 QED_FILTER_RX_MODE_TYPE_PROMISC)) { 599 flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED | 600 ECORE_ACCEPT_MCAST_UNMATCHED; 601 } 602 603 return ecore_filter_accept_cmd(edev, 0, flags, false, false, 604 ECORE_SPQ_MODE_CB, NULL); 605 } 606 607 int 608 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 609 bool add) 610 { 611 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 612 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 613 struct qede_ucast_entry *tmp = NULL; 614 struct qede_ucast_entry *u; 615 struct rte_ether_addr *mac_addr; 616 617 mac_addr = (struct rte_ether_addr *)ucast->mac; 618 if (add) { 619 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 620 if ((memcmp(mac_addr, &tmp->mac, 621 RTE_ETHER_ADDR_LEN) == 0) && 622 ucast->vni == tmp->vni && 623 ucast->vlan == tmp->vlan) { 624 DP_INFO(edev, "Unicast MAC is already added" 625 " with vlan = %u, vni = %u\n", 626 ucast->vlan, ucast->vni); 627 return 0; 628 } 629 } 630 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), 631 RTE_CACHE_LINE_SIZE); 632 if (!u) { 633 DP_ERR(edev, "Did not allocate memory for ucast\n"); 634 return -ENOMEM; 635 } 636 rte_ether_addr_copy(mac_addr, &u->mac); 637 u->vlan = ucast->vlan; 638 u->vni = ucast->vni; 639 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list); 640 qdev->num_uc_addr++; 641 } else { 642 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 643 if ((memcmp(mac_addr, &tmp->mac, 644 RTE_ETHER_ADDR_LEN) == 0) && 645 ucast->vlan == tmp->vlan && 646 ucast->vni == tmp->vni) 647 break; 648 } 649 if (tmp == NULL) { 650 DP_INFO(edev, "Unicast MAC is not found\n"); 651 return -EINVAL; 652 } 653 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list); 654 qdev->num_uc_addr--; 655 } 656 657 return 0; 658 } 659 660 static int 661 qede_add_mcast_filters(struct rte_eth_dev *eth_dev, 662 struct rte_ether_addr *mc_addrs, 663 uint32_t mc_addrs_num) 664 { 665 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 666 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 667 struct ecore_filter_mcast mcast; 668 struct qede_mcast_entry *m = NULL; 669 uint8_t i; 670 int rc; 671 672 for (i = 0; i < mc_addrs_num; i++) { 673 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), 674 RTE_CACHE_LINE_SIZE); 675 if (!m) { 676 DP_ERR(edev, "Did not allocate memory for mcast\n"); 677 return -ENOMEM; 678 } 679 rte_ether_addr_copy(&mc_addrs[i], &m->mac); 680 
		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
	}
	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = mc_addrs_num;
	mcast.opcode = ECORE_FILTER_ADD;
	for (i = 0; i < mc_addrs_num; i++)
		rte_ether_addr_copy(&mc_addrs[i], (struct rte_ether_addr *)
						  &mcast.mac[i]);
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);
		return -1;
	}

	return 0;
}

static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_mcast_entry *tmp = NULL;
	struct ecore_filter_mcast mcast;
	int j;
	int rc;

	memset(&mcast, 0, sizeof(mcast));
	mcast.num_mc_addrs = qdev->num_mc_addr;
	mcast.opcode = ECORE_FILTER_REMOVE;
	j = 0;
	SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
		rte_ether_addr_copy(&tmp->mac,
				    (struct rte_ether_addr *)&mcast.mac[j]);
		j++;
	}
	rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Failed to delete multicast filter\n");
		return -1;
	}
	/* Init the list */
	while (!SLIST_EMPTY(&qdev->mc_list_head)) {
		tmp = SLIST_FIRST(&qdev->mc_list_head);
		SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
	}
	SLIST_INIT(&qdev->mc_list_head);

	return 0;
}

enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;

	if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
		DP_ERR(edev, "Ucast filter table limit exceeded,"
			     " Please enable promisc mode\n");
		return ECORE_INVAL;
	}

	rc = qede_ucast_filter(eth_dev, ucast, add);
	if (rc == 0)
		rc = ecore_filter_ucast_cmd(edev, ucast,
					    ECORE_SPQ_MODE_CB, NULL);
	/* Indicate error only for add filter operation.
	 * Delete filter operations are not severe.
	 */
	if ((rc != ECORE_SUCCESS) && add)
		DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
		       rc, add);

	return rc;
}

static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr,
		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct ecore_filter_ucast ucast;
	int re;

	if (!rte_is_valid_assigned_ether_addr(mac_addr))
		return -EINVAL;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_ADD;
	ucast.type = ECORE_FILTER_MAC;
	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)&ucast.mac);
	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
	return re;
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_filter_ucast ucast;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_filters) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_filters);
		return;
	}

	if (!rte_is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
		return;

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;

	/* Use the index maintained by rte */
	rte_ether_addr_copy(&eth_dev->data->mac_addrs[index],
			    (struct rte_ether_addr *)&ucast.mac);

	qede_mac_int_ops(eth_dev, &ucast, false);
}

static int
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		return -EPERM;
	}

	qede_mac_addr_remove(eth_dev, 0);

	return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_accept_any_vlan_flg = 1;
	params.accept_any_vlan = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to configure accept-any-vlan\n");
			return;
		}
	}

	DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
"enabled" : "disabled"); 846 } 847 848 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg) 849 { 850 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 851 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 852 struct ecore_sp_vport_update_params params; 853 struct ecore_hwfn *p_hwfn; 854 uint8_t i; 855 int rc; 856 857 memset(¶ms, 0, sizeof(struct ecore_sp_vport_update_params)); 858 params.vport_id = 0; 859 params.update_inner_vlan_removal_flg = 1; 860 params.inner_vlan_removal_flg = flg; 861 for_each_hwfn(edev, i) { 862 p_hwfn = &edev->hwfns[i]; 863 params.opaque_fid = p_hwfn->hw_info.opaque_fid; 864 rc = ecore_sp_vport_update(p_hwfn, ¶ms, 865 ECORE_SPQ_MODE_EBLOCK, NULL); 866 if (rc != ECORE_SUCCESS) { 867 DP_ERR(edev, "Failed to update vport\n"); 868 return -1; 869 } 870 } 871 872 qdev->vlan_strip_flg = flg; 873 874 DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled"); 875 return 0; 876 } 877 878 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, 879 uint16_t vlan_id, int on) 880 { 881 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 882 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 883 struct qed_dev_eth_info *dev_info = &qdev->dev_info; 884 struct qede_vlan_entry *tmp = NULL; 885 struct qede_vlan_entry *vlan; 886 struct ecore_filter_ucast ucast; 887 int rc; 888 889 if (on) { 890 if (qdev->configured_vlans == dev_info->num_vlan_filters) { 891 DP_ERR(edev, "Reached max VLAN filter limit" 892 " enabling accept_any_vlan\n"); 893 qede_config_accept_any_vlan(qdev, true); 894 return 0; 895 } 896 897 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 898 if (tmp->vid == vlan_id) { 899 DP_INFO(edev, "VLAN %u already configured\n", 900 vlan_id); 901 return 0; 902 } 903 } 904 905 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry), 906 RTE_CACHE_LINE_SIZE); 907 908 if (!vlan) { 909 DP_ERR(edev, "Did not allocate memory for VLAN\n"); 910 return -ENOMEM; 911 } 912 913 qede_set_ucast_cmn_params(&ucast); 914 ucast.opcode = ECORE_FILTER_ADD; 915 ucast.type = ECORE_FILTER_VLAN; 916 ucast.vlan = vlan_id; 917 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 918 NULL); 919 if (rc != 0) { 920 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id, 921 rc); 922 rte_free(vlan); 923 } else { 924 vlan->vid = vlan_id; 925 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list); 926 qdev->configured_vlans++; 927 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n", 928 vlan_id, qdev->configured_vlans); 929 } 930 } else { 931 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 932 if (tmp->vid == vlan_id) 933 break; 934 } 935 936 if (!tmp) { 937 if (qdev->configured_vlans == 0) { 938 DP_INFO(edev, 939 "No VLAN filters configured yet\n"); 940 return 0; 941 } 942 943 DP_ERR(edev, "VLAN %u not configured\n", vlan_id); 944 return -EINVAL; 945 } 946 947 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list); 948 949 qede_set_ucast_cmn_params(&ucast); 950 ucast.opcode = ECORE_FILTER_REMOVE; 951 ucast.type = ECORE_FILTER_VLAN; 952 ucast.vlan = vlan_id; 953 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 954 NULL); 955 if (rc != 0) { 956 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", 957 vlan_id, rc); 958 } else { 959 qdev->configured_vlans--; 960 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n", 961 vlan_id, qdev->configured_vlans); 962 } 963 } 964 965 return rc; 966 } 967 968 static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 969 { 970 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 971 struct ecore_dev *edev = 
	uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filtering kicks in when a VLAN is added */
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
			qede_vlan_filter_set(eth_dev, 0, 1);
		} else {
			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
				DP_ERR(edev,
				       " Please remove existing VLAN filters"
				       " before disabling VLAN filtering\n");
				/* Signal app that VLAN filtering is still
				 * enabled
				 */
				eth_dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_VLAN_FILTER;
			} else {
				qede_vlan_filter_set(eth_dev, 0, 0);
			}
		}
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		DP_ERR(edev, "Extend VLAN not supported\n");

	qdev->vlan_offload_mask = mask;

	DP_INFO(edev, "VLAN offload mask %d\n", mask);

	return 0;
}

static void qede_prandom_bytes(uint32_t *buff)
{
	uint8_t i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = rand();
}

int qede_config_rss(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, id, pos, q;

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
	}

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))
		return -EINVAL;

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(eth_dev);
		reta_conf[id].reta[pos] = q;
	}
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))
		return -EINVAL;

	return 0;
}

static void qede_fastpath_start(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		ecore_hw_start_fastpath(p_hwfn);
	}
}

static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	PMD_INIT_FUNC_TRACE(edev);

	/* Update MTU only if it has changed */
	if (qdev->new_mtu && qdev->new_mtu != qdev->mtu) {
		if (qede_update_mtu(eth_dev, qdev->new_mtu))
			goto err;
		qdev->mtu = qdev->new_mtu;
		qdev->new_mtu = 0;
	}

	/* Configure TPA parameters */
	if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		if (qede_enable_tpa(eth_dev, true))
			return -EINVAL;
		/* Enable scatter mode for LRO */
		if (!eth_dev->data->scattered_rx)
			rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
	}

	/* Start queues */
	if (qede_start_queues(eth_dev))
		goto err;

	if (IS_PF(edev))
		qede_reset_queue_stats(qdev, true);

	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
	 * enabling RSS. Hence RSS configuration is deferred up to this point.
	 * Also, we would like to retain similar behavior in PF case, so we
	 * don't do PF/VF specific check here.
	 */
	if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		if (qede_config_rss(eth_dev))
			goto err;

	/* Enable vport */
	if (qede_activate_vport(eth_dev, true))
		goto err;

	/* Update link status */
	qede_link_update(eth_dev, 0);

	/* Start/resume traffic */
	qede_fastpath_start(edev);

	qede_assign_rxtx_handlers(eth_dev);
	DP_INFO(edev, "Device started\n");

	return 0;
err:
	DP_ERR(edev, "Device start fails\n");
	return -1; /* common error code is < 0 */
}

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Disable vport */
	if (qede_activate_vport(eth_dev, false))
		return;

	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false);

	/* Stop queues */
	qede_stop_queues(eth_dev);

	/* Disable traffic */
	ecore_hw_stop_fastpath(edev); /* TBD - loop */

	DP_INFO(edev, "Device is stopped\n");
}

static const char * const valid_args[] = {
	QEDE_NPAR_TX_SWITCHING,
	QEDE_VF_TX_SWITCHING,
	NULL,
};

static int qede_args_check(const char *key, const char *val, void *opaque)
{
	unsigned long tmp;
	int ret = 0;
	struct rte_eth_dev *eth_dev = opaque;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
		return errno;
	}

	if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
	    ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
		qdev->enable_tx_switching = !!tmp;
		DP_INFO(edev, "Disabling %s tx-switching\n",
			strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
			"VF" : "NPAR");
	}

	return ret;
}

static int qede_args(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	struct rte_kvargs *kvlist;
	struct rte_devargs *devargs;
	int ret;
	int i;

	devargs = pci_dev->device.devargs;
	if (!devargs)
		return 0; /* return success */

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Process parameters. */
	for (i = 0; (valid_args[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, valid_args[i])) {
			ret = rte_kvargs_process(kvlist, valid_args[i],
						 qede_args_check, eth_dev);
			if (ret != ECORE_SUCCESS) {
				rte_kvargs_free(kvlist);
				return ret;
			}
		}
	}
	rte_kvargs_free(kvlist);

	return 0;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	int ret;

	PMD_INIT_FUNC_TRACE(edev);

	if (rxmode->mq_mode & ETH_MQ_RX_RSS_FLAG)
		rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;

	/* We need to have min 1 RX queue. There is no min check in
	 * rte_eth_dev_configure(), so we are checking it here.
	 */
	if (eth_dev->data->nb_rx_queues == 0) {
		DP_ERR(edev, "Minimum one RX queue is required\n");
		return -EINVAL;
	}

	/* Enable Tx switching by default */
	qdev->enable_tx_switching = 1;

	/* Parse devargs and fix up rxmode */
	if (qede_args(eth_dev))
		DP_NOTICE(edev, false,
			  "Invalid devargs supplied, requested change will not take effect\n");

	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
	      rxmode->mq_mode == ETH_MQ_RX_RSS)) {
		DP_ERR(edev, "Unsupported multi-queue mode\n");
		return -ENOTSUP;
	}
	/* Flow director mode check */
	if (qede_check_fdir_support(eth_dev))
		return -ENOTSUP;

	qede_dealloc_fp_resc(eth_dev);
	qdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns;
	qdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns;

	if (qede_alloc_fp_resc(qdev))
		return -ENOMEM;

	/* If jumbo enabled adjust MTU */
	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;

	if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;

	if (qede_start_vport(qdev, eth_dev->data->mtu))
		return -1;

	qdev->mtu = eth_dev->data->mtu;

	/* Enable VLAN offloads by default */
	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
					     ETH_VLAN_FILTER_MASK);
	if (ret)
		return ret;

	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
		QEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev));

	if (ECORE_IS_CMT(edev))
		DP_INFO(edev, "Actual HW queues for CMT mode - RX = %d TX = %d\n",
			qdev->num_rx_queues, qdev->num_tx_queues);

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 256,
	.nb_align = 256,
	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};

static int
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link;
	uint32_t speed_cap = 0;

	PMD_INIT_FUNC_TRACE(edev);

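	/* Fixed buffer/descriptor limits are reported first; the queue
	 * counts below depend on PF/VF mode and are halved for CMT devices.
	 */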
	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;

	if (IS_PF(edev))
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
	else
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
	/* Since CMT mode internally doubles the number of queues */
	if (ECORE_IS_CMT(edev))
		dev_info->max_rx_queues = dev_info->max_rx_queues / 2;

	dev_info->max_tx_queues = dev_info->max_rx_queues;

	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
	dev_info->max_vfs = 0;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM |
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_TCP_LRO |
				     DEV_RX_OFFLOAD_KEEP_CRC |
				     DEV_RX_OFFLOAD_SCATTER |
				     DEV_RX_OFFLOAD_JUMBO_FRAME |
				     DEV_RX_OFFLOAD_VLAN_FILTER |
				     DEV_RX_OFFLOAD_VLAN_STRIP |
				     DEV_RX_OFFLOAD_RSS_HASH);
	dev_info->rx_queue_offload_capa = 0;

	/* TX offloads are on a per-packet basis, so it is applicable
	 * to both at port and queue levels.
	 */
	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM |
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_MULTI_SEGS |
				     DEV_TX_OFFLOAD_TCP_TSO |
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
	dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
	};

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		/* Packets are always dropped if no descriptors are available */
		.rx_drop_en = 1,
		.offloads = 0,
	};

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		speed_cap |= ETH_LINK_SPEED_1G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		speed_cap |= ETH_LINK_SPEED_10G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		speed_cap |= ETH_LINK_SPEED_25G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		speed_cap |= ETH_LINK_SPEED_40G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		speed_cap |= ETH_LINK_SPEED_50G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		speed_cap |= ETH_LINK_SPEED_100G;
	dev_info->speed_capa = speed_cap;

	return 0;
}

/* return 0 means link status changed, -1 means not changed */
int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output q_link;
	struct rte_eth_link link;
	uint16_t link_duplex;

	memset(&q_link, 0, sizeof(q_link));
	memset(&link, 0, sizeof(link));

	qdev->ops->common->get_link(edev, &q_link);

	/* Link Speed */
	link.link_speed = q_link.speed;

	/* Link Mode */
	switch (q_link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	link.link_duplex = link_duplex;

	/* Link Status */
	link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			    ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		link.link_speed, link.link_duplex,
		link.link_autoneg, link.link_status);

	return rte_eth_linkstatus_set(eth_dev, &link);
}

static int qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
	enum _ecore_status_t ecore_status;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static int qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	enum _ecore_status_t ecore_status;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
	}
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration.
	 * However, in dev_close() we can release all the resources and the
	 * device can be brought up again.
	 */
	if (eth_dev->data->dev_started)
		qede_dev_stop(eth_dev);

	if (qdev->vport_started)
		qede_stop_vport(edev);
	qdev->vport_started = false;
	qede_fdir_dealloc_resc(eth_dev);
	qede_dealloc_fp_resc(eth_dev);

	eth_dev->data->nb_rx_queues = 0;
	eth_dev->data->nb_tx_queues = 0;

	/* Bring the link down */
	qede_dev_set_link_state(eth_dev, false);
	qdev->ops->common->slowpath_stop(edev);
	qdev->ops->common->remove(edev);
	rte_intr_disable(&pci_dev->intr_handle);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     qede_interrupt_handler_intx,
					     (void *)eth_dev);
		break;
	default:
		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     qede_interrupt_handler,
					     (void *)eth_dev);
	}

	if (ECORE_IS_CMT(edev))
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}

static int
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int i = 0, j = 0, qid, idx, hw_fn;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	ecore_get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.common.rx_ucast_pkts +
	    stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;

	eth_stats->ibytes = stats.common.rx_ucast_bytes +
	    stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;

	eth_stats->ierrors = stats.common.rx_crc_errors +
	    stats.common.rx_align_errors +
	    stats.common.rx_carrier_errors +
	    stats.common.rx_oversize_packets +
	    stats.common.rx_jabbers + stats.common.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.common.no_buff_discards;

	eth_stats->imissed = stats.common.mftag_filter_discards +
	    stats.common.mac_filter_discards +
	    stats.common.no_buff_discards +
	    stats.common.brb_truncates + stats.common.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.common.tx_ucast_pkts +
	    stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;

	eth_stats->obytes = stats.common.tx_ucast_bytes +
	    stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;

	eth_stats->oerrors = stats.common.tx_err_drop_pkts;

	/* Queue stats */
	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	if (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) ||
	    txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev))
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
			   "Not all the queue stats will be displayed. Set"
			   " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
			   " appropriately and retry.\n");
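
	/* Each ethdev queue is backed by one ring per HW function, so the
	 * per-queue counters below are summed across hwfns
	 * (idx = qid * num_hwfns + hw_fn).
	 */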
Set" 1594 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" 1595 " appropriately and retry.\n"); 1596 1597 for (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) { 1598 eth_stats->q_ipackets[i] = 0; 1599 eth_stats->q_errors[i] = 0; 1600 1601 for_each_hwfn(edev, hw_fn) { 1602 idx = qid * edev->num_hwfns + hw_fn; 1603 1604 eth_stats->q_ipackets[i] += 1605 *(uint64_t *) 1606 (((char *)(qdev->fp_array[idx].rxq)) + 1607 offsetof(struct qede_rx_queue, 1608 rcv_pkts)); 1609 eth_stats->q_errors[i] += 1610 *(uint64_t *) 1611 (((char *)(qdev->fp_array[idx].rxq)) + 1612 offsetof(struct qede_rx_queue, 1613 rx_hw_errors)) + 1614 *(uint64_t *) 1615 (((char *)(qdev->fp_array[idx].rxq)) + 1616 offsetof(struct qede_rx_queue, 1617 rx_alloc_errors)); 1618 } 1619 1620 i++; 1621 if (i == rxq_stat_cntrs) 1622 break; 1623 } 1624 1625 for (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) { 1626 eth_stats->q_opackets[j] = 0; 1627 1628 for_each_hwfn(edev, hw_fn) { 1629 idx = qid * edev->num_hwfns + hw_fn; 1630 1631 txq = qdev->fp_array[idx].txq; 1632 eth_stats->q_opackets[j] += 1633 *((uint64_t *)(uintptr_t) 1634 (((uint64_t)(uintptr_t)(txq)) + 1635 offsetof(struct qede_tx_queue, 1636 xmit_pkts))); 1637 } 1638 1639 j++; 1640 if (j == txq_stat_cntrs) 1641 break; 1642 } 1643 1644 return 0; 1645 } 1646 1647 static unsigned 1648 qede_get_xstats_count(struct qede_dev *qdev) { 1649 struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev; 1650 1651 if (ECORE_IS_BB(&qdev->edev)) 1652 return RTE_DIM(qede_xstats_strings) + 1653 RTE_DIM(qede_bb_xstats_strings) + 1654 (RTE_DIM(qede_rxq_xstats_strings) * 1655 QEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns); 1656 else 1657 return RTE_DIM(qede_xstats_strings) + 1658 RTE_DIM(qede_ah_xstats_strings) + 1659 (RTE_DIM(qede_rxq_xstats_strings) * 1660 QEDE_RSS_COUNT(dev)); 1661 } 1662 1663 static int 1664 qede_get_xstats_names(struct rte_eth_dev *dev, 1665 struct rte_eth_xstat_name *xstats_names, 1666 __rte_unused unsigned int limit) 1667 { 1668 struct qede_dev *qdev = dev->data->dev_private; 1669 struct ecore_dev *edev = &qdev->edev; 1670 const unsigned int stat_cnt = qede_get_xstats_count(qdev); 1671 unsigned int i, qid, hw_fn, stat_idx = 0; 1672 1673 if (xstats_names == NULL) 1674 return stat_cnt; 1675 1676 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1677 strlcpy(xstats_names[stat_idx].name, 1678 qede_xstats_strings[i].name, 1679 sizeof(xstats_names[stat_idx].name)); 1680 stat_idx++; 1681 } 1682 1683 if (ECORE_IS_BB(edev)) { 1684 for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) { 1685 strlcpy(xstats_names[stat_idx].name, 1686 qede_bb_xstats_strings[i].name, 1687 sizeof(xstats_names[stat_idx].name)); 1688 stat_idx++; 1689 } 1690 } else { 1691 for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) { 1692 strlcpy(xstats_names[stat_idx].name, 1693 qede_ah_xstats_strings[i].name, 1694 sizeof(xstats_names[stat_idx].name)); 1695 stat_idx++; 1696 } 1697 } 1698 1699 for (qid = 0; qid < QEDE_RSS_COUNT(dev); qid++) { 1700 for_each_hwfn(edev, hw_fn) { 1701 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1702 snprintf(xstats_names[stat_idx].name, 1703 RTE_ETH_XSTATS_NAME_SIZE, 1704 "%.4s%d.%d%s", 1705 qede_rxq_xstats_strings[i].name, 1706 hw_fn, qid, 1707 qede_rxq_xstats_strings[i].name + 4); 1708 stat_idx++; 1709 } 1710 } 1711 } 1712 1713 return stat_cnt; 1714 } 1715 1716 static int 1717 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1718 unsigned int n) 1719 { 1720 struct qede_dev *qdev = dev->data->dev_private; 1721 struct ecore_dev *edev = 
	struct ecore_eth_stats stats;
	const unsigned int num = qede_get_xstats_count(qdev);
	unsigned int i, qid, hw_fn, fpidx, stat_idx = 0;

	if (n < num)
		return num;

	ecore_get_vport_stats(edev, &stats);

	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
					     qede_xstats_strings[i].offset);
		xstats[stat_idx].id = stat_idx;
		stat_idx++;
	}

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_bb_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	} else {
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_ah_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
		for_each_hwfn(edev, hw_fn) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				fpidx = qid * edev->num_hwfns + hw_fn;
				xstats[stat_idx].value = *(uint64_t *)
					(((char *)(qdev->fp_array[fpidx].rxq)) +
					 qede_rxq_xstats_strings[i].offset);
				xstats[stat_idx].id = stat_idx;
				stat_idx++;
			}
		}
	}

	return stat_idx;
}

static int
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, true);

	return 0;
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static int qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, false);

	return 0;
}

static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	enum _ecore_status_t ecore_status;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
static int qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	enum _ecore_status_t ecore_status;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	ecore_status = qed_configure_filter_rx_mode(eth_dev, type);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static int qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	enum _ecore_status_t ecore_status;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		ecore_status = qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);

	return ecore_status >= ECORE_SUCCESS ? 0 : -EAGAIN;
}

static int
qede_set_mc_addr_list(struct rte_eth_dev *eth_dev,
		      struct rte_ether_addr *mc_addrs,
		      uint32_t mc_addrs_num)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint8_t i;

	if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
		DP_ERR(edev, "Reached max multicast filters limit,"
			     " please enable multicast promisc mode\n");
		return -ENOSPC;
	}

	for (i = 0; i < mc_addrs_num; i++) {
		if (!rte_is_multicast_ether_addr(&mc_addrs[i])) {
			DP_ERR(edev, "Not a valid multicast MAC\n");
			return -EINVAL;
		}
	}

	/* Flush all existing entries */
	if (qede_del_mcast_filters(eth_dev))
		return -1;

	/* Set new mcast list */
	return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
}

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	if (IS_PF(edev)) {
		struct ecore_sp_vport_update_params params;

		memset(&params, 0,
		       sizeof(struct ecore_sp_vport_update_params));
		params.vport_id = 0;
		params.mtu = mtu;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			params.opaque_fid = p_hwfn->hw_info.opaque_fid;
			rc = ecore_sp_vport_update(p_hwfn, &params,
					ECORE_SPQ_MODE_EBLOCK, NULL);
			if (rc != ECORE_SUCCESS)
				goto err;
		}
	} else {
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
			if (rc == ECORE_INVAL) {
				DP_INFO(edev, "VF MTU Update TLV not supported\n");
				/* Recreate vport */
				rc = qede_start_vport(qdev, mtu);
				if (rc != ECORE_SUCCESS)
					goto err;

				/* Restore config lost due to vport stop */
				if (eth_dev->data->promiscuous)
					qede_promiscuous_enable(eth_dev);
				else
					qede_promiscuous_disable(eth_dev);

				if (eth_dev->data->all_multicast)
					qede_allmulticast_enable(eth_dev);
				else
					qede_allmulticast_disable(eth_dev);

				qede_vlan_offload_set(eth_dev,
						      qdev->vlan_offload_mask);
			} else if (rc != ECORE_SUCCESS) {
				goto err;
			}
		}
	}
	DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);

	return 0;

err:
	DP_ERR(edev, "Failed to update MTU\n");
	return -1;
}

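/* Flow control set/get are implemented on top of the link parameters:
 * qede_flow_ctrl_set() translates the requested mode into pause_config
 * override flags and reapplies the link, while qede_flow_ctrl_get()
 * derives the current mode from the reported pause_config bits.
 */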
"PF" : "VF", mtu); 1937 1938 return 0; 1939 1940 err: 1941 DP_ERR(edev, "Failed to update MTU\n"); 1942 return -1; 1943 } 1944 1945 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, 1946 struct rte_eth_fc_conf *fc_conf) 1947 { 1948 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1949 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1950 struct qed_link_output current_link; 1951 struct qed_link_params params; 1952 1953 memset(¤t_link, 0, sizeof(current_link)); 1954 qdev->ops->common->get_link(edev, ¤t_link); 1955 1956 memset(¶ms, 0, sizeof(params)); 1957 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; 1958 if (fc_conf->autoneg) { 1959 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) { 1960 DP_ERR(edev, "Autoneg not supported\n"); 1961 return -EINVAL; 1962 } 1963 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 1964 } 1965 1966 /* Pause is assumed to be supported (SUPPORTED_Pause) */ 1967 if (fc_conf->mode == RTE_FC_FULL) 1968 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE | 1969 QED_LINK_PAUSE_RX_ENABLE); 1970 if (fc_conf->mode == RTE_FC_TX_PAUSE) 1971 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; 1972 if (fc_conf->mode == RTE_FC_RX_PAUSE) 1973 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; 1974 1975 params.link_up = true; 1976 (void)qdev->ops->common->set_link(edev, ¶ms); 1977 1978 return 0; 1979 } 1980 1981 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev, 1982 struct rte_eth_fc_conf *fc_conf) 1983 { 1984 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1985 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1986 struct qed_link_output current_link; 1987 1988 memset(¤t_link, 0, sizeof(current_link)); 1989 qdev->ops->common->get_link(edev, ¤t_link); 1990 1991 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1992 fc_conf->autoneg = true; 1993 1994 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE | 1995 QED_LINK_PAUSE_TX_ENABLE)) 1996 fc_conf->mode = RTE_FC_FULL; 1997 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) 1998 fc_conf->mode = RTE_FC_RX_PAUSE; 1999 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) 2000 fc_conf->mode = RTE_FC_TX_PAUSE; 2001 else 2002 fc_conf->mode = RTE_FC_NONE; 2003 2004 return 0; 2005 } 2006 2007 static const uint32_t * 2008 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) 2009 { 2010 static const uint32_t ptypes[] = { 2011 RTE_PTYPE_L2_ETHER, 2012 RTE_PTYPE_L2_ETHER_VLAN, 2013 RTE_PTYPE_L3_IPV4, 2014 RTE_PTYPE_L3_IPV6, 2015 RTE_PTYPE_L4_TCP, 2016 RTE_PTYPE_L4_UDP, 2017 RTE_PTYPE_TUNNEL_VXLAN, 2018 RTE_PTYPE_L4_FRAG, 2019 RTE_PTYPE_TUNNEL_GENEVE, 2020 RTE_PTYPE_TUNNEL_GRE, 2021 /* Inner */ 2022 RTE_PTYPE_INNER_L2_ETHER, 2023 RTE_PTYPE_INNER_L2_ETHER_VLAN, 2024 RTE_PTYPE_INNER_L3_IPV4, 2025 RTE_PTYPE_INNER_L3_IPV6, 2026 RTE_PTYPE_INNER_L4_TCP, 2027 RTE_PTYPE_INNER_L4_UDP, 2028 RTE_PTYPE_INNER_L4_FRAG, 2029 RTE_PTYPE_UNKNOWN 2030 }; 2031 2032 if (eth_dev->rx_pkt_burst == qede_recv_pkts || 2033 eth_dev->rx_pkt_burst == qede_recv_pkts_regular || 2034 eth_dev->rx_pkt_burst == qede_recv_pkts_cmt) 2035 return ptypes; 2036 2037 return NULL; 2038 } 2039 2040 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf) 2041 { 2042 *rss_caps = 0; 2043 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0; 2044 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0; 2045 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0; 2046 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; 2047 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? 
int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params rss_params;
	struct ecore_hwfn *p_hwfn;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	uint8_t len = rss_conf->rss_key_len;
	uint8_t idx, i, j, fpidx;
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&rss_params, 0, sizeof(rss_params));

	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
		(unsigned long)hf, len, key);

	if (hf != 0) {
		/* Enabling RSS */
		DP_INFO(edev, "Enabling rss\n");

		/* RSS caps */
		qede_init_rss_caps(&rss_params.rss_caps, hf);
		rss_params.update_rss_capabilities = 1;

		/* RSS hash key */
		if (key) {
			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
				DP_ERR(edev, "RSS key length exceeds limit\n");
				return -EINVAL;
			}
			DP_INFO(edev, "Applying user supplied hash key\n");
			rss_params.update_rss_key = 1;
			memcpy(&rss_params.rss_key, key, len);
		}
		rss_params.rss_enable = 1;
	}

	rss_params.update_rss_config = 1;
	/* tbl_size has to be set with capabilities */
	rss_params.rss_table_size_log = 7;
	vport_update_params.vport_id = 0;

	for_each_hwfn(edev, i) {
		/* pass the L2 handles instead of qids */
		for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {
			idx = j % QEDE_RSS_COUNT(eth_dev);
			fpidx = idx * edev->num_hwfns + i;
			rss_params.rss_ind_table[j] =
				qdev->fp_array[fpidx].rxq->handle;
		}

		vport_update_params.rss_params = &rss_params;

		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			return rc;
		}
	}
	qdev->rss_enable = rss_params.rss_enable;

	/* Update local structure for hash query */
	qdev->rss_conf.rss_hf = hf;
	qdev->rss_conf.rss_key_len = len;
	if (qdev->rss_enable) {
		if (qdev->rss_conf.rss_key == NULL) {
			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
			if (qdev->rss_conf.rss_key == NULL) {
				DP_ERR(edev, "No memory to store RSS key\n");
				return -ENOMEM;
			}
		}
		if (key && len) {
			DP_INFO(edev, "Storing RSS key\n");
			memcpy(qdev->rss_conf.rss_key, key, len);
		}
	} else if (!qdev->rss_enable && len == 0) {
		if (qdev->rss_conf.rss_key) {
			free(qdev->rss_conf.rss_key);
			qdev->rss_conf.rss_key = NULL;
			DP_INFO(edev, "Free RSS key\n");
		}
	}

	return 0;
}

static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);
	return 0;
}

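/* RETA update programs the 128-entry indirection table through a vport-update
 * on each hardware function, translating queue indices into per-hwfn rxq
 * handles; a local shadow copy is kept so that reta_query can answer without
 * touching the device.
 */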
int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params *params;
	uint16_t i, j, idx, fid, shift;
	struct ecore_hwfn *p_hwfn;
	uint8_t entry;
	int rc = 0;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	params = rte_zmalloc("qede_rss", sizeof(*params), RTE_CACHE_LINE_SIZE);
	if (params == NULL) {
		DP_ERR(edev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	params->update_rss_ind_table = 1;
	params->rss_table_size_log = 7;
	params->update_rss_config = 1;

	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params->rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = params;

	for_each_hwfn(edev, i) {
		for (j = 0; j < reta_size; j++) {
			idx = j / RTE_RETA_GROUP_SIZE;
			shift = j % RTE_RETA_GROUP_SIZE;
			if (reta_conf[idx].mask & (1ULL << shift)) {
				entry = reta_conf[idx].reta[shift];
				fid = entry * edev->num_hwfns + i;
				/* Pass rxq handles to ecore */
				params->rss_ind_table[j] =
					qdev->fp_array[fid].rxq->handle;
				/* Update the local copy for RETA query cmd */
				qdev->rss_ind_table[j] = entry;
			}
		}

		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			goto out;
		}
	}

out:
	rte_free(params);
	return rc;
}

static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}

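/* MTU change requires pausing traffic: Rx/Tx bursts are parked on dummy
 * handlers, the device is stopped if it was running, every Rx queue buffer
 * size is recalculated for the new frame size, and the port is restarted
 * with the real burst handlers reassigned.
 */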
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_dev_info dev_info = {0};
	struct qede_fastpath *fp;
	uint32_t max_rx_pkt_len;
	uint32_t frame_size;
	uint16_t bufsz;
	bool restart = false;
	int i, rc;

	PMD_INIT_FUNC_TRACE(edev);
	rc = qede_dev_info_get(dev, &dev_info);
	if (rc != 0) {
		DP_ERR(edev, "Error during getting ethernet device info\n");
		return rc;
	}
	max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
	frame_size = max_rx_pkt_len;
	if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
		       mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
		       QEDE_ETH_OVERHEAD);
		return -EINVAL;
	}
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
			dev->data->min_rx_buf_size);
		return -EINVAL;
	}
	/* Temporarily replace I/O functions with dummy ones. It cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
	if (dev->data->dev_started) {
		dev->data->dev_started = 0;
		qede_dev_stop(dev);
		restart = true;
	}
	rte_delay_ms(1000);
	qdev->new_mtu = mtu;

	/* Fix up RX buf size for all queues of the port */
	for (i = 0; i < qdev->num_rx_queues; i++) {
		fp = &qdev->fp_array[i];
		if (fp->rxq != NULL) {
			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
			/* cache align the mbuf size to simplify rx_buf_size
			 * calculation
			 */
			bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
			rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
			if (rc < 0)
				return rc;

			fp->rxq->rx_buf_size = rc;
		}
	}
	if (max_rx_pkt_len > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (!dev->data->dev_started && restart) {
		qede_dev_start(dev);
		dev->data->dev_started = 1;
	}

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;

	/* Reassign back */
	qede_assign_rxtx_handlers(dev);
	if (ECORE_IS_CMT(edev)) {
		dev->rx_pkt_burst = qede_recv_pkts_cmt;
		dev->tx_pkt_burst = qede_xmit_pkts_cmt;
	} else {
		dev->rx_pkt_burst = qede_recv_pkts;
		dev->tx_pkt_burst = qede_xmit_pkts;
	}
	return 0;
}

static int
qede_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = qede_eth_dev_uninit(dev);
	if (ret)
		return ret;

	return qede_eth_dev_init(dev);
}

static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};

static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.rx_descriptor_status = qede_rx_descriptor_status,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_reset = qede_dev_reset,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.set_mc_addr_list = qede_set_mc_addr_list,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}

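/* Shared probe-time initialization for PF and VF ports: binds the ecore
 * device, registers the interrupt handler, starts the slowpath, allocates
 * the MAC address table and programs the initial MAC, then selects the
 * PF or VF callback table.
 */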
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[RTE_ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t int_mode;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		rc = -EINVAL;
		goto err;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		rc = -ENODEV;
		goto err;
	}
	qede_update_pf_params(edev);

	switch (pci_dev->intr_handle.type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		int_mode = ECORE_INT_MODE_INTA;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler_intx,
					   (void *)eth_dev);
		break;
	default:
		int_mode = ECORE_INT_MODE_MSIX;
		rte_intr_callback_register(&pci_dev->intr_handle,
					   qede_interrupt_handler,
					   (void *)eth_dev);
	}

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		rc = -ENODEV;
		goto err;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));

	params.int_mode = int_mode;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	qede_assign_rxtx_handlers(eth_dev);
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			rc = -EINVAL;
			goto err;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		rc = -ENODEV;
		goto err;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		rc = -ENODEV;
		goto err;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
				(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(RTE_ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		rte_ether_addr_copy((struct rte_ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		rte_ether_addr_copy(&eth_dev->data->mac_addrs[0],
				    &adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				rte_ether_addr_copy(
					(struct rte_ether_addr *)&vf_mac,
					&eth_dev->data->mac_addrs[0]);
				rte_ether_addr_copy(
					&eth_dev->data->mac_addrs[0],
					&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->arfs_info.arfs_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	SLIST_INIT(&adapter->mc_list_head);
	adapter->mtu = RTE_ETHER_MTU;
	adapter->vport_started = false;

	/* VF tunnel offloads is enabled by default in PF driver */
	adapter->vxlan.num_filters = 0;
	adapter->geneve.num_filters = 0;
	adapter->ipgre.num_filters = 0;
	if (is_vf) {
		adapter->vxlan.enable = true;
		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
		adapter->geneve.enable = true;
		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
					      ETH_TUNNEL_FILTER_IVLAN;
		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
		adapter->ipgre.enable = true;
		adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
	} else {
		adapter->vxlan.enable = false;
		adapter->geneve.enable = false;
		adapter->ipgre.enable = false;
	}

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;

err:
	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}
	return rc;
}

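/* The PF and VF probe entry points are thin wrappers around the common init
 * path and differ only in the is_vf flag passed down; both remove paths
 * share the same common uninit routine.
 */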
&qede_eth_vf_dev_ops : &qede_eth_dev_ops; 2640 2641 /* Bring-up the link */ 2642 qede_dev_set_link_state(eth_dev, true); 2643 2644 adapter->num_tx_queues = 0; 2645 adapter->num_rx_queues = 0; 2646 SLIST_INIT(&adapter->arfs_info.arfs_list_head); 2647 SLIST_INIT(&adapter->vlan_list_head); 2648 SLIST_INIT(&adapter->uc_list_head); 2649 SLIST_INIT(&adapter->mc_list_head); 2650 adapter->mtu = RTE_ETHER_MTU; 2651 adapter->vport_started = false; 2652 2653 /* VF tunnel offloads is enabled by default in PF driver */ 2654 adapter->vxlan.num_filters = 0; 2655 adapter->geneve.num_filters = 0; 2656 adapter->ipgre.num_filters = 0; 2657 if (is_vf) { 2658 adapter->vxlan.enable = true; 2659 adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC | 2660 ETH_TUNNEL_FILTER_IVLAN; 2661 adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT; 2662 adapter->geneve.enable = true; 2663 adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC | 2664 ETH_TUNNEL_FILTER_IVLAN; 2665 adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT; 2666 adapter->ipgre.enable = true; 2667 adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC | 2668 ETH_TUNNEL_FILTER_IVLAN; 2669 } else { 2670 adapter->vxlan.enable = false; 2671 adapter->geneve.enable = false; 2672 adapter->ipgre.enable = false; 2673 } 2674 2675 DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", 2676 adapter->primary_mac.addr_bytes[0], 2677 adapter->primary_mac.addr_bytes[1], 2678 adapter->primary_mac.addr_bytes[2], 2679 adapter->primary_mac.addr_bytes[3], 2680 adapter->primary_mac.addr_bytes[4], 2681 adapter->primary_mac.addr_bytes[5]); 2682 2683 DP_INFO(edev, "Device initialized\n"); 2684 2685 return 0; 2686 2687 err: 2688 if (do_once) { 2689 qede_print_adapter_info(adapter); 2690 do_once = false; 2691 } 2692 return rc; 2693 } 2694 2695 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev) 2696 { 2697 return qede_common_dev_init(eth_dev, 1); 2698 } 2699 2700 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev) 2701 { 2702 return qede_common_dev_init(eth_dev, 0); 2703 } 2704 2705 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev) 2706 { 2707 struct qede_dev *qdev = eth_dev->data->dev_private; 2708 struct ecore_dev *edev = &qdev->edev; 2709 2710 PMD_INIT_FUNC_TRACE(edev); 2711 2712 /* only uninitialize in the primary process */ 2713 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2714 return 0; 2715 2716 /* safe to close dev here */ 2717 qede_dev_close(eth_dev); 2718 2719 eth_dev->dev_ops = NULL; 2720 eth_dev->rx_pkt_burst = NULL; 2721 eth_dev->tx_pkt_burst = NULL; 2722 2723 return 0; 2724 } 2725 2726 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2727 { 2728 return qede_dev_common_uninit(eth_dev); 2729 } 2730 2731 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2732 { 2733 return qede_dev_common_uninit(eth_dev); 2734 } 2735 2736 static const struct rte_pci_id pci_id_qedevf_map[] = { 2737 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2738 { 2739 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF) 2740 }, 2741 { 2742 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV) 2743 }, 2744 { 2745 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV) 2746 }, 2747 {.vendor_id = 0,} 2748 }; 2749 2750 static const struct rte_pci_id pci_id_qede_map[] = { 2751 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2752 { 2753 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E) 2754 }, 2755 { 2756 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S) 2757 }, 2758 { 2759 
static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};

static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};

static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");

RTE_INIT(qede_init_log)
{
	qede_logtype_init = rte_log_register("pmd.net.qede.init");
	if (qede_logtype_init >= 0)
		rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE);
	qede_logtype_driver = rte_log_register("pmd.net.qede.driver");
	if (qede_logtype_driver >= 0)
		rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE);
}