/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>
#include <rte_kvargs.h>

/* Globals */
int qede_logtype_init;
int qede_logtype_driver;

static const struct qed_eth_ops *qed_ops;
static int64_t timer_period = 1;

/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{ETH_TUNNEL_FILTER_OMAC, ECORE_FILTER_MAC,
	 ECORE_TUNN_CLSS_MAC_VLAN, "outer-mac"},
	{ETH_TUNNEL_FILTER_TENID, ECORE_FILTER_VNI,
	 ECORE_TUNN_CLSS_MAC_VNI, "vni"},
	{ETH_TUNNEL_FILTER_IMAC, ECORE_FILTER_INNER_MAC,
	 ECORE_TUNN_CLSS_INNER_MAC_VLAN, "inner-mac"},
	{ETH_TUNNEL_FILTER_IVLAN, ECORE_FILTER_INNER_VLAN,
	 ECORE_TUNN_CLSS_INNER_MAC_VLAN, "inner-vlan"},
	{ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
	 ECORE_FILTER_MAC_VNI_PAIR, ECORE_TUNN_CLSS_MAC_VNI,
	 "outer-mac and vni"},
	{ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
	 ECORE_FILTER_UNUSED, MAX_ECORE_TUNN_CLSS,
	 "outer-mac and inner-mac"},
	{ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
	 ECORE_FILTER_UNUSED, MAX_ECORE_TUNN_CLSS,
	 "outer-mac and inner-vlan"},
	{ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
	 ECORE_FILTER_INNER_MAC_VNI_PAIR, ECORE_TUNN_CLSS_INNER_MAC_VNI,
	 "vni and inner-mac"},
	{ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
	 ECORE_FILTER_UNUSED, MAX_ECORE_TUNN_CLSS,
	 "vni and inner-vlan"},
	{ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	 ECORE_FILTER_INNER_PAIR, ECORE_TUNN_CLSS_INNER_MAC_VLAN,
	 "inner-mac and inner-vlan"},
	{ETH_TUNNEL_FILTER_OIP, ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS, "outer-IP"},
	{ETH_TUNNEL_FILTER_IIP, ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS, "inner-IP"},
	{RTE_TUNNEL_FILTER_IMAC_IVLAN, ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS, "IMAC_IVLAN"},
	{RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID, ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS, "IMAC_IVLAN_TENID"},
	{RTE_TUNNEL_FILTER_IMAC_TENID, ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS, "IMAC_TENID"},
	{RTE_TUNNEL_FILTER_OMAC_TENID_IMAC, ECORE_FILTER_UNUSED,
	 MAX_ECORE_TUNN_CLSS, "OMAC_TENID_IMAC"},
};
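
/* The qede_tunn_types[] table above maps each combination of rte_eth tunnel
 * filter flags to the matching ecore unicast filter type and tunnel
 * classification mode. Combinations marked ECORE_FILTER_UNUSED /
 * MAX_ECORE_TUNN_CLSS have no hardware classification equivalent.
 */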

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
	{"rx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},

	{"tx_unicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
	{"tx_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 rx_1024_to_1518_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats_common,
			 tx_1024_to_1518_byte_packets)},

	{"rx_mac_crtl_frames",
		offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
	{"tx_mac_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
	{"rx_pause_frames",
		offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
	{"tx_pause_frames",
		offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},

	{"rx_crc_errors",
		offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
	{"rx_align_errors",
		offsetof(struct ecore_eth_stats_common, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
	{"rx_jabber_errors",
		offsetof(struct ecore_eth_stats_common, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats_common, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats_common,
			 packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats_common, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats_common, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats_common, brb_discards)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats_common,
			 tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats_common, tpa_coalesced_bytes)},
};

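/* Additional port counters that exist only on BB-family devices
 * (ECORE_IS_BB); their offsets point into the 'bb' sub-struct of
 * struct ecore_eth_stats.
 */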
static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 rx_9217_to_16383_byte_packets)},

	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb,
			 tx_9217_to_16383_byte_packets)},

	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, bb) +
		offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
};

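/* Counterpart counters for AH-family devices, taken from the 'ah'
 * sub-struct of struct ecore_eth_stats.
 */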
static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
	{"rx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 rx_1519_to_max_byte_packets)},
	{"tx_1519_to_max_byte_packets",
		offsetof(struct ecore_eth_stats, ah) +
		offsetof(struct ecore_eth_stats_ah,
			 tx_1519_to_max_byte_packets)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
};
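
/* The name/offset tables above are consumed generically by the xstats
 * handlers below: each counter value is read as
 *	*(uint64_t *)((char *)base + entry->offset)
 * where base is either the ecore_eth_stats snapshot or a per-queue struct.
 */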

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_enable(eth_dev->intr_handle))
		DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->ops = qed_ops;
}

static void qede_print_adapter_info(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "*********************************\n");
	DP_INFO(edev, " DPDK version:%s\n", rte_version());
	DP_INFO(edev, " Chip details : %s %c%d\n",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		'A' + edev->chip_rev,
		(int)edev->chip_metal);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
		 ver_str, QEDE_PMD_VERSION);
	DP_INFO(edev, " Driver version : %s\n", drv_ver);
	DP_INFO(edev, " Firmware version : %s\n", ver_str);

	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
		 "%d.%d.%d.%d",
		 (info->mfw_rev >> 24) & 0xff,
		 (info->mfw_rev >> 16) & 0xff,
		 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
	DP_INFO(edev, " Firmware file : %s\n", fw_file);
	DP_INFO(edev, "*********************************\n");
}

static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);

	for_each_rss(qid) {
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rcv_pkts), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rx_hw_errors), 0,
			    sizeof(uint64_t));
		OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
			    offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
			    sizeof(uint64_t));

		if (xstats)
			for (j = 0; j < RTE_DIM(qede_rxq_xstats_strings); j++)
				OSAL_MEMSET((((char *)
					      (qdev->fp_array[qid].rxq)) +
					     qede_rxq_xstats_strings[j].offset),
					    0, sizeof(uint64_t));

		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	i = 0;

	for_each_tss(qid) {
		txq = qdev->fp_array[qid].txq;

		OSAL_MEMSET((uint64_t *)(uintptr_t)
			    (((uint64_t)(uintptr_t)(txq)) +
			     offsetof(struct qede_tx_queue, xmit_pkts)), 0,
			    sizeof(uint64_t));

		i++;
		if (i == txq_stat_cntrs)
			break;
	}
}

static int
qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_start_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(params));
	params.vport_id = 0;
	params.mtu = mtu;
	/* @DPDK - Disable FW placement */
	params.zero_placement_offset = 1;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_start(p_hwfn, &params);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Start V-PORT failed %d\n", rc);
			return rc;
		}
	}
	ecore_reset_vport_stats(edev);
	if (IS_PF(edev))
		qede_reset_queue_stats(qdev, true);
	DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);

	return 0;
}

static int
qede_stop_vport(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	uint8_t vport_id;
	int rc;
	int i;

	vport_id = 0;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
					 vport_id);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
			return rc;
		}
	}

	return 0;
}

/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc = -1;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_rx_flg = flg;
	params.vport_active_tx_flg = flg;
	if (!qdev->enable_tx_switching) {
		if (IS_VF(edev)) {
			params.update_tx_switching_flg = 1;
			params.tx_switching_flg = !flg;
			DP_INFO(edev, "VF tx-switching is disabled\n");
		}
	}
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			break;
		}
	}
	DP_INFO(edev, "vport is %s\n", flg ? "activated" : "deactivated");

	return rc;
}

static void
qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
			   uint16_t mtu, bool enable)
{
	/* Enable LRO in split mode */
	sge_tpa_params->tpa_ipv4_en_flg = enable;
	sge_tpa_params->tpa_ipv6_en_flg = enable;
	sge_tpa_params->tpa_ipv4_tunn_en_flg = enable;
	sge_tpa_params->tpa_ipv6_tunn_en_flg = enable;
	/* set if tpa enable changes */
	sge_tpa_params->update_tpa_en_flg = 1;
	/* set if tpa parameters should be handled */
	sge_tpa_params->update_tpa_param_flg = enable;

	sge_tpa_params->max_buffers_per_cqe = 20;
	/* Enable TPA in split mode. In this mode each TPA segment
	 * starts on a new BD, so there is one BD per segment.
	 */
	sge_tpa_params->tpa_pkt_split_flg = 1;
	sge_tpa_params->tpa_hdr_data_split_flg = 0;
	sge_tpa_params->tpa_gro_consistent_flg = 0;
	sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
	sge_tpa_params->tpa_max_size = 0x7FFF;
	sge_tpa_params->tpa_min_size_to_start = mtu / 2;
	sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}

/* Enable/disable LRO via vport-update */
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_sge_tpa_params tpa_params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
	qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
	params.vport_id = 0;
	params.sge_tpa_params = &tpa_params;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update LRO\n");
			return -1;
		}
	}
	qdev->enable_lro = flg;
	DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");

	return 0;
}

/* Update MTU via vport-update without doing port restart.
 * The vport must be deactivated before calling this API.
 */
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	int rc;
	int i;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.mtu = mtu;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update MTU\n");
			return -1;
		}
	}
	DP_INFO(edev, "MTU updated to %u\n", mtu);

	return 0;
}

static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
	memset(ucast, 0, sizeof(struct ecore_filter_ucast));
	ucast->is_rx_filter = true;
	ucast->is_tx_filter = true;
	/* ucast->assert_on_error = true; - For debug */
}

static int
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
			     enum qed_filter_rx_mode_type type)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_filter_accept_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.update_rx_mode_config = 1;
	flags.update_tx_mode_config = 1;
	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;

	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
		ECORE_ACCEPT_MCAST_MATCHED |
		ECORE_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
		if (IS_VF(edev)) {
			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
		}
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
			    QED_FILTER_RX_MODE_TYPE_PROMISC)) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
			ECORE_ACCEPT_MCAST_UNMATCHED;
	}

	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
				       ECORE_SPQ_MODE_CB, NULL);
}

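/* Push an updated tunnel configuration to every hwfn. On a PF a PTT window
 * is acquired for the duration of the update; VFs pass a NULL PTT.
 */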
static int
qede_tunnel_update(struct qede_dev *qdev,
		   struct ecore_tunnel_info *tunn_info)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		if (IS_PF(edev)) {
			p_ptt = ecore_ptt_acquire(p_hwfn);
			if (!p_ptt) {
				DP_ERR(p_hwfn, "Can't acquire PTT\n");
				return -EAGAIN;
			}
		} else {
			p_ptt = NULL;
		}

		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
						 tunn_info,
						 ECORE_SPQ_MODE_CB, NULL);
		if (IS_PF(edev))
			ecore_ptt_release(p_hwfn, p_ptt);

		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

static int
qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		  bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	if (qdev->vxlan.enable == enable)
		return ECORE_SUCCESS;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.vxlan.b_update_mode = true;
	tunn.vxlan.b_mode_enabled = enable;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;
	tunn.vxlan.tun_cls = clss;

	tunn.vxlan_port.b_update_port = true;
	tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->vxlan.enable = enable;
		qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
		DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       tunn.vxlan.tun_cls);
	}

	return rc;
}

static int
qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		   bool enable)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc = ECORE_INVAL;
	struct ecore_tunnel_info tunn;

	memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
	tunn.l2_geneve.b_update_mode = true;
	tunn.l2_geneve.b_mode_enabled = enable;
	tunn.ip_geneve.b_update_mode = true;
	tunn.ip_geneve.b_mode_enabled = enable;
	tunn.l2_geneve.tun_cls = clss;
	tunn.ip_geneve.tun_cls = clss;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	tunn.geneve_port.b_update_port = true;
	tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;

	rc = qede_tunnel_update(qdev, &tunn);
	if (rc == ECORE_SUCCESS) {
		qdev->geneve.enable = enable;
		qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
		DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
			enable ? "enabled" : "disabled", qdev->geneve.udp_port);
	} else {
		DP_ERR(edev, "Failed to update tunn_clss %u\n",
		       clss);
	}

	return rc;
}

static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
		 enum rte_eth_tunnel_type tunn_type, bool enable)
{
	int rc = -EINVAL;

	switch (tunn_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		rc = qede_vxlan_enable(eth_dev, clss, enable);
		break;
	case RTE_TUNNEL_TYPE_GENEVE:
		rc = qede_geneve_enable(eth_dev, clss, enable);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_ucast_entry *tmp = NULL;
	struct qede_ucast_entry *u;
	struct ether_addr *mac_addr;

	mac_addr = (struct ether_addr *)ucast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vni == tmp->vni &&
			    ucast->vlan == tmp->vlan) {
				DP_ERR(edev, "Unicast MAC is already added"
				       " with vlan = %u, vni = %u\n",
				       ucast->vlan, ucast->vni);
				return -EEXIST;
			}
		}
		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!u) {
			DP_ERR(edev, "Did not allocate memory for ucast\n");
			return -ENOMEM;
		}
		ether_addr_copy(mac_addr, &u->mac);
		u->vlan = ucast->vlan;
		u->vni = ucast->vni;
		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
		qdev->num_uc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan &&
			    ucast->vni == tmp->vni)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Unicast MAC is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
		qdev->num_uc_addr--;
	}

	return 0;
}

static int
qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ether_addr *mac_addr;
	struct qede_mcast_entry *tmp = NULL;
	struct qede_mcast_entry *m;

	mac_addr = (struct ether_addr *)mcast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
				DP_ERR(edev,
				       "Multicast MAC is already added\n");
				return -EEXIST;
			}
		}
		m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!m) {
			DP_ERR(edev,
			       "Did not allocate memory for mcast\n");
			return -ENOMEM;
		}
		ether_addr_copy(mac_addr, &m->mac);
		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
		qdev->num_mc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Multicast mac is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->mc_list_head, tmp,
			     qede_mcast_entry, list);
		qdev->num_mc_addr--;
	}

	return 0;
}

static enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		 bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	enum _ecore_status_t rc;
	struct ecore_filter_mcast mcast;
	struct qede_mcast_entry *tmp;
	uint16_t j = 0;

	/* Multicast */
	if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
		if (add) {
			if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
				DP_ERR(edev,
				       "Mcast filter table limit exceeded, "
				       "Please enable mcast promisc mode\n");
				return -ECORE_INVAL;
			}
		}
		rc = qede_mcast_filter(eth_dev, ucast, add);
		if (rc == 0) {
			DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
			memset(&mcast, 0, sizeof(mcast));
			mcast.num_mc_addrs = qdev->num_mc_addr;
			mcast.opcode = ECORE_FILTER_ADD;
			SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
				ether_addr_copy(&tmp->mac,
					(struct ether_addr *)&mcast.mac[j]);
				j++;
			}
			rc = ecore_filter_mcast_cmd(edev, &mcast,
						    ECORE_SPQ_MODE_CB, NULL);
		}
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to add multicast filter"
			       " rc = %d, op = %d\n", rc, add);
		}
	} else { /* Unicast */
		if (add) {
			if (qdev->num_uc_addr >=
			    qdev->dev_info.num_mac_filters) {
				DP_ERR(edev,
				       "Ucast filter table limit exceeded,"
				       " Please enable promisc mode\n");
				return -ECORE_INVAL;
			}
		}
		rc = qede_ucast_filter(eth_dev, ucast, add);
		if (rc == 0)
			rc = ecore_filter_ucast_cmd(edev, ucast,
						    ECORE_SPQ_MODE_CB, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
			       rc, add);
		}
	}

	return rc;
}

static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
		  __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct ecore_filter_ucast ucast;
	int re;

	qede_set_ucast_cmn_params(&ucast);
	ucast.type = ECORE_FILTER_MAC;
	ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
	re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
	return re;
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_filter_ucast ucast;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_filters) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_filters);
		return;
	}

	qede_set_ucast_cmn_params(&ucast);
	ucast.opcode = ECORE_FILTER_REMOVE;
	ucast.type = ECORE_FILTER_MAC;

	/* Use the index maintained by rte */
	ether_addr_copy(&eth_dev->data->mac_addrs[index],
			(struct ether_addr *)&ucast.mac);

	ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
}

static void
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		ether_addr_copy(&qdev->primary_mac,
				&eth_dev->data->mac_addrs[0]);
		return;
	}

	qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_accept_any_vlan_flg = 1;
	params.accept_any_vlan = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to configure accept-any-vlan\n");
			return;
		}
	}

	DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	uint8_t i;
	int rc;

	memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.vport_id = 0;
	params.update_inner_vlan_removal_flg = 1;
	params.inner_vlan_removal_flg = flg;
	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Failed to update vport\n");
			return -1;
		}
	}

	DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
	return 0;
}

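/* Add or remove a single VLAN filter. A shadow list mirrors what has been
 * programmed; once the HW VLAN filter table is exhausted the driver falls
 * back to accept-any-vlan instead of failing the request.
 */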
static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	struct qede_vlan_entry *tmp = NULL;
	struct qede_vlan_entry *vlan;
	struct ecore_filter_ucast ucast;
	int rc;

	if (on) {
		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
			DP_ERR(edev, "Reached max VLAN filter limit"
			       " enabling accept_any_vlan\n");
			qede_config_accept_any_vlan(qdev, true);
			return 0;
		}

		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id) {
				DP_ERR(edev, "VLAN %u already configured\n",
				       vlan_id);
				return -EEXIST;
			}
		}

		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
				  RTE_CACHE_LINE_SIZE);

		if (!vlan) {
			DP_ERR(edev, "Did not allocate memory for VLAN\n");
			return -ENOMEM;
		}

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_ADD;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
			rte_free(vlan);
		} else {
			vlan->vid = vlan_id;
			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
			qdev->configured_vlans++;
			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	} else {
		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id)
				break;
		}

		if (!tmp) {
			if (qdev->configured_vlans == 0) {
				DP_INFO(edev,
					"No VLAN filters configured yet\n");
				return 0;
			}

			DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
			return -EINVAL;
		}

		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_REMOVE;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		} else {
			qdev->configured_vlans--;
			DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
		}
	}

	return rc;
}

static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->hw_vlan_strip)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		/* VLAN filtering kicks in when a VLAN is added */
		if (rxmode->hw_vlan_filter) {
			qede_vlan_filter_set(eth_dev, 0, 1);
		} else {
			if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
				DP_ERR(edev,
				       " Please remove existing VLAN filters"
				       " before disabling VLAN filtering\n");
				/* Signal app that VLAN filtering is still
				 * enabled
				 */
				rxmode->hw_vlan_filter = true;
			} else {
				qede_vlan_filter_set(eth_dev, 0, 0);
			}
		}
	}

	if (mask & ETH_VLAN_EXTEND_MASK)
		DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
			" and classification is based on outer tag only\n");

	DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
		mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);

	return 0;
}

static void qede_prandom_bytes(uint32_t *buff)
{
	uint8_t i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
		buff[i] = rand();
}

int qede_config_rss(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
	struct rte_eth_rss_reta_entry64 reta_conf[2];
	struct rte_eth_rss_conf rss_conf;
	uint32_t i, id, pos, q;

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
	}

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))
		return -EINVAL;

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(qdev);
		reta_conf[id].reta[pos] = q;
	}
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))
		return -EINVAL;

	return 0;
}

static void qede_fastpath_start(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		ecore_hw_start_fastpath(p_hwfn);
	}
}

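/* dev_start sequence: apply a pending MTU change, program TPA/LRO if
 * requested, start the Rx/Tx queues, apply the RSS configuration (deferred
 * until the queues exist), activate the vport and finally bring up the link
 * and the fastpath.
 */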
static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Update MTU only if it has changed */
	if (qdev->mtu != qdev->new_mtu) {
		if (qede_update_mtu(eth_dev, qdev->new_mtu))
			goto err;
		qdev->mtu = qdev->new_mtu;
	}

	/* Configure TPA parameters */
	if (rxmode->enable_lro) {
		if (qede_enable_tpa(eth_dev, true))
			return -EINVAL;
		/* Enable scatter mode for LRO */
		if (!rxmode->enable_scatter)
			eth_dev->data->scattered_rx = 1;
	}

	/* Start queues */
	if (qede_start_queues(eth_dev))
		goto err;

	/* Newer SR-IOV PF driver expects RX/TX queues to be started before
	 * enabling RSS. Hence RSS configuration is deferred up to this point.
	 * Also, we would like to retain similar behavior in PF case, so we
	 * don't do PF/VF specific check here.
	 */
	if (rxmode->mq_mode == ETH_MQ_RX_RSS)
		if (qede_config_rss(eth_dev))
			goto err;

	/* Enable vport */
	if (qede_activate_vport(eth_dev, true))
		goto err;

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	/* Update link status */
	qede_link_update(eth_dev, 0);

	/* Start/resume traffic */
	qede_fastpath_start(edev);

	DP_INFO(edev, "Device started\n");

	return 0;
err:
	DP_ERR(edev, "Device start fails\n");
	return -1; /* common error code is < 0 */
}

static void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* Disable vport */
	if (qede_activate_vport(eth_dev, false))
		return;

	if (qdev->enable_lro)
		qede_enable_tpa(eth_dev, false);

	/* Stop queues */
	qede_stop_queues(eth_dev);

	/* Disable traffic */
	ecore_hw_stop_fastpath(edev); /* TBD - loop */

	/* Bring the link down */
	qede_dev_set_link_state(eth_dev, false);

	DP_INFO(edev, "Device is stopped\n");
}

#define QEDE_TX_SWITCHING		"vf_txswitch"

const char *valid_args[] = {
	QEDE_TX_SWITCHING,
	NULL,
};

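/* The "vf_txswitch" devarg is parsed from the PCI device's devargs string.
 * Assuming the usual EAL whitelist syntax of this DPDK generation, an option
 * such as "-w 05:00.0,vf_txswitch=0" would turn VF Tx switching off.
 */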
static int qede_args_check(const char *key, const char *val, void *opaque)
{
	unsigned long tmp;
	int ret = 0;
	struct rte_eth_dev *eth_dev = opaque;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DP_INFO(edev, "%s: \"%s\" is not a valid integer", key, val);
		return errno;
	}

	if (strcmp(QEDE_TX_SWITCHING, key) == 0)
		qdev->enable_tx_switching = !!tmp;

	return ret;
}

static int qede_args(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	struct rte_kvargs *kvlist;
	struct rte_devargs *devargs;
	int ret;
	int i;

	devargs = pci_dev->device.devargs;
	if (!devargs)
		return 0; /* return success */

	kvlist = rte_kvargs_parse(devargs->args, valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Process parameters. */
	for (i = 0; (valid_args[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, valid_args[i])) {
			ret = rte_kvargs_process(kvlist, valid_args[i],
						 qede_args_check, eth_dev);
			if (ret != ECORE_SUCCESS) {
				rte_kvargs_free(kvlist);
				return ret;
			}
		}
	}
	rte_kvargs_free(kvlist);

	return 0;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	int ret;

	PMD_INIT_FUNC_TRACE(edev);

	/* Check requirements for 100G mode */
	if (ECORE_IS_CMT(edev)) {
		if (eth_dev->data->nb_rx_queues < 2 ||
		    eth_dev->data->nb_tx_queues < 2) {
			DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
			return -EINVAL;
		}

		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
			DP_ERR(edev,
			       "100G mode needs even no. of RX/TX queues\n");
			return -EINVAL;
		}
	}

	/* We need at least one RX queue. There is no minimum check in
	 * rte_eth_dev_configure(), so we are checking it here.
	 */
	if (eth_dev->data->nb_rx_queues == 0) {
		DP_ERR(edev, "Minimum one RX queue is required\n");
		return -EINVAL;
	}

	/* Enable Tx switching by default */
	qdev->enable_tx_switching = 1;

	/* Parse devargs and fix up rxmode */
	if (qede_args(eth_dev))
		return -ENOTSUP;

	/* Sanity checks and throw warnings */
	if (rxmode->enable_scatter)
		eth_dev->data->scattered_rx = 1;

	if (!rxmode->hw_strip_crc)
		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

	if (!rxmode->hw_ip_checksum)
		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
			"in hw\n");
	if (rxmode->header_split)
		DP_INFO(edev, "Header split enable is not supported\n");
	if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
	      ETH_MQ_RX_RSS)) {
		DP_ERR(edev, "Unsupported multi-queue mode\n");
		return -ENOTSUP;
	}
	/* Flow director mode check */
	if (qede_check_fdir_support(eth_dev))
		return -ENOTSUP;

	/* Deallocate resources if held previously. It is needed only if the
	 * queue count has been changed from the previous configuration. If it
	 * is going to change then it means RX/TX queue setup will be called
	 * again and the fastpath pointers will be reinitialized there.
	 */
	if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
	    qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
		qede_dealloc_fp_resc(eth_dev);
		/* Proceed with updated queue count */
		qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
		qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
		if (qede_alloc_fp_resc(qdev))
			return -ENOMEM;
	}

	/* If jumbo enabled adjust MTU */
	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
		eth_dev->data->mtu =
			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
			ETHER_HDR_LEN - ETHER_CRC_LEN;

	/* VF's MTU has to be set using vport-start, whereas
	 * PF's MTU can be updated via vport-update.
	 */
	if (IS_VF(edev)) {
		if (qede_start_vport(qdev, eth_dev->data->mtu))
			return -1;
	} else {
		if (qede_update_mtu(eth_dev, eth_dev->data->mtu))
			return -1;
	}

	qdev->mtu = eth_dev->data->mtu;
	qdev->new_mtu = qdev->mtu;

	/* Enable VLAN offloads by default */
	ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
				    ETH_VLAN_FILTER_MASK |
				    ETH_VLAN_EXTEND_MASK);
	if (ret)
		return ret;

	DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
		QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = 0x8000, /* 32K */
	.nb_min = 256,
	.nb_align = 256,
	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link;
	uint32_t speed_cap = 0;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;

	if (IS_PF(edev))
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
	else
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
	dev_info->max_tx_queues = dev_info->max_rx_queues;

	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
	dev_info->max_vfs = 0;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = QEDE_TXQ_FLAGS,
	};

	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
				     DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM |
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_TCP_LRO);

	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM |
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_TCP_TSO |
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
				     DEV_TX_OFFLOAD_GENEVE_TNL_TSO);

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		speed_cap |= ETH_LINK_SPEED_1G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		speed_cap |= ETH_LINK_SPEED_10G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		speed_cap |= ETH_LINK_SPEED_25G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		speed_cap |= ETH_LINK_SPEED_40G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		speed_cap |= ETH_LINK_SPEED_50G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		speed_cap |= ETH_LINK_SPEED_100G;
	dev_info->speed_capa = speed_cap;
}

/* return 0 means link status changed, -1 means not changed */
int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t link_duplex;
	struct qed_link_output link;
	struct rte_eth_link *curr = &eth_dev->data->dev_link;

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);

	/* Link Speed */
	curr->link_speed = link.speed;

	/* Link Mode */
	switch (link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	curr->link_duplex = link_duplex;

	/* Link Status */
	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		curr->link_speed, curr->link_duplex,
		curr->link_autoneg, curr->link_status);

	/* return 0 means link status changed, -1 means not changed */
	return ((curr->link_status == link.link_up) ? -1 : 0);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(timer_period * US_PER_S,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
		assert(false && "Unable to start periodic timer");
	}
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources and the device can be brought up
	 * freshly.
	 */
	if (eth_dev->data->dev_started)
		qede_dev_stop(eth_dev);

	qede_stop_vport(edev);
	qede_fdir_dealloc_resc(eth_dev);
	qede_dealloc_fp_resc(eth_dev);

	eth_dev->data->nb_rx_queues = 0;
	eth_dev->data->nb_tx_queues = 0;

	qdev->ops->common->slowpath_stop(edev);
	qdev->ops->common->remove(edev);
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     qede_interrupt_handler, (void *)eth_dev);
	if (ECORE_IS_CMT(edev))
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}

static int
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	ecore_get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.common.rx_ucast_pkts +
	    stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;

	eth_stats->ibytes = stats.common.rx_ucast_bytes +
	    stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;

	eth_stats->ierrors = stats.common.rx_crc_errors +
	    stats.common.rx_align_errors +
	    stats.common.rx_carrier_errors +
	    stats.common.rx_oversize_packets +
	    stats.common.rx_jabbers + stats.common.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.common.no_buff_discards;

	eth_stats->imissed = stats.common.mftag_filter_discards +
	    stats.common.mac_filter_discards +
	    stats.common.no_buff_discards +
	    stats.common.brb_truncates + stats.common.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.common.tx_ucast_pkts +
	    stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;

	eth_stats->obytes = stats.common.tx_ucast_bytes +
	    stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;

	eth_stats->oerrors = stats.common.tx_err_drop_pkts;

	/* Queue stats */
	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
	    (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
			   "Not all the queue stats will be displayed. Set"
			   " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
			   " appropriately and retry.\n");

	for_each_rss(qid) {
		eth_stats->q_ipackets[i] =
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
					 rcv_pkts));
		eth_stats->q_errors[i] =
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
					 rx_hw_errors)) +
			*(uint64_t *)(
				((char *)(qdev->fp_array[qid].rxq)) +
				offsetof(struct qede_rx_queue,
					 rx_alloc_errors));
		i++;
		if (i == rxq_stat_cntrs)
			break;
	}

	for_each_tss(qid) {
		txq = qdev->fp_array[qid].txq;
		eth_stats->q_opackets[j] =
			*((uint64_t *)(uintptr_t)
				(((uint64_t)(uintptr_t)(txq)) +
				 offsetof(struct qede_tx_queue,
					  xmit_pkts)));
		j++;
		if (j == txq_stat_cntrs)
			break;
	}

	return 0;
}

static unsigned
qede_get_xstats_count(struct qede_dev *qdev)
{
	if (ECORE_IS_BB(&qdev->edev))
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_bb_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			RTE_MIN(QEDE_RSS_COUNT(qdev),
				RTE_ETHDEV_QUEUE_STAT_CNTRS));
	else
		return RTE_DIM(qede_xstats_strings) +
		       RTE_DIM(qede_ah_xstats_strings) +
		       (RTE_DIM(qede_rxq_xstats_strings) *
			RTE_MIN(QEDE_RSS_COUNT(qdev),
				RTE_ETHDEV_QUEUE_STAT_CNTRS));
}

static int
qede_get_xstats_names(struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names,
		      __rte_unused unsigned int limit)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
	unsigned int i, qid, stat_idx = 0;
	unsigned int rxq_stat_cntrs;

	if (xstats_names != NULL) {
		for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
			snprintf(xstats_names[stat_idx].name,
				 sizeof(xstats_names[stat_idx].name),
				 "%s",
				 qede_xstats_strings[i].name);
			stat_idx++;
		}

		if (ECORE_IS_BB(edev)) {
			for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 sizeof(xstats_names[stat_idx].name),
					 "%s",
					 qede_bb_xstats_strings[i].name);
				stat_idx++;
			}
		} else {
			for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 sizeof(xstats_names[stat_idx].name),
					 "%s",
					 qede_ah_xstats_strings[i].name);
				stat_idx++;
			}
		}

		rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
					 RTE_ETHDEV_QUEUE_STAT_CNTRS);
		for (qid = 0; qid < rxq_stat_cntrs; qid++) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				snprintf(xstats_names[stat_idx].name,
					 sizeof(xstats_names[stat_idx].name),
					 "%.4s%d%s",
					 qede_rxq_xstats_strings[i].name, qid,
					 qede_rxq_xstats_strings[i].name + 4);
				stat_idx++;
			}
		}
	}

	return stat_cnt;
}

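/* Fill in the xstats values. If the caller's array is too small the required
 * count is returned instead. Per-queue entries use names derived from
 * qede_rxq_xstats_strings via the "%.4s%d%s" format above (e.g.
 * "rx_q_segments" is reported as "rx_q0_segments" for queue 0).
 */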
RTE_DIM(qede_xstats_strings); i++) {
		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
					   qede_xstats_strings[i].offset);
		xstats[stat_idx].id = stat_idx;
		stat_idx++;
	}

	if (ECORE_IS_BB(edev)) {
		for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_bb_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	} else {
		for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
			xstats[stat_idx].value =
					*(uint64_t *)(((char *)&stats) +
					qede_ah_xstats_strings[i].offset);
			xstats[stat_idx].id = stat_idx;
			stat_idx++;
		}
	}

	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (qid = 0; qid < rxq_stat_cntrs; qid++) {
		for_each_rss(qid) {
			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
				xstats[stat_idx].value = *(uint64_t *)(
					((char *)(qdev->fp_array[qid].rxq)) +
					 qede_rxq_xstats_strings[i].offset);
				xstats[stat_idx].id = stat_idx;
				stat_idx++;
			}
		}
	}

	return stat_idx;
}

static void
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, true);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
	qede_reset_queue_stats(qdev, false);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_TUNNEL_GENEVE,
		/* Inner */
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L2_ETHER_VLAN,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
		return ptypes;

	return NULL;
}

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
	*rss_caps = 0;
	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ?
ECORE_RSS_IPV6_UDP : 0; 2116 } 2117 2118 int qede_rss_hash_update(struct rte_eth_dev *eth_dev, 2119 struct rte_eth_rss_conf *rss_conf) 2120 { 2121 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2122 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2123 struct ecore_sp_vport_update_params vport_update_params; 2124 struct ecore_rss_params rss_params; 2125 struct ecore_hwfn *p_hwfn; 2126 uint32_t *key = (uint32_t *)rss_conf->rss_key; 2127 uint64_t hf = rss_conf->rss_hf; 2128 uint8_t len = rss_conf->rss_key_len; 2129 uint8_t idx; 2130 uint8_t i; 2131 int rc; 2132 2133 memset(&vport_update_params, 0, sizeof(vport_update_params)); 2134 memset(&rss_params, 0, sizeof(rss_params)); 2135 2136 DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n", 2137 (unsigned long)hf, len, key); 2138 2139 if (hf != 0) { 2140 /* Enabling RSS */ 2141 DP_INFO(edev, "Enabling rss\n"); 2142 2143 /* RSS caps */ 2144 qede_init_rss_caps(&rss_params.rss_caps, hf); 2145 rss_params.update_rss_capabilities = 1; 2146 2147 /* RSS hash key */ 2148 if (key) { 2149 if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) { 2150 DP_ERR(edev, "RSS key length exceeds limit\n"); 2151 return -EINVAL; 2152 } 2153 DP_INFO(edev, "Applying user supplied hash key\n"); 2154 rss_params.update_rss_key = 1; 2155 memcpy(&rss_params.rss_key, key, len); 2156 } 2157 rss_params.rss_enable = 1; 2158 } 2159 2160 rss_params.update_rss_config = 1; 2161 /* tbl_size has to be set with capabilities */ 2162 rss_params.rss_table_size_log = 7; 2163 vport_update_params.vport_id = 0; 2164 /* pass the L2 handles instead of qids */ 2165 for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) { 2166 idx = qdev->rss_ind_table[i]; 2167 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle; 2168 } 2169 vport_update_params.rss_params = &rss_params; 2170 2171 for_each_hwfn(edev, i) { 2172 p_hwfn = &edev->hwfns[i]; 2173 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 2174 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 2175 ECORE_SPQ_MODE_EBLOCK, NULL); 2176 if (rc) { 2177 DP_ERR(edev, "vport-update for RSS failed\n"); 2178 return rc; 2179 } 2180 } 2181 qdev->rss_enable = rss_params.rss_enable; 2182 2183 /* Update local structure for hash query */ 2184 qdev->rss_conf.rss_hf = hf; 2185 qdev->rss_conf.rss_key_len = len; 2186 if (qdev->rss_enable) { 2187 if (qdev->rss_conf.rss_key == NULL) { 2188 qdev->rss_conf.rss_key = (uint8_t *)malloc(len); 2189 if (qdev->rss_conf.rss_key == NULL) { 2190 DP_ERR(edev, "No memory to store RSS key\n"); 2191 return -ENOMEM; 2192 } 2193 } 2194 if (key && len) { 2195 DP_INFO(edev, "Storing RSS key\n"); 2196 memcpy(qdev->rss_conf.rss_key, key, len); 2197 } 2198 } else if (!qdev->rss_enable && len == 0) { 2199 if (qdev->rss_conf.rss_key) { 2200 free(qdev->rss_conf.rss_key); 2201 qdev->rss_conf.rss_key = NULL; 2202 DP_INFO(edev, "Free RSS key\n"); 2203 } 2204 } 2205 2206 return 0; 2207 } 2208 2209 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev, 2210 struct rte_eth_rss_conf *rss_conf) 2211 { 2212 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2213 2214 rss_conf->rss_hf = qdev->rss_conf.rss_hf; 2215 rss_conf->rss_key_len = qdev->rss_conf.rss_key_len; 2216 2217 if (rss_conf->rss_key && qdev->rss_conf.rss_key) 2218 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key, 2219 rss_conf->rss_key_len); 2220 return 0; 2221 } 2222 2223 static bool qede_update_rss_parm_cmt(struct ecore_dev *edev, 2224 struct ecore_rss_params *rss) 2225 { 2226 int i, fn; 2227 bool rss_mode = 1; /* enable */ 2228 struct ecore_queue_cid *cid; 2229 
struct ecore_rss_params *t_rss; 2230 2231 /* In regular scenario, we'd simply need to take input handlers. 2232 * But in CMT, we'd have to split the handlers according to the 2233 * engine they were configured on. We'd then have to understand 2234 * whether RSS is really required, since 2-queues on CMT doesn't 2235 * require RSS. 2236 */ 2237 2238 /* CMT should be round-robin */ 2239 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 2240 cid = rss->rss_ind_table[i]; 2241 2242 if (cid->p_owner == ECORE_LEADING_HWFN(edev)) 2243 t_rss = &rss[0]; 2244 else 2245 t_rss = &rss[1]; 2246 2247 t_rss->rss_ind_table[i / edev->num_hwfns] = cid; 2248 } 2249 2250 t_rss = &rss[1]; 2251 t_rss->update_rss_ind_table = 1; 2252 t_rss->rss_table_size_log = 7; 2253 t_rss->update_rss_config = 1; 2254 2255 /* Make sure RSS is actually required */ 2256 for_each_hwfn(edev, fn) { 2257 for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns; 2258 i++) { 2259 if (rss[fn].rss_ind_table[i] != 2260 rss[fn].rss_ind_table[0]) 2261 break; 2262 } 2263 2264 if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) { 2265 DP_INFO(edev, 2266 "CMT - 1 queue per-hwfn; Disabling RSS\n"); 2267 rss_mode = 0; 2268 goto out; 2269 } 2270 } 2271 2272 out: 2273 t_rss->rss_enable = rss_mode; 2274 2275 return rss_mode; 2276 } 2277 2278 int qede_rss_reta_update(struct rte_eth_dev *eth_dev, 2279 struct rte_eth_rss_reta_entry64 *reta_conf, 2280 uint16_t reta_size) 2281 { 2282 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2283 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2284 struct ecore_sp_vport_update_params vport_update_params; 2285 struct ecore_rss_params *params; 2286 struct ecore_hwfn *p_hwfn; 2287 uint16_t i, idx, shift; 2288 uint8_t entry; 2289 int rc = 0; 2290 2291 if (reta_size > ETH_RSS_RETA_SIZE_128) { 2292 DP_ERR(edev, "reta_size %d is not supported by hardware\n", 2293 reta_size); 2294 return -EINVAL; 2295 } 2296 2297 memset(&vport_update_params, 0, sizeof(vport_update_params)); 2298 params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns, 2299 RTE_CACHE_LINE_SIZE); 2300 if (params == NULL) { 2301 DP_ERR(edev, "failed to allocate memory\n"); 2302 return -ENOMEM; 2303 } 2304 2305 for (i = 0; i < reta_size; i++) { 2306 idx = i / RTE_RETA_GROUP_SIZE; 2307 shift = i % RTE_RETA_GROUP_SIZE; 2308 if (reta_conf[idx].mask & (1ULL << shift)) { 2309 entry = reta_conf[idx].reta[shift]; 2310 /* Pass rxq handles to ecore */ 2311 params->rss_ind_table[i] = 2312 qdev->fp_array[entry].rxq->handle; 2313 /* Update the local copy for RETA query command */ 2314 qdev->rss_ind_table[i] = entry; 2315 } 2316 } 2317 2318 params->update_rss_ind_table = 1; 2319 params->rss_table_size_log = 7; 2320 params->update_rss_config = 1; 2321 2322 /* Fix up RETA for CMT mode device */ 2323 if (ECORE_IS_CMT(edev)) 2324 qdev->rss_enable = qede_update_rss_parm_cmt(edev, 2325 params); 2326 vport_update_params.vport_id = 0; 2327 /* Use the current value of rss_enable */ 2328 params->rss_enable = qdev->rss_enable; 2329 vport_update_params.rss_params = params; 2330 2331 for_each_hwfn(edev, i) { 2332 p_hwfn = &edev->hwfns[i]; 2333 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 2334 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 2335 ECORE_SPQ_MODE_EBLOCK, NULL); 2336 if (rc) { 2337 DP_ERR(edev, "vport-update for RSS failed\n"); 2338 goto out; 2339 } 2340 } 2341 2342 out: 2343 rte_free(params); 2344 return rc; 2345 } 2346 2347 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev, 2348 struct rte_eth_rss_reta_entry64 *reta_conf, 2349 
uint16_t reta_size) 2350 { 2351 struct qede_dev *qdev = eth_dev->data->dev_private; 2352 struct ecore_dev *edev = &qdev->edev; 2353 uint16_t i, idx, shift; 2354 uint8_t entry; 2355 2356 if (reta_size > ETH_RSS_RETA_SIZE_128) { 2357 DP_ERR(edev, "reta_size %d is not supported\n", 2358 reta_size); 2359 return -EINVAL; 2360 } 2361 2362 for (i = 0; i < reta_size; i++) { 2363 idx = i / RTE_RETA_GROUP_SIZE; 2364 shift = i % RTE_RETA_GROUP_SIZE; 2365 if (reta_conf[idx].mask & (1ULL << shift)) { 2366 entry = qdev->rss_ind_table[i]; 2367 reta_conf[idx].reta[shift] = entry; 2368 } 2369 } 2370 2371 return 0; 2372 } 2373 2374 2375 2376 static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 2377 { 2378 struct qede_dev *qdev = QEDE_INIT_QDEV(dev); 2379 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2380 struct rte_eth_dev_info dev_info = {0}; 2381 struct qede_fastpath *fp; 2382 uint32_t max_rx_pkt_len; 2383 uint32_t frame_size; 2384 uint16_t rx_buf_size; 2385 uint16_t bufsz; 2386 bool restart = false; 2387 int i; 2388 2389 PMD_INIT_FUNC_TRACE(edev); 2390 if (IS_VF(edev)) 2391 return -ENOTSUP; 2392 qede_dev_info_get(dev, &dev_info); 2393 max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2394 frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD; 2395 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) { 2396 DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n", 2397 mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN - 2398 ETHER_CRC_LEN - QEDE_ETH_OVERHEAD); 2399 return -EINVAL; 2400 } 2401 if (!dev->data->scattered_rx && 2402 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 2403 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n", 2404 dev->data->min_rx_buf_size); 2405 return -EINVAL; 2406 } 2407 /* Temporarily replace I/O functions with dummy ones. It cannot 2408 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. 
 */
	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
	if (dev->data->dev_started) {
		dev->data->dev_started = 0;
		qede_dev_stop(dev);
		restart = true;
	}
	rte_delay_ms(1000);
	qdev->new_mtu = mtu;
	/* Fix up RX buf size for all queues of the port */
	for_each_rss(i) {
		fp = &qdev->fp_array[i];
		if (fp->rxq != NULL) {
			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
			if (dev->data->scattered_rx)
				rx_buf_size = bufsz + ETHER_HDR_LEN +
					      ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
			else
				rx_buf_size = frame_size;
			rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
			fp->rxq->rx_buf_size = rx_buf_size;
			DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
		}
	}
	if (max_rx_pkt_len > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;
	if (!dev->data->dev_started && restart) {
		qede_dev_start(dev);
		dev->data->dev_started = 1;
	}
	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
	/* Reassign back */
	dev->rx_pkt_burst = qede_recv_pkts;
	dev->tx_pkt_burst = qede_xmit_pkts;

	return 0;
}

static int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	uint16_t udp_port;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));

	switch (tunnel_udp->prot_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}
		udp_port = 0;

		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.vxlan_port.port);
			return rc;
		}

		qdev->vxlan.udp_port = udp_port;
		/* If the request is to delete UDP port and if the number of
		 * VXLAN filters has reached 0 then VXLAN offload can be
		 * disabled.
		 */
		if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
			return qede_vxlan_enable(eth_dev,
					ECORE_TUNN_CLSS_MAC_VLAN, false);

		break;

	case RTE_TUNNEL_TYPE_GENEVE:
		if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
			DP_ERR(edev, "UDP port %u doesn't exist\n",
			       tunnel_udp->udp_port);
			return ECORE_INVAL;
		}

		udp_port = 0;

		tunn.geneve_port.b_update_port = true;
		tunn.geneve_port.port = udp_port;

		rc = qede_tunnel_update(qdev, &tunn);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(edev, "Unable to config UDP port %u\n",
			       tunn.geneve_port.port);
			return rc;
		}

		qdev->geneve.udp_port = udp_port;
		/* If the request is to delete UDP port and if the number of
		 * GENEVE filters has reached 0 then GENEVE offload can be
		 * disabled.
2519 */ 2520 if (qdev->geneve.enable && qdev->geneve.num_filters == 0) 2521 return qede_geneve_enable(eth_dev, 2522 ECORE_TUNN_CLSS_MAC_VLAN, false); 2523 2524 break; 2525 2526 default: 2527 return ECORE_INVAL; 2528 } 2529 2530 return 0; 2531 2532 } 2533 static int 2534 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev, 2535 struct rte_eth_udp_tunnel *tunnel_udp) 2536 { 2537 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2538 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2539 struct ecore_tunnel_info tunn; /* @DPDK */ 2540 uint16_t udp_port; 2541 int rc; 2542 2543 PMD_INIT_FUNC_TRACE(edev); 2544 2545 memset(&tunn, 0, sizeof(tunn)); 2546 2547 switch (tunnel_udp->prot_type) { 2548 case RTE_TUNNEL_TYPE_VXLAN: 2549 if (qdev->vxlan.udp_port == tunnel_udp->udp_port) { 2550 DP_INFO(edev, 2551 "UDP port %u for VXLAN was already configured\n", 2552 tunnel_udp->udp_port); 2553 return ECORE_SUCCESS; 2554 } 2555 2556 /* Enable VxLAN tunnel with default MAC/VLAN classification if 2557 * it was not enabled while adding VXLAN filter before UDP port 2558 * update. 2559 */ 2560 if (!qdev->vxlan.enable) { 2561 rc = qede_vxlan_enable(eth_dev, 2562 ECORE_TUNN_CLSS_MAC_VLAN, true); 2563 if (rc != ECORE_SUCCESS) { 2564 DP_ERR(edev, "Failed to enable VXLAN " 2565 "prior to updating UDP port\n"); 2566 return rc; 2567 } 2568 } 2569 udp_port = tunnel_udp->udp_port; 2570 2571 tunn.vxlan_port.b_update_port = true; 2572 tunn.vxlan_port.port = udp_port; 2573 2574 rc = qede_tunnel_update(qdev, &tunn); 2575 if (rc != ECORE_SUCCESS) { 2576 DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n", 2577 udp_port); 2578 return rc; 2579 } 2580 2581 DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port); 2582 2583 qdev->vxlan.udp_port = udp_port; 2584 break; 2585 2586 case RTE_TUNNEL_TYPE_GENEVE: 2587 if (qdev->geneve.udp_port == tunnel_udp->udp_port) { 2588 DP_INFO(edev, 2589 "UDP port %u for GENEVE was already configured\n", 2590 tunnel_udp->udp_port); 2591 return ECORE_SUCCESS; 2592 } 2593 2594 /* Enable GENEVE tunnel with default MAC/VLAN classification if 2595 * it was not enabled while adding GENEVE filter before UDP port 2596 * update. 
2597 */ 2598 if (!qdev->geneve.enable) { 2599 rc = qede_geneve_enable(eth_dev, 2600 ECORE_TUNN_CLSS_MAC_VLAN, true); 2601 if (rc != ECORE_SUCCESS) { 2602 DP_ERR(edev, "Failed to enable GENEVE " 2603 "prior to updating UDP port\n"); 2604 return rc; 2605 } 2606 } 2607 udp_port = tunnel_udp->udp_port; 2608 2609 tunn.geneve_port.b_update_port = true; 2610 tunn.geneve_port.port = udp_port; 2611 2612 rc = qede_tunnel_update(qdev, &tunn); 2613 if (rc != ECORE_SUCCESS) { 2614 DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n", 2615 udp_port); 2616 return rc; 2617 } 2618 2619 DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port); 2620 2621 qdev->geneve.udp_port = udp_port; 2622 break; 2623 2624 default: 2625 return ECORE_INVAL; 2626 } 2627 2628 return 0; 2629 } 2630 2631 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type, 2632 uint32_t *clss, char *str) 2633 { 2634 uint16_t j; 2635 *clss = MAX_ECORE_TUNN_CLSS; 2636 2637 for (j = 0; j < RTE_DIM(qede_tunn_types); j++) { 2638 if (filter == qede_tunn_types[j].rte_filter_type) { 2639 *type = qede_tunn_types[j].qede_type; 2640 *clss = qede_tunn_types[j].qede_tunn_clss; 2641 strcpy(str, qede_tunn_types[j].string); 2642 return; 2643 } 2644 } 2645 } 2646 2647 static int 2648 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast, 2649 const struct rte_eth_tunnel_filter_conf *conf, 2650 uint32_t type) 2651 { 2652 /* Init commmon ucast params first */ 2653 qede_set_ucast_cmn_params(ucast); 2654 2655 /* Copy out the required fields based on classification type */ 2656 ucast->type = type; 2657 2658 switch (type) { 2659 case ECORE_FILTER_VNI: 2660 ucast->vni = conf->tenant_id; 2661 break; 2662 case ECORE_FILTER_INNER_VLAN: 2663 ucast->vlan = conf->inner_vlan; 2664 break; 2665 case ECORE_FILTER_MAC: 2666 memcpy(ucast->mac, conf->outer_mac.addr_bytes, 2667 ETHER_ADDR_LEN); 2668 break; 2669 case ECORE_FILTER_INNER_MAC: 2670 memcpy(ucast->mac, conf->inner_mac.addr_bytes, 2671 ETHER_ADDR_LEN); 2672 break; 2673 case ECORE_FILTER_MAC_VNI_PAIR: 2674 memcpy(ucast->mac, conf->outer_mac.addr_bytes, 2675 ETHER_ADDR_LEN); 2676 ucast->vni = conf->tenant_id; 2677 break; 2678 case ECORE_FILTER_INNER_MAC_VNI_PAIR: 2679 memcpy(ucast->mac, conf->inner_mac.addr_bytes, 2680 ETHER_ADDR_LEN); 2681 ucast->vni = conf->tenant_id; 2682 break; 2683 case ECORE_FILTER_INNER_PAIR: 2684 memcpy(ucast->mac, conf->inner_mac.addr_bytes, 2685 ETHER_ADDR_LEN); 2686 ucast->vlan = conf->inner_vlan; 2687 break; 2688 default: 2689 return -EINVAL; 2690 } 2691 2692 return ECORE_SUCCESS; 2693 } 2694 2695 static int 2696 _qede_tunn_filter_config(struct rte_eth_dev *eth_dev, 2697 const struct rte_eth_tunnel_filter_conf *conf, 2698 __attribute__((unused)) enum rte_filter_op filter_op, 2699 enum ecore_tunn_clss *clss, 2700 bool add) 2701 { 2702 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2703 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2704 struct ecore_filter_ucast ucast = {0}; 2705 enum ecore_filter_ucast_type type; 2706 uint16_t filter_type = 0; 2707 char str[80]; 2708 int rc; 2709 2710 filter_type = conf->filter_type; 2711 /* Determine if the given filter classification is supported */ 2712 qede_get_ecore_tunn_params(filter_type, &type, clss, str); 2713 if (*clss == MAX_ECORE_TUNN_CLSS) { 2714 DP_ERR(edev, "Unsupported filter type\n"); 2715 return -EINVAL; 2716 } 2717 /* Init tunnel ucast params */ 2718 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type); 2719 if (rc != ECORE_SUCCESS) { 2720 DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n", 2721 
conf->filter_type); 2722 return rc; 2723 } 2724 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n", 2725 str, filter_op, ucast.type); 2726 2727 ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE; 2728 2729 /* Skip MAC/VLAN if filter is based on VNI */ 2730 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { 2731 rc = qede_mac_int_ops(eth_dev, &ucast, add); 2732 if ((rc == 0) && add) { 2733 /* Enable accept anyvlan */ 2734 qede_config_accept_any_vlan(qdev, true); 2735 } 2736 } else { 2737 rc = qede_ucast_filter(eth_dev, &ucast, add); 2738 if (rc == 0) 2739 rc = ecore_filter_ucast_cmd(edev, &ucast, 2740 ECORE_SPQ_MODE_CB, NULL); 2741 } 2742 2743 return rc; 2744 } 2745 2746 static int 2747 qede_tunn_filter_config(struct rte_eth_dev *eth_dev, 2748 enum rte_filter_op filter_op, 2749 const struct rte_eth_tunnel_filter_conf *conf) 2750 { 2751 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2752 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2753 enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS; 2754 bool add; 2755 int rc; 2756 2757 PMD_INIT_FUNC_TRACE(edev); 2758 2759 switch (filter_op) { 2760 case RTE_ETH_FILTER_ADD: 2761 add = true; 2762 break; 2763 case RTE_ETH_FILTER_DELETE: 2764 add = false; 2765 break; 2766 default: 2767 DP_ERR(edev, "Unsupported operation %d\n", filter_op); 2768 return -EINVAL; 2769 } 2770 2771 if (IS_VF(edev)) 2772 return qede_tunn_enable(eth_dev, 2773 ECORE_TUNN_CLSS_MAC_VLAN, 2774 conf->tunnel_type, add); 2775 2776 rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add); 2777 if (rc != ECORE_SUCCESS) 2778 return rc; 2779 2780 if (add) { 2781 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) { 2782 qdev->vxlan.num_filters++; 2783 qdev->vxlan.filter_type = conf->filter_type; 2784 } else { /* GENEVE */ 2785 qdev->geneve.num_filters++; 2786 qdev->geneve.filter_type = conf->filter_type; 2787 } 2788 2789 if (!qdev->vxlan.enable || !qdev->geneve.enable) 2790 return qede_tunn_enable(eth_dev, clss, 2791 conf->tunnel_type, 2792 true); 2793 } else { 2794 if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) 2795 qdev->vxlan.num_filters--; 2796 else /*GENEVE*/ 2797 qdev->geneve.num_filters--; 2798 2799 /* Disable VXLAN if VXLAN filters become 0 */ 2800 if ((qdev->vxlan.num_filters == 0) || 2801 (qdev->geneve.num_filters == 0)) 2802 return qede_tunn_enable(eth_dev, clss, 2803 conf->tunnel_type, 2804 false); 2805 } 2806 2807 return 0; 2808 } 2809 2810 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev, 2811 enum rte_filter_type filter_type, 2812 enum rte_filter_op filter_op, 2813 void *arg) 2814 { 2815 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 2816 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 2817 struct rte_eth_tunnel_filter_conf *filter_conf = 2818 (struct rte_eth_tunnel_filter_conf *)arg; 2819 2820 switch (filter_type) { 2821 case RTE_ETH_FILTER_TUNNEL: 2822 switch (filter_conf->tunnel_type) { 2823 case RTE_TUNNEL_TYPE_VXLAN: 2824 case RTE_TUNNEL_TYPE_GENEVE: 2825 DP_INFO(edev, 2826 "Packet steering to the specified Rx queue" 2827 " is not supported with UDP tunneling"); 2828 return(qede_tunn_filter_config(eth_dev, filter_op, 2829 filter_conf)); 2830 /* Place holders for future tunneling support */ 2831 case RTE_TUNNEL_TYPE_TEREDO: 2832 case RTE_TUNNEL_TYPE_NVGRE: 2833 case RTE_TUNNEL_TYPE_IP_IN_GRE: 2834 case RTE_L2_TUNNEL_TYPE_E_TAG: 2835 DP_ERR(edev, "Unsupported tunnel type %d\n", 2836 filter_conf->tunnel_type); 2837 return -EINVAL; 2838 case RTE_TUNNEL_TYPE_NONE: 2839 default: 2840 return 0; 2841 } 2842 break; 2843 case RTE_ETH_FILTER_FDIR: 2844 return 
qede_fdir_filter_conf(eth_dev, filter_op, arg); 2845 case RTE_ETH_FILTER_NTUPLE: 2846 return qede_ntuple_filter_conf(eth_dev, filter_op, arg); 2847 case RTE_ETH_FILTER_MACVLAN: 2848 case RTE_ETH_FILTER_ETHERTYPE: 2849 case RTE_ETH_FILTER_FLEXIBLE: 2850 case RTE_ETH_FILTER_SYN: 2851 case RTE_ETH_FILTER_HASH: 2852 case RTE_ETH_FILTER_L2_TUNNEL: 2853 case RTE_ETH_FILTER_MAX: 2854 default: 2855 DP_ERR(edev, "Unsupported filter type %d\n", 2856 filter_type); 2857 return -EINVAL; 2858 } 2859 2860 return 0; 2861 } 2862 2863 static const struct eth_dev_ops qede_eth_dev_ops = { 2864 .dev_configure = qede_dev_configure, 2865 .dev_infos_get = qede_dev_info_get, 2866 .rx_queue_setup = qede_rx_queue_setup, 2867 .rx_queue_release = qede_rx_queue_release, 2868 .tx_queue_setup = qede_tx_queue_setup, 2869 .tx_queue_release = qede_tx_queue_release, 2870 .dev_start = qede_dev_start, 2871 .dev_set_link_up = qede_dev_set_link_up, 2872 .dev_set_link_down = qede_dev_set_link_down, 2873 .link_update = qede_link_update, 2874 .promiscuous_enable = qede_promiscuous_enable, 2875 .promiscuous_disable = qede_promiscuous_disable, 2876 .allmulticast_enable = qede_allmulticast_enable, 2877 .allmulticast_disable = qede_allmulticast_disable, 2878 .dev_stop = qede_dev_stop, 2879 .dev_close = qede_dev_close, 2880 .stats_get = qede_get_stats, 2881 .stats_reset = qede_reset_stats, 2882 .xstats_get = qede_get_xstats, 2883 .xstats_reset = qede_reset_xstats, 2884 .xstats_get_names = qede_get_xstats_names, 2885 .mac_addr_add = qede_mac_addr_add, 2886 .mac_addr_remove = qede_mac_addr_remove, 2887 .mac_addr_set = qede_mac_addr_set, 2888 .vlan_offload_set = qede_vlan_offload_set, 2889 .vlan_filter_set = qede_vlan_filter_set, 2890 .flow_ctrl_set = qede_flow_ctrl_set, 2891 .flow_ctrl_get = qede_flow_ctrl_get, 2892 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, 2893 .rss_hash_update = qede_rss_hash_update, 2894 .rss_hash_conf_get = qede_rss_hash_conf_get, 2895 .reta_update = qede_rss_reta_update, 2896 .reta_query = qede_rss_reta_query, 2897 .mtu_set = qede_set_mtu, 2898 .filter_ctrl = qede_dev_filter_ctrl, 2899 .udp_tunnel_port_add = qede_udp_dst_port_add, 2900 .udp_tunnel_port_del = qede_udp_dst_port_del, 2901 }; 2902 2903 static const struct eth_dev_ops qede_eth_vf_dev_ops = { 2904 .dev_configure = qede_dev_configure, 2905 .dev_infos_get = qede_dev_info_get, 2906 .rx_queue_setup = qede_rx_queue_setup, 2907 .rx_queue_release = qede_rx_queue_release, 2908 .tx_queue_setup = qede_tx_queue_setup, 2909 .tx_queue_release = qede_tx_queue_release, 2910 .dev_start = qede_dev_start, 2911 .dev_set_link_up = qede_dev_set_link_up, 2912 .dev_set_link_down = qede_dev_set_link_down, 2913 .link_update = qede_link_update, 2914 .promiscuous_enable = qede_promiscuous_enable, 2915 .promiscuous_disable = qede_promiscuous_disable, 2916 .allmulticast_enable = qede_allmulticast_enable, 2917 .allmulticast_disable = qede_allmulticast_disable, 2918 .dev_stop = qede_dev_stop, 2919 .dev_close = qede_dev_close, 2920 .stats_get = qede_get_stats, 2921 .stats_reset = qede_reset_stats, 2922 .xstats_get = qede_get_xstats, 2923 .xstats_reset = qede_reset_xstats, 2924 .xstats_get_names = qede_get_xstats_names, 2925 .vlan_offload_set = qede_vlan_offload_set, 2926 .vlan_filter_set = qede_vlan_filter_set, 2927 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, 2928 .rss_hash_update = qede_rss_hash_update, 2929 .rss_hash_conf_get = qede_rss_hash_conf_get, 2930 .reta_update = qede_rss_reta_update, 2931 .reta_query = qede_rss_reta_query, 2932 .mtu_set = 
qede_set_mtu,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}

static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	adapter->ethdev = eth_dev;
	edev = &adapter->edev;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_ERR(edev, "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");
	rc = qed_ops->common->probe(edev, pci_dev, dp_module,
				    dp_level, is_vf);
	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}
	qede_update_pf_params(edev);
	rte_intr_callback_register(&pci_dev->intr_handle,
				   qede_interrupt_handler, (void *)eth_dev);
	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = ECORE_INT_MODE_MSIX;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
		rc = rte_eal_alarm_set(timer_period * US_PER_S,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
				(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist && is_mac_forced) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_ERR(edev, "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ?
&qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	SLIST_INIT(&adapter->fdir_info.fdir_list_head);
	SLIST_INIT(&adapter->vlan_list_head);
	SLIST_INIT(&adapter->uc_list_head);
	adapter->mtu = ETHER_MTU;
	adapter->new_mtu = ETHER_MTU;
	if (!is_vf) {
		if (qede_start_vport(adapter, adapter->mtu))
			return -1;
	} else {
		/* VF tunnel offloads are enabled by default in PF driver */
		adapter->vxlan.enable = true;
		adapter->vxlan.num_filters = 0;
		adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
					     ETH_TUNNEL_FILTER_IVLAN;
		adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
		adapter->geneve.enable = true;
		adapter->geneve.num_filters = 0;
		adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
					      ETH_TUNNEL_FILTER_IVLAN;
		adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
	}

	DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		adapter->primary_mac.addr_bytes[0],
		adapter->primary_mac.addr_bytes[1],
		adapter->primary_mac.addr_bytes[2],
		adapter->primary_mac.addr_bytes[3],
		adapter->primary_mac.addr_bytes[4],
		adapter->primary_mac.addr_bytes[5]);

	DP_INFO(edev, "Device initialized\n");

	return 0;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50) 3236 }, 3237 { 3238 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G) 3239 }, 3240 { 3241 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G) 3242 }, 3243 { 3244 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G) 3245 }, 3246 { 3247 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G) 3248 }, 3249 {.vendor_id = 0,} 3250 }; 3251 3252 static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3253 struct rte_pci_device *pci_dev) 3254 { 3255 return rte_eth_dev_pci_generic_probe(pci_dev, 3256 sizeof(struct qede_dev), qedevf_eth_dev_init); 3257 } 3258 3259 static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 3260 { 3261 return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit); 3262 } 3263 3264 static struct rte_pci_driver rte_qedevf_pmd = { 3265 .id_table = pci_id_qedevf_map, 3266 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 3267 .probe = qedevf_eth_dev_pci_probe, 3268 .remove = qedevf_eth_dev_pci_remove, 3269 }; 3270 3271 static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 3272 struct rte_pci_device *pci_dev) 3273 { 3274 return rte_eth_dev_pci_generic_probe(pci_dev, 3275 sizeof(struct qede_dev), qede_eth_dev_init); 3276 } 3277 3278 static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev) 3279 { 3280 return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit); 3281 } 3282 3283 static struct rte_pci_driver rte_qede_pmd = { 3284 .id_table = pci_id_qede_map, 3285 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 3286 .probe = qede_eth_dev_pci_probe, 3287 .remove = qede_eth_dev_pci_remove, 3288 }; 3289 3290 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd); 3291 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map); 3292 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio-pci"); 3293 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd); 3294 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map); 3295 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci"); 3296 3297 RTE_INIT(qede_init_log); 3298 static void 3299 qede_init_log(void) 3300 { 3301 qede_logtype_init = rte_log_register("pmd.qede.init"); 3302 if (qede_logtype_init >= 0) 3303 rte_log_set_level(qede_logtype_init, RTE_LOG_NOTICE); 3304 qede_logtype_driver = rte_log_register("pmd.qede.driver"); 3305 if (qede_logtype_driver >= 0) 3306 rte_log_set_level(qede_logtype_driver, RTE_LOG_NOTICE); 3307 } 3308
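/*
 * Usage sketch (not part of the driver): the extended statistics exposed by
 * qede_get_xstats_names() and qede_get_xstats() above are consumed through
 * the generic ethdev xstats API. The snippet below is a minimal,
 * application-side illustration only; dump_qede_xstats() is a hypothetical
 * helper name, and it assumes a port bound to this PMD that has already been
 * configured and started.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <inttypes.h>
 *	#include <rte_ethdev.h>
 *
 *	static void dump_qede_xstats(uint16_t port_id)
 *	{
 *		struct rte_eth_xstat_name *names = NULL;
 *		struct rte_eth_xstat *values = NULL;
 *		int i, cnt;
 *
 *		// A first call with a NULL array reports how many counters
 *		// the PMD exposes (qede_get_xstats_count(), including the
 *		// per-Rx-queue entries).
 *		cnt = rte_eth_xstats_get_names(port_id, NULL, 0);
 *		if (cnt <= 0)
 *			return;
 *
 *		names = calloc(cnt, sizeof(*names));
 *		values = calloc(cnt, sizeof(*values));
 *		if (names == NULL || values == NULL)
 *			goto out;
 *
 *		if (rte_eth_xstats_get_names(port_id, names, cnt) != cnt ||
 *		    rte_eth_xstats_get(port_id, values, cnt) != cnt)
 *			goto out;
 *
 *		// Per-queue Rx counters appear as "rx_q<N>_..." names, which
 *		// is the effect of the "%.4s%d%s" formatting used in
 *		// qede_get_xstats_names().
 *		for (i = 0; i < cnt; i++)
 *			printf("%s: %" PRIu64 "\n",
 *			       names[i].name, values[i].value);
 *	out:
 *		free(names);
 *		free(values);
 *	}
 *
 * Error handling is intentionally minimal; a real application would also
 * check rte_eth_dev_is_valid_port() before querying the port.
 */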