/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>

/* Globals */
static const struct qed_eth_ops *qed_ops;
static const char *drivername = "qede pmd";
static int64_t timer_period = 1;

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
	{"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats, rx_bcast_pkts)},

	{"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
	{"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats,
			 rx_9217_to_16383_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats,
			 tx_9217_to_16383_byte_packets)},
	{"rx_mac_crtl_frames",
		offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
	{"tx_mac_control_frames",
		offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
	{"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
	{"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats, tx_pfc_frames)},

	{"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
	{"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats, rx_oversize_packets)},
	{"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats, packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats, mac_filter_discards)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats, brb_discards)},
	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, tx_total_collisions)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
};

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

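/* PCI interrupt handler: run the slowpath DPC on the leading HW function
 * and re-enable the interrupt line.
 */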
static void
qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
		       void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
		DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->num_tc = qdev->dev_info.num_tc;
	qdev->ops = qed_ops;
}

static void qede_print_adapter_info(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char ver_str[QED_DRV_VER_STR_SIZE];

	DP_INFO(edev, "*********************************\n");
	DP_INFO(edev, " Chip details : %s%d\n",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		CHIP_REV_IS_A0(edev) ? 0 : 1);

	sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX,
		edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR,
		QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH);
	strcpy(qdev->drv_ver, ver_str);
	DP_INFO(edev, " Driver version : %s\n", ver_str);

	sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor,
		info->fw_rev, info->fw_eng);
	DP_INFO(edev, " Firmware version : %s\n", ver_str);

	sprintf(ver_str, "%d.%d.%d.%d",
		(info->mfw_rev >> 24) & 0xff,
		(info->mfw_rev >> 16) & 0xff,
		(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
	DP_INFO(edev, " Management firmware version : %s\n", ver_str);

	DP_INFO(edev, " Firmware file : %s\n", fw_file);

	DP_INFO(edev, "*********************************\n");
}

static int
qede_set_ucast_rx_mac(struct qede_dev *qdev,
		      enum qed_filter_xcast_params_type opcode,
		      uint8_t mac[ETHER_ADDR_LEN])
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.mac_valid = 1;
	rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
	return qdev->ops->filter_config(edev, &filter_cmd);
}

static void
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
		  uint32_t index, __rte_unused uint32_t pool)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_addrs) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_addrs);
		return;
	}

	/* Adding macaddr even though promiscuous mode is set */
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		DP_INFO(edev, "Port is in promisc mode, yet adding it\n");

	/* Add MAC filters according to the unicast secondary macs */
	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
				   mac_addr->addr_bytes);
	if (rc)
		DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
}

static void
qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ether_addr mac_addr;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	if (index >= qdev->dev_info.num_mac_addrs) {
		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
		       index, qdev->dev_info.num_mac_addrs);
		return;
	}

	/* Use the index maintained by rte */
	ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
				   mac_addr.addr_bytes);
	if (rc)
		DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
}

static void
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		ether_addr_copy(&qdev->primary_mac,
				&eth_dev->data->mac_addrs[0]);
		return;
	}

	/* First remove the primary mac */
	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
				   qdev->primary_mac.addr_bytes);

	if (rc) {
		DP_ERR(edev, "Unable to remove current macaddr."
			     " Reverting to previous default mac\n");
		ether_addr_copy(&qdev->primary_mac,
				&eth_dev->data->mac_addrs[0]);
		return;
	}

	/* Add new MAC */
	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
				   mac_addr->addr_bytes);

	if (rc)
		DP_ERR(edev, "Unable to add new default mac\n");
	else
		ether_addr_copy(mac_addr, &qdev->primary_mac);
}

/* Toggle accept-any-VLAN on vport 0. Used as a fallback once the number of
 * configured VLAN filters reaches the device limit.
 */
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_update_vport_params params = {
		.vport_id = 0,
		.accept_any_vlan = action,
		.update_accept_any_vlan_flg = 1,
	};
	int rc;

	/* Proceed only if action actually needs to be performed */
	if (qdev->accept_any_vlan == action)
		return;

	rc = qdev->ops->vport_update(edev, &params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		qdev->accept_any_vlan = action;
	}
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_inner_vlan_removal_flg = 1;
	vport_update_params.inner_vlan_removal_flg = set_stripping;
	rc = qdev->ops->vport_update(edev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}

static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
			(void)qede_vlan_stripping(eth_dev, 1);
		else
			(void)qede_vlan_stripping(eth_dev, 0);
	}

	DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n",
		mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);
}

static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
				  enum qed_filter_xcast_params_type opcode,
				  uint16_t vid)
{
	struct qed_filter_params filter_cmd;
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.vlan_valid = 1;
	filter_cmd.filter.ucast.vlan = vid;

	return qdev->ops->filter_config(edev, &filter_cmd);
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	int rc;

	if (vlan_id != 0 &&
	    qdev->configured_vlans == dev_info->num_vlan_filters) {
		DP_NOTICE(edev, false, "Reached max VLAN filter limit,"
				       " enabling accept_any_vlan\n");
		qede_config_accept_any_vlan(qdev, true);
		return 0;
	}

	if (on) {
		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan_id);
		if (rc)
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
		else
			if (vlan_id != 0)
				qdev->configured_vlans++;
	} else {
		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
					    vlan_id);
		if (rc)
			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
			       vlan_id, rc);
		else
			if (vlan_id != 0)
				qdev->configured_vlans--;
	}

	DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n",
		vlan_id, on, rc, qdev->configured_vlans);

	return rc;
}

static int qede_init_vport(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_start_vport_params start = {0};
	int rc;

	start.remove_inner_vlan = 1;
	start.gro_enable = 0;
	start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
	start.vport_id = 0;
	start.drop_ttl0 = false;
	start.clear_stats = 1;
	start.handle_ptp_pkts = 0;

	rc = qdev->ops->vport_start(edev, &start);
	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		return rc;
	}

	DP_INFO(edev,
		"Start vport ramrod passed, vport_id = %d, MTU = %u\n",
		start.vport_id, ETHER_MTU);

	return 0;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	/* Check requirements for 100G mode */
	if (edev->num_hwfns > 1) {
		if (eth_dev->data->nb_rx_queues < 2 ||
		    eth_dev->data->nb_tx_queues < 2) {
			DP_NOTICE(edev, false,
				  "100G mode needs min. 2 RX/TX queues\n");
			return -EINVAL;
		}

		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
			DP_NOTICE(edev, false,
				  "100G mode needs even no. of RX/TX queues\n");
			return -EINVAL;
		}
	}

	qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
	qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
	qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;

	/* Sanity checks and throw warnings */
	if (rxmode->enable_scatter == 1) {
		DP_ERR(edev, "RX scatter packets is not supported\n");
		return -EINVAL;
	}

	if (rxmode->enable_lro == 1) {
		DP_INFO(edev, "LRO is not supported\n");
		return -EINVAL;
	}

	if (!rxmode->hw_strip_crc)
		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

	if (!rxmode->hw_ip_checksum)
		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
			      "in hw\n");

	/* Check for the port restart case */
	if (qdev->state != QEDE_DEV_INIT) {
		rc = qdev->ops->vport_stop(edev, 0);
		if (rc != 0)
			return rc;
		qede_dealloc_fp_resc(eth_dev);
	}

	/* Fastpath status block should be initialized before sending
	 * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
	 */
	rc = qede_alloc_fp_resc(qdev);
	if (rc != 0)
		return rc;

	/* Issue VPORT-START with default config values to allow
	 * other port configurations early on.
	 */
	rc = qede_init_vport(qdev);
	if (rc != 0)
		return rc;

	/* Add primary mac for PF */
	if (IS_PF(edev))
		qede_mac_addr_set(eth_dev, &qdev->primary_mac);

	qdev->state = QEDE_DEV_CONFIG;

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = NUM_RX_BDS_MAX,
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = NUM_TX_BDS_MAX,
	.nb_min = 256,
	.nb_align = 256
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
					      QEDE_ETH_OVERHEAD);
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;
	dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
	dev_info->max_tx_queues = dev_info->max_rx_queues;
	dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
	if (IS_VF(edev))
		dev_info->max_vfs = 0;
	else
		dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
	dev_info->driver_name = qdev->drv_ver;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = QEDE_TXQ_FLAGS,
	};

	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
				     DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM);
	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM);

	dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
			       ETH_LINK_SPEED_100G;
}

/* return 0 means link status changed, -1 means not changed */
static int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t link_duplex;
	struct qed_link_output link;
	struct rte_eth_link *curr = &eth_dev->data->dev_link;

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);

	/* Link Speed */
	curr->link_speed = link.speed;

	/* Link Mode */
	switch (link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	curr->link_duplex = link_duplex;

	/* Link Status */
	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		curr->link_speed, curr->link_duplex,
		curr->link_autoneg, curr->link_status);

	/* return 0 means link status changed, -1 means not changed */
	return ((curr->link_status == link.link_up) ? -1 : 0);
}

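/* Program the vport rx-mode (regular, promiscuous and/or multicast
 * promiscuous) through the common filter_config interface.
 */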
static void
qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
		     enum qed_filter_rx_mode_type accept_flags)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_filter_params rx_mode;

	DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);

	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
	rx_mode.filter.accept_flags = accept_flags;
	qdev->ops->filter_config(edev, &rx_mode);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	qede_rx_mode_setting(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		qede_rx_mode_setting(eth_dev,
				     QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(timer_period * US_PER_S,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
		assert(false && "Unable to start periodic timer");
	}
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources and the device can be brought up
	 * freshly afterwards.
	 */
	if (qdev->state != QEDE_DEV_STOP)
		qede_dev_stop(eth_dev);
	else
		DP_INFO(edev, "Device is already stopped\n");

	rc = qdev->ops->vport_stop(edev, 0);
	if (rc != 0)
		DP_ERR(edev, "Failed to stop VPORT\n");

	qede_dealloc_fp_resc(eth_dev);

	qdev->ops->common->slowpath_stop(edev);

	qdev->ops->common->remove(edev);

	rte_intr_disable(&eth_dev->pci_dev->intr_handle);

	rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
				     qede_interrupt_handler, (void *)eth_dev);

	if (edev->num_hwfns > 1)
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);

	qdev->state = QEDE_DEV_INIT; /* Go back to init state */
}

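/* Fold the per-class ecore vport counters into the generic rte_eth_stats
 * fields (packets, bytes, errors, missed, no-mbuf).
 */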
static void
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;

	qdev->ops->get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.rx_ucast_pkts +
	    stats.rx_mcast_pkts + stats.rx_bcast_pkts;

	eth_stats->ibytes = stats.rx_ucast_bytes +
	    stats.rx_mcast_bytes + stats.rx_bcast_bytes;

	eth_stats->ierrors = stats.rx_crc_errors +
	    stats.rx_align_errors +
	    stats.rx_carrier_errors +
	    stats.rx_oversize_packets +
	    stats.rx_jabbers + stats.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.no_buff_discards;

	eth_stats->imissed = stats.mftag_filter_discards +
	    stats.mac_filter_discards +
	    stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.tx_ucast_pkts +
	    stats.tx_mcast_pkts + stats.tx_bcast_pkts;

	eth_stats->obytes = stats.tx_ucast_bytes +
	    stats.tx_mcast_bytes + stats.tx_bcast_bytes;

	eth_stats->oerrors = stats.tx_err_drop_pkts;
}

static int
qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
		      struct rte_eth_xstat_name *xstats_names, unsigned limit)
{
	unsigned int i, stat_cnt = RTE_DIM(qede_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				 sizeof(xstats_names[i].name),
				 "%s",
				 qede_xstats_strings[i].name);

	return stat_cnt;
}

static int
qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int num = RTE_DIM(qede_xstats_strings);

	if (n < num)
		return num;

	qdev->ops->get_vport_stats(edev, &stats);

	/* Fill only as many entries as there are xstats strings; the caller
	 * may pass a larger array.
	 */
	for (num = 0; num < RTE_DIM(qede_xstats_strings); num++)
		xstats[num].value = *(u64 *)(((char *)&stats) +
					     qede_xstats_strings[num].offset);

	return num;
}

static void
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	qede_rx_mode_setting(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
}

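/* Map the requested DPDK flow-control mode onto the qed pause
 * configuration and re-apply it via set_link.
 */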
static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
		return ptypes;

	return NULL;
}

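/* Translate the DPDK rss_hf flags into ecore RSS capability bits and push
 * the updated hash key/capabilities to vport 0.
 */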
int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint8_t rss_caps;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	int i;

	if (hf == 0)
		DP_ERR(edev, "hash function 0 will disable RSS\n");

	rss_caps = 0;
	rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;

	/* If the mapping doesn't fit any supported, return */
	if (rss_caps == 0 && hf != 0)
		return -EINVAL;

	memset(&vport_update_params, 0, sizeof(vport_update_params));

	if (key != NULL)
		memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
		       rss_conf->rss_key_len);

	qdev->rss_params.rss_caps = rss_caps;
	memcpy(&vport_update_params.rss_params, &qdev->rss_params,
	       sizeof(vport_update_params.rss_params));
	vport_update_params.update_rss_flg = 1;
	vport_update_params.vport_id = 0;

	return qdev->ops->vport_update(edev, &vport_update_params);
}

int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			   struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	uint64_t hf;

	if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
		return -EINVAL;

	if (rss_conf->rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
		       sizeof(qdev->rss_params.rss_key));

	hf = 0;
	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4) ?
	    ETH_RSS_IPV4 : 0;
	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
	    ETH_RSS_IPV6 : 0;
	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
	    ETH_RSS_IPV6_EX : 0;
	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
	    ETH_RSS_NONFRAG_IPV4_TCP : 0;
	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
	    ETH_RSS_NONFRAG_IPV6_TCP : 0;
	hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
	    ETH_RSS_IPV6_TCP_EX : 0;

	rss_conf->rss_hf = hf;

	return 0;
}

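/* Program the RSS indirection table; only entries whose bit is set in the
 * reta_conf mask are updated.
 */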
int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memcpy(&vport_update_params.rss_params, &qdev->rss_params,
	       sizeof(vport_update_params.rss_params));

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			uint8_t entry = reta_conf[idx].reta[shift];
			qdev->rss_params.rss_ind_table[i] = entry;
		}
	}

	vport_update_params.update_rss_flg = 1;
	vport_update_params.vport_id = 0;

	return qdev->ops->vport_update(edev, &vport_update_params);
}

int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	uint16_t i, idx, shift;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		struct ecore_dev *edev = &qdev->edev;
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			uint8_t entry = qdev->rss_params.rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}

int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	uint32_t frame_size;
	struct qede_dev *qdev = dev->data->dev_private;
	struct rte_eth_dev_info dev_info = {0};

	qede_dev_info_get(dev, &dev_info);

	/* VLAN_TAG = 4 */
	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;

	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
		return -EINVAL;

	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		return -EINVAL;

	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	qdev->mtu = mtu;
	qede_dev_stop(dev);
	qede_dev_start(dev);

	return 0;
}

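/* PF dev_ops table. The VF table below omits the MAC address and
 * flow-control callbacks.
 */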
static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
};

static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
};

static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;
	/* 32 rx + 32 tx */
	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = 64;
	qed_ops->common->update_pf_params(edev, &pf_params);
}

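/* Common PF/VF init path: probe the device, start the slowpath, query
 * device info, allocate the MAC address table and pick the default MAC.
 */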
static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	uint32_t max_mac_addrs;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	edev = &adapter->edev;
	pci_addr = eth_dev->pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_NOTICE(edev, false,
			  "Skipping device init from secondary process\n");
		return 0;
	}

	pci_dev = eth_dev->pci_dev;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");

	rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
				    dp_module, dp_level, is_vf);

	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}

	qede_update_pf_params(edev);

	rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
				   qede_interrupt_handler, (void *)eth_dev);

	if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = ECORE_INT_MODE_MSIX;
	params.drv_major = QEDE_MAJOR_VERSION;
	params.drv_minor = QEDE_MINOR_VERSION;
	params.drv_rev = QEDE_REVISION_VERSION;
	params.drv_eng = QEDE_ENGINEERING_VERSION;
	strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (edev->num_hwfns > 1) {
		rc = rte_eal_alarm_set(timer_period * US_PER_S,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);

	if (!is_vf)
		adapter->dev_info.num_mac_addrs =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
					     &adapter->dev_info.num_mac_addrs);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					       (ETHER_ADDR_LEN *
						adapter->dev_info.num_mac_addrs),
					       RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

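	/* PF: use the permanent MAC from HW; VF: read the (possibly forced)
	 * MAC published by the PF through the bulletin board.
	 */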
	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist && is_mac_forced) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_NOTICE(edev, false,
					  "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;

	if (do_once) {
		qede_print_adapter_info(adapter);
		do_once = false;
	}

	adapter->state = QEDE_DEV_INIT;

	DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		  adapter->primary_mac.addr_bytes[0],
		  adapter->primary_mac.addr_bytes[1],
		  adapter->primary_mac.addr_bytes[2],
		  adapter->primary_mac.addr_bytes[3],
		  adapter->primary_mac.addr_bytes[4],
		  adapter->primary_mac.addr_bytes[5]);

	return rc;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
	},
	{.vendor_id = 0,}
};

static struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
	},
	{.vendor_id = 0,}
};

static struct eth_driver rte_qedevf_pmd = {
	.pci_drv = {
		.id_table = pci_id_qedevf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = qedevf_eth_dev_init,
	.eth_dev_uninit = qedevf_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};

static struct eth_driver rte_qede_pmd = {
	.pci_drv = {
		.id_table = pci_id_qede_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = qede_eth_dev_init,
	.eth_dev_uninit = qede_eth_dev_uninit,
	.dev_private_size = sizeof(struct qede_dev),
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);