/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
#include <rte_alarm.h>
#include <rte_version.h>

/* Globals */
static const struct qed_eth_ops *qed_ops;
static int64_t timer_period = 1;

/* VXLAN tunnel classification mapping */
const struct _qede_vxlan_tunn_types {
	uint16_t rte_filter_type;
	enum ecore_filter_ucast_type qede_type;
	enum ecore_tunn_clss qede_tunn_clss;
	const char *string;
} qede_tunn_types[] = {
	{
		ETH_TUNNEL_FILTER_OMAC,
		ECORE_FILTER_MAC,
		ECORE_TUNN_CLSS_MAC_VLAN,
		"outer-mac"
	},
	{
		ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_VNI,
		ECORE_TUNN_CLSS_MAC_VNI,
		"vni"
	},
	{
		ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_VLAN,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
		ECORE_FILTER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_MAC_VNI,
		"outer-mac and vni"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-mac"
	},
	{
		ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-mac and inner-vlan"
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
		ECORE_FILTER_INNER_MAC_VNI_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VNI,
		"vni and inner-mac",
	},
	{
		ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"vni and inner-vlan",
	},
	{
		ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
		ECORE_FILTER_INNER_PAIR,
		ECORE_TUNN_CLSS_INNER_MAC_VLAN,
		"inner-mac and inner-vlan",
	},
	{
		ETH_TUNNEL_FILTER_OIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"outer-IP"
	},
	{
		ETH_TUNNEL_FILTER_IIP,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"inner-IP"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_IVLAN_TENID"
	},
	{
		RTE_TUNNEL_FILTER_IMAC_TENID,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"IMAC_TENID"
	},
	{
		RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
		ECORE_FILTER_UNUSED,
		MAX_ECORE_TUNN_CLSS,
		"OMAC_TENID_IMAC"
	},
};

struct rte_qede_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint64_t offset;
};

static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
	{"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
	{"rx_multicast_bytes",
		offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
	{"rx_broadcast_bytes",
		offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
	{"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
	{"rx_multicast_packets",
		offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
	{"rx_broadcast_packets",
		offsetof(struct ecore_eth_stats, rx_bcast_pkts)},

	{"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
	{"tx_multicast_bytes",
		offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
	{"tx_broadcast_bytes",
		offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
	{"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
	{"tx_multicast_packets",
		offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
	{"tx_broadcast_packets",
		offsetof(struct ecore_eth_stats, tx_bcast_pkts)},

	{"rx_64_byte_packets",
		offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
	{"rx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
	{"rx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
	{"rx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
	{"rx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
	{"rx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
	{"rx_1519_to_1522_byte_packets",
		offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
	{"rx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
	{"rx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
	{"rx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
	{"rx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats,
			 rx_9217_to_16383_byte_packets)},
	{"tx_64_byte_packets",
		offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
	{"tx_65_to_127_byte_packets",
		offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
	{"tx_128_to_255_byte_packets",
		offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
	{"tx_256_to_511_byte_packets",
		offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
	{"tx_512_to_1023_byte_packets",
		offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
	{"tx_1024_to_1518_byte_packets",
		offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
	{"tx_1519_to_2047_byte_packets",
		offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
	{"tx_2048_to_4095_byte_packets",
		offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
	{"tx_4096_to_9216_byte_packets",
		offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
	{"tx_9217_to_16383_byte_packets",
		offsetof(struct ecore_eth_stats,
			 tx_9217_to_16383_byte_packets)},

	{"rx_mac_control_frames",
		offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
	{"tx_mac_control_frames",
		offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
	{"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
	{"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
	{"rx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats, rx_pfc_frames)},
	{"tx_priority_flow_control_frames",
		offsetof(struct ecore_eth_stats, tx_pfc_frames)},

	{"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
	{"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
	{"rx_carrier_errors",
		offsetof(struct ecore_eth_stats, rx_carrier_errors)},
	{"rx_oversize_packet_errors",
		offsetof(struct ecore_eth_stats, rx_oversize_packets)},
	{"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
	{"rx_undersize_packet_errors",
		offsetof(struct ecore_eth_stats, rx_undersize_packets)},
	{"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
	{"rx_host_buffer_not_available",
		offsetof(struct ecore_eth_stats, no_buff_discards)},
	/* Number of packets discarded because they are bigger than MTU */
	{"rx_packet_too_big_discards",
		offsetof(struct ecore_eth_stats, packet_too_big_discard)},
	{"rx_ttl_zero_discards",
		offsetof(struct ecore_eth_stats, ttl0_discard)},
	{"rx_multi_function_tag_filter_discards",
		offsetof(struct ecore_eth_stats, mftag_filter_discards)},
	{"rx_mac_filter_discards",
		offsetof(struct ecore_eth_stats, mac_filter_discards)},
	{"rx_hw_buffer_truncates",
		offsetof(struct ecore_eth_stats, brb_truncates)},
	{"rx_hw_buffer_discards",
		offsetof(struct ecore_eth_stats, brb_discards)},
	{"tx_lpi_entry_count",
		offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
	{"tx_total_collisions",
		offsetof(struct ecore_eth_stats, tx_total_collisions)},
	{"tx_error_drop_packets",
		offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},

	{"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
	{"rx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
	{"rx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
	{"rx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
	{"rx_mac_frames_ok",
		offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
	{"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
	{"tx_mac_unicast_packets",
		offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
	{"tx_mac_multicast_packets",
		offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
	{"tx_mac_broadcast_packets",
		offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},

	{"lro_coalesced_packets",
		offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
	{"lro_coalesced_events",
		offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
	{"lro_aborts_num",
		offsetof(struct ecore_eth_stats, tpa_aborts_num)},
	{"lro_not_coalesced_packets",
		offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
	{"lro_coalesced_bytes",
		offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
};

static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
	{"rx_q_segments",
		offsetof(struct qede_rx_queue, rx_segs)},
	{"rx_q_hw_errors",
		offsetof(struct qede_rx_queue, rx_hw_errors)},
	{"rx_q_allocation_errors",
		offsetof(struct qede_rx_queue, rx_alloc_errors)}
};

static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
}

static void
qede_interrupt_handler(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	if (rte_intr_enable(eth_dev->intr_handle))
		DP_ERR(edev, "rte_intr_enable failed\n");
}

static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
	rte_memcpy(&qdev->dev_info, info, sizeof(*info));
	qdev->num_tc = qdev->dev_info.num_tc;
	qdev->ops = qed_ops;
}

#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
static void qede_print_adapter_info(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_dev_info *info = &qdev->dev_info.common;
	static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
	static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];

	DP_INFO(edev, "*********************************\n");
	DP_INFO(edev, " DPDK version:%s\n", rte_version());
	DP_INFO(edev, " Chip details : %s%d\n",
		ECORE_IS_BB(edev) ? "BB" : "AH",
		CHIP_REV_IS_A0(edev) ? 0 : 1);
	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
		 ver_str, QEDE_PMD_VERSION);
	DP_INFO(edev, " Driver version : %s\n", drv_ver);
	DP_INFO(edev, " Firmware version : %s\n", ver_str);

	snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
		 "%d.%d.%d.%d",
		 (info->mfw_rev >> 24) & 0xff,
		 (info->mfw_rev >> 16) & 0xff,
		 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
	DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
	DP_INFO(edev, " Firmware file : %s\n", fw_file);
	DP_INFO(edev, "*********************************\n");
}
#endif

static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
	memset(ucast, 0, sizeof(struct ecore_filter_ucast));
	ucast->is_rx_filter = true;
	ucast->is_tx_filter = true;
	/* ucast->assert_on_error = true; - For debug */
}

static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
				    uint8_t clss, bool mode, bool mask)
{
	memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
	p_tunn->vxlan.b_update_mode = mode;
	p_tunn->vxlan.b_mode_enabled = mask;
	p_tunn->b_update_rx_cls = true;
	p_tunn->b_update_tx_cls = true;
	p_tunn->vxlan.tun_cls = clss;
}

static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qede_ucast_entry *tmp = NULL;
	struct qede_ucast_entry *u;
	struct ether_addr *mac_addr;

	mac_addr = (struct ether_addr *)ucast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan) {
				DP_ERR(edev, "Unicast MAC is already added"
				       " with vlan = %u, vni = %u\n",
				       ucast->vlan, ucast->vni);
				return -EEXIST;
			}
		}
		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
			       RTE_CACHE_LINE_SIZE);
		if (!u) {
			DP_ERR(edev, "Did not allocate memory for ucast\n");
			return -ENOMEM;
		}
		ether_addr_copy(mac_addr, &u->mac);
		u->vlan = ucast->vlan;
		u->vni = ucast->vni;
		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
		qdev->num_uc_addr++;
	} else {
		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
			if ((memcmp(mac_addr, &tmp->mac,
				    ETHER_ADDR_LEN) == 0) &&
			    ucast->vlan == tmp->vlan &&
			    ucast->vni == tmp->vni)
				break;
		}
		if (tmp == NULL) {
			DP_INFO(edev, "Unicast MAC is not found\n");
			return -EINVAL;
		}
		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
		qdev->num_uc_addr--;
	}

	return 0;
}

static int
qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
		  bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ether_addr *mac_addr;
	struct qede_mcast_entry *tmp = NULL;
	struct qede_mcast_entry *m;

	mac_addr = (struct ether_addr *)mcast->mac;
	if (add) {
		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
				DP_ERR(edev,
				       "Multicast MAC is already added\n");
added\n"); 419 return -EEXIST; 420 } 421 } 422 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), 423 RTE_CACHE_LINE_SIZE); 424 if (!m) { 425 DP_ERR(edev, 426 "Did not allocate memory for mcast\n"); 427 return -ENOMEM; 428 } 429 ether_addr_copy(mac_addr, &m->mac); 430 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list); 431 qdev->num_mc_addr++; 432 } else { 433 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 434 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) 435 break; 436 } 437 if (tmp == NULL) { 438 DP_INFO(edev, "Multicast mac is not found\n"); 439 return -EINVAL; 440 } 441 SLIST_REMOVE(&qdev->mc_list_head, tmp, 442 qede_mcast_entry, list); 443 qdev->num_mc_addr--; 444 } 445 446 return 0; 447 } 448 449 static enum _ecore_status_t 450 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 451 bool add) 452 { 453 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 454 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 455 enum _ecore_status_t rc; 456 struct ecore_filter_mcast mcast; 457 struct qede_mcast_entry *tmp; 458 uint16_t j = 0; 459 460 /* Multicast */ 461 if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) { 462 if (add) { 463 if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) { 464 DP_ERR(edev, 465 "Mcast filter table limit exceeded, " 466 "Please enable mcast promisc mode\n"); 467 return -ECORE_INVAL; 468 } 469 } 470 rc = qede_mcast_filter(eth_dev, ucast, add); 471 if (rc == 0) { 472 DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr); 473 memset(&mcast, 0, sizeof(mcast)); 474 mcast.num_mc_addrs = qdev->num_mc_addr; 475 mcast.opcode = ECORE_FILTER_ADD; 476 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 477 ether_addr_copy(&tmp->mac, 478 (struct ether_addr *)&mcast.mac[j]); 479 j++; 480 } 481 rc = ecore_filter_mcast_cmd(edev, &mcast, 482 ECORE_SPQ_MODE_CB, NULL); 483 } 484 if (rc != ECORE_SUCCESS) { 485 DP_ERR(edev, "Failed to add multicast filter" 486 " rc = %d, op = %d\n", rc, add); 487 } 488 } else { /* Unicast */ 489 if (add) { 490 if (qdev->num_uc_addr >= 491 qdev->dev_info.num_mac_filters) { 492 DP_ERR(edev, 493 "Ucast filter table limit exceeded," 494 " Please enable promisc mode\n"); 495 return -ECORE_INVAL; 496 } 497 } 498 rc = qede_ucast_filter(eth_dev, ucast, add); 499 if (rc == 0) 500 rc = ecore_filter_ucast_cmd(edev, ucast, 501 ECORE_SPQ_MODE_CB, NULL); 502 if (rc != ECORE_SUCCESS) { 503 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", 504 rc, add); 505 } 506 } 507 508 return rc; 509 } 510 511 static int 512 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr, 513 __rte_unused uint32_t index, __rte_unused uint32_t pool) 514 { 515 struct ecore_filter_ucast ucast; 516 int re; 517 518 qede_set_ucast_cmn_params(&ucast); 519 ucast.type = ECORE_FILTER_MAC; 520 ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac); 521 re = (int)qede_mac_int_ops(eth_dev, &ucast, 1); 522 return re; 523 } 524 525 static void 526 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) 527 { 528 struct qede_dev *qdev = eth_dev->data->dev_private; 529 struct ecore_dev *edev = &qdev->edev; 530 struct ecore_filter_ucast ucast; 531 532 PMD_INIT_FUNC_TRACE(edev); 533 534 if (index >= qdev->dev_info.num_mac_filters) { 535 DP_ERR(edev, "Index %u is above MAC filter limit %u\n", 536 index, qdev->dev_info.num_mac_filters); 537 return; 538 } 539 540 qede_set_ucast_cmn_params(&ucast); 541 ucast.opcode = ECORE_FILTER_REMOVE; 542 ucast.type = ECORE_FILTER_MAC; 543 544 /* Use the index maintained by rte */ 545 
	ether_addr_copy(&eth_dev->data->mac_addrs[index],
			(struct ether_addr *)&ucast.mac);

	ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
}

static void
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);

	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
					       mac_addr->addr_bytes)) {
		DP_ERR(edev, "Setting MAC address is not allowed\n");
		ether_addr_copy(&qdev->primary_mac,
				&eth_dev->data->mac_addrs[0]);
		return;
	}

	qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}

static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qed_update_vport_params params = {
		.vport_id = 0,
		.accept_any_vlan = action,
		.update_accept_any_vlan_flg = 1,
	};
	int rc;

	/* Proceed only if action actually needs to be performed */
	if (qdev->accept_any_vlan == action)
		return;

	rc = qdev->ops->vport_update(edev, &params);
	if (rc) {
		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
		       action ? "enable" : "disable");
	} else {
		DP_INFO(edev, "%s accept-any-vlan\n",
			action ? "enabled" : "disabled");
		qdev->accept_any_vlan = action;
	}
}

static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
{
	struct qed_update_vport_params vport_update_params;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_inner_vlan_removal_flg = 1;
	vport_update_params.inner_vlan_removal_flg = set_stripping;
	rc = qdev->ops->vport_update(edev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}
	qdev->vlan_strip_flg = set_stripping;

	return 0;
}

static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
				uint16_t vlan_id, int on)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
	struct qede_vlan_entry *tmp = NULL;
	struct qede_vlan_entry *vlan;
	struct ecore_filter_ucast ucast;
	int rc;

	if (on) {
		if (qdev->configured_vlans == dev_info->num_vlan_filters) {
			DP_ERR(edev, "Reached max VLAN filter limit"
			       " enabling accept_any_vlan\n");
			qede_config_accept_any_vlan(qdev, true);
			return 0;
		}

		SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
			if (tmp->vid == vlan_id) {
				DP_ERR(edev, "VLAN %u already configured\n",
				       vlan_id);
				return -EEXIST;
			}
		}

		vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
				  RTE_CACHE_LINE_SIZE);

		if (!vlan) {
			DP_ERR(edev, "Did not allocate memory for VLAN\n");
			return -ENOMEM;
		}

		qede_set_ucast_cmn_params(&ucast);
		ucast.opcode = ECORE_FILTER_ADD;
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = vlan_id;
		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
					    NULL);
		if (rc != 0) {
			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
			       rc);
			rte_free(vlan);
		} else {
			vlan->vid = vlan_id;
			SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
			qdev->configured_vlans++;
			DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
				vlan_id, qdev->configured_vlans);
"VLAN %u added, configured_vlans %u\n", 664 vlan_id, qdev->configured_vlans); 665 } 666 } else { 667 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 668 if (tmp->vid == vlan_id) 669 break; 670 } 671 672 if (!tmp) { 673 if (qdev->configured_vlans == 0) { 674 DP_INFO(edev, 675 "No VLAN filters configured yet\n"); 676 return 0; 677 } 678 679 DP_ERR(edev, "VLAN %u not configured\n", vlan_id); 680 return -EINVAL; 681 } 682 683 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list); 684 685 qede_set_ucast_cmn_params(&ucast); 686 ucast.opcode = ECORE_FILTER_REMOVE; 687 ucast.type = ECORE_FILTER_VLAN; 688 ucast.vlan = vlan_id; 689 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 690 NULL); 691 if (rc != 0) { 692 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", 693 vlan_id, rc); 694 } else { 695 qdev->configured_vlans--; 696 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n", 697 vlan_id, qdev->configured_vlans); 698 } 699 } 700 701 return rc; 702 } 703 704 static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 705 { 706 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 707 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 708 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 709 710 if (mask & ETH_VLAN_STRIP_MASK) { 711 if (rxmode->hw_vlan_strip) 712 (void)qede_vlan_stripping(eth_dev, 1); 713 else 714 (void)qede_vlan_stripping(eth_dev, 0); 715 } 716 717 if (mask & ETH_VLAN_FILTER_MASK) { 718 /* VLAN filtering kicks in when a VLAN is added */ 719 if (rxmode->hw_vlan_filter) { 720 qede_vlan_filter_set(eth_dev, 0, 1); 721 } else { 722 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ 723 DP_ERR(edev, 724 " Please remove existing VLAN filters" 725 " before disabling VLAN filtering\n"); 726 /* Signal app that VLAN filtering is still 727 * enabled 728 */ 729 rxmode->hw_vlan_filter = true; 730 } else { 731 qede_vlan_filter_set(eth_dev, 0, 0); 732 } 733 } 734 } 735 736 if (mask & ETH_VLAN_EXTEND_MASK) 737 DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q" 738 " and classification is based on outer tag only\n"); 739 740 DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n", 741 mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter); 742 } 743 744 static int qede_init_vport(struct qede_dev *qdev) 745 { 746 struct ecore_dev *edev = &qdev->edev; 747 struct qed_start_vport_params start = {0}; 748 int rc; 749 750 start.remove_inner_vlan = 1; 751 start.enable_lro = qdev->enable_lro; 752 start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD; 753 start.vport_id = 0; 754 start.drop_ttl0 = false; 755 start.clear_stats = 1; 756 start.handle_ptp_pkts = 0; 757 758 rc = qdev->ops->vport_start(edev, &start); 759 if (rc) { 760 DP_ERR(edev, "Start V-PORT failed %d\n", rc); 761 return rc; 762 } 763 764 DP_INFO(edev, 765 "Start vport ramrod passed, vport_id = %d, MTU = %u\n", 766 start.vport_id, ETHER_MTU); 767 768 return 0; 769 } 770 771 static void qede_prandom_bytes(uint32_t *buff) 772 { 773 uint8_t i; 774 775 srand((unsigned int)time(NULL)); 776 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 777 buff[i] = rand(); 778 } 779 780 int qede_config_rss(struct rte_eth_dev *eth_dev) 781 { 782 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 783 #ifdef RTE_LIBRTE_QEDE_DEBUG_INFO 784 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 785 #endif 786 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE]; 787 struct rte_eth_rss_reta_entry64 reta_conf[2]; 788 struct rte_eth_rss_conf rss_conf; 789 uint32_t i, id, pos, q; 790 791 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 792 if 
	if (!rss_conf.rss_key) {
		DP_INFO(edev, "Applying driver default key\n");
		rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
		qede_prandom_bytes(&def_rss_key[0]);
		rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
	}

	/* Configure RSS hash */
	if (qede_rss_hash_update(eth_dev, &rss_conf))
		return -EINVAL;

	/* Configure default RETA */
	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		id = i / RTE_RETA_GROUP_SIZE;
		pos = i % RTE_RETA_GROUP_SIZE;
		q = i % QEDE_RSS_COUNT(qdev);
		reta_conf[id].reta[pos] = q;
	}
	if (qede_rss_reta_update(eth_dev, &reta_conf[0],
				 ECORE_RSS_IND_TABLE_SIZE))
		return -EINVAL;

	return 0;
}

static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	/* Check requirements for 100G mode */
	if (edev->num_hwfns > 1) {
		if (eth_dev->data->nb_rx_queues < 2 ||
		    eth_dev->data->nb_tx_queues < 2) {
			DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
			return -EINVAL;
		}

		if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
		    (eth_dev->data->nb_tx_queues % 2 != 0)) {
			DP_ERR(edev,
			       "100G mode needs even no. of RX/TX queues\n");
			return -EINVAL;
		}
	}

	/* Sanity checks and throw warnings */
	if (rxmode->enable_scatter == 1)
		eth_dev->data->scattered_rx = 1;

	if (!rxmode->hw_strip_crc)
		DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");

	if (!rxmode->hw_ip_checksum)
		DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
			"in hw\n");

	if (rxmode->enable_lro) {
		qdev->enable_lro = true;
		/* Enable scatter mode for LRO */
		if (!rxmode->enable_scatter)
			eth_dev->data->scattered_rx = 1;
	}

	/* Check for the port restart case */
	if (qdev->state != QEDE_DEV_INIT) {
		rc = qdev->ops->vport_stop(edev, 0);
		if (rc != 0)
			return rc;
		qede_dealloc_fp_resc(eth_dev);
	}

	qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
	qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
	qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;

	/* Fastpath status block should be initialized before sending
	 * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
	 */
	rc = qede_alloc_fp_resc(qdev);
	if (rc != 0)
		return rc;

	/* Issue VPORT-START with default config values to allow
	 * other port configurations early on.
	 */
	rc = qede_init_vport(qdev);
	if (rc != 0)
		return rc;

	if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
	      rxmode->mq_mode == ETH_MQ_RX_NONE)) {
		DP_ERR(edev, "Unsupported RSS mode\n");
		qdev->ops->vport_stop(edev, 0);
		qede_dealloc_fp_resc(eth_dev);
		return -EINVAL;
	}

	/* Flow director mode check */
	rc = qede_check_fdir_support(eth_dev);
	if (rc) {
		qdev->ops->vport_stop(edev, 0);
		qede_dealloc_fp_resc(eth_dev);
		return -EINVAL;
	}
	SLIST_INIT(&qdev->fdir_info.fdir_list_head);

	SLIST_INIT(&qdev->vlan_list_head);

	/* Enable VLAN offloads by default */
	qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
			      ETH_VLAN_FILTER_MASK |
			      ETH_VLAN_EXTEND_MASK);

	qdev->state = QEDE_DEV_CONFIG;

	DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
		(int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
		qdev->num_tc);

	return 0;
}

/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
	.nb_max = NUM_RX_BDS_MAX,
	.nb_min = 128,
	.nb_align = 128 /* lowest common multiple */
};

static const struct rte_eth_desc_lim qede_tx_desc_lim = {
	.nb_max = NUM_TX_BDS_MAX,
	.nb_min = 256,
	.nb_align = 256,
	.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
	.nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};

static void
qede_dev_info_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_dev_info *dev_info)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qed_link_output link;
	uint32_t speed_cap = 0;

	PMD_INIT_FUNC_TRACE(edev);

	dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
	dev_info->rx_desc_lim = qede_rx_desc_lim;
	dev_info->tx_desc_lim = qede_tx_desc_lim;

	if (IS_PF(edev))
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
	else
		dev_info->max_rx_queues = (uint16_t)RTE_MIN(
			QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
	dev_info->max_tx_queues = dev_info->max_rx_queues;

	dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
	dev_info->max_vfs = 0;
	dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
	dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
	dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = QEDE_TXQ_FLAGS,
	};

	dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
				     DEV_RX_OFFLOAD_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_UDP_CKSUM |
				     DEV_RX_OFFLOAD_TCP_CKSUM |
				     DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_RX_OFFLOAD_TCP_LRO);

	dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
				     DEV_TX_OFFLOAD_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_UDP_CKSUM |
				     DEV_TX_OFFLOAD_TCP_CKSUM |
				     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
				     DEV_TX_OFFLOAD_TCP_TSO |
				     DEV_TX_OFFLOAD_VXLAN_TNL_TSO);

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		speed_cap |= ETH_LINK_SPEED_1G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		speed_cap |= ETH_LINK_SPEED_10G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		speed_cap |= ETH_LINK_SPEED_25G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		speed_cap |= ETH_LINK_SPEED_40G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		speed_cap |= ETH_LINK_SPEED_50G;
	if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		speed_cap |= ETH_LINK_SPEED_100G;
	dev_info->speed_capa = speed_cap;
}

/* return 0 means link status changed, -1 means not changed */
static int
qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t link_duplex;
	struct qed_link_output link;
	struct rte_eth_link *curr = &eth_dev->data->dev_link;

	memset(&link, 0, sizeof(struct qed_link_output));
	qdev->ops->common->get_link(edev, &link);

	/* Link Speed */
	curr->link_speed = link.speed;

	/* Link Mode */
	switch (link.duplex) {
	case QEDE_DUPLEX_HALF:
		link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	case QEDE_DUPLEX_FULL:
		link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case QEDE_DUPLEX_UNKNOWN:
	default:
		link_duplex = -1;
	}
	curr->link_duplex = link_duplex;

	/* Link Status */
	curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* AN */
	curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
			     ETH_LINK_AUTONEG : ETH_LINK_FIXED;

	DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
		curr->link_speed, curr->link_duplex,
		curr->link_autoneg, curr->link_status);

	/* return 0 means link status changed, -1 means not changed */
	return ((curr->link_status == link.link_up) ? -1 : 0);
}

static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	PMD_INIT_FUNC_TRACE(edev);
#endif

	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static void qede_poll_sp_sb_cb(void *param)
{
	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	qede_interrupt_action(ECORE_LEADING_HWFN(edev));
	qede_interrupt_action(&edev->hwfns[1]);

	rc = rte_eal_alarm_set(timer_period * US_PER_S,
			       qede_poll_sp_sb_cb,
			       (void *)eth_dev);
	if (rc != 0) {
		DP_ERR(edev, "Unable to start periodic"
			     " timer rc %d\n", rc);
		assert(false && "Unable to start periodic timer");
	}
}

static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	qede_fdir_dealloc_resc(eth_dev);

	/* dev_stop() shall cleanup fp resources in hw but without releasing
	 * dma memories and sw structures so that dev_start() can be called
	 * by the app without reconfiguration. However, in dev_close() we
	 * can release all the resources and device can be brought up newly
	 */
	if (qdev->state != QEDE_DEV_STOP)
		qede_dev_stop(eth_dev);
	else
		DP_INFO(edev, "Device is already stopped\n");

	rc = qdev->ops->vport_stop(edev, 0);
	if (rc != 0)
		DP_ERR(edev, "Failed to stop VPORT\n");

	qede_dealloc_fp_resc(eth_dev);

	qdev->ops->common->slowpath_stop(edev);

	qdev->ops->common->remove(edev);

	rte_intr_disable(&pci_dev->intr_handle);

	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     qede_interrupt_handler, (void *)eth_dev);

	if (edev->num_hwfns > 1)
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);

	qdev->state = QEDE_DEV_INIT; /* Go back to init state */
}

static void
qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_eth_stats stats;
	unsigned int i = 0, j = 0, qid;
	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
	struct qede_tx_queue *txq;

	qdev->ops->get_vport_stats(edev, &stats);

	/* RX Stats */
	eth_stats->ipackets = stats.rx_ucast_pkts +
	    stats.rx_mcast_pkts + stats.rx_bcast_pkts;

	eth_stats->ibytes = stats.rx_ucast_bytes +
	    stats.rx_mcast_bytes + stats.rx_bcast_bytes;

	eth_stats->ierrors = stats.rx_crc_errors +
	    stats.rx_align_errors +
	    stats.rx_carrier_errors +
	    stats.rx_oversize_packets +
	    stats.rx_jabbers + stats.rx_undersize_packets;

	eth_stats->rx_nombuf = stats.no_buff_discards;

	eth_stats->imissed = stats.mftag_filter_discards +
	    stats.mac_filter_discards +
	    stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;

	/* TX stats */
	eth_stats->opackets = stats.tx_ucast_pkts +
	    stats.tx_mcast_pkts + stats.tx_bcast_pkts;

	eth_stats->obytes = stats.tx_ucast_bytes +
	    stats.tx_mcast_bytes + stats.tx_bcast_bytes;

	eth_stats->oerrors = stats.tx_err_drop_pkts;

	/* Queue stats */
	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
	if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
	    (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
			   "Not all the queue stats will be displayed. Set"
			   " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
			   " appropriately and retry.\n");
Set" 1195 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" 1196 " appropriately and retry.\n"); 1197 1198 for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) { 1199 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) { 1200 eth_stats->q_ipackets[i] = 1201 *(uint64_t *)( 1202 ((char *)(qdev->fp_array[(qid)].rxq)) + 1203 offsetof(struct qede_rx_queue, 1204 rcv_pkts)); 1205 eth_stats->q_errors[i] = 1206 *(uint64_t *)( 1207 ((char *)(qdev->fp_array[(qid)].rxq)) + 1208 offsetof(struct qede_rx_queue, 1209 rx_hw_errors)) + 1210 *(uint64_t *)( 1211 ((char *)(qdev->fp_array[(qid)].rxq)) + 1212 offsetof(struct qede_rx_queue, 1213 rx_alloc_errors)); 1214 i++; 1215 } 1216 if (i == rxq_stat_cntrs) 1217 break; 1218 } 1219 1220 for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) { 1221 if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) { 1222 txq = qdev->fp_array[(qid)].txqs[0]; 1223 eth_stats->q_opackets[j] = 1224 *((uint64_t *)(uintptr_t) 1225 (((uint64_t)(uintptr_t)(txq)) + 1226 offsetof(struct qede_tx_queue, 1227 xmit_pkts))); 1228 j++; 1229 } 1230 if (j == txq_stat_cntrs) 1231 break; 1232 } 1233 } 1234 1235 static unsigned 1236 qede_get_xstats_count(struct qede_dev *qdev) { 1237 return RTE_DIM(qede_xstats_strings) + 1238 (RTE_DIM(qede_rxq_xstats_strings) * 1239 RTE_MIN(QEDE_RSS_COUNT(qdev), 1240 RTE_ETHDEV_QUEUE_STAT_CNTRS)); 1241 } 1242 1243 static int 1244 qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev, 1245 struct rte_eth_xstat_name *xstats_names, 1246 __rte_unused unsigned int limit) 1247 { 1248 struct qede_dev *qdev = dev->data->dev_private; 1249 const unsigned int stat_cnt = qede_get_xstats_count(qdev); 1250 unsigned int i, qid, stat_idx = 0; 1251 unsigned int rxq_stat_cntrs; 1252 1253 if (xstats_names != NULL) { 1254 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1255 snprintf(xstats_names[stat_idx].name, 1256 sizeof(xstats_names[stat_idx].name), 1257 "%s", 1258 qede_xstats_strings[i].name); 1259 stat_idx++; 1260 } 1261 1262 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1263 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1264 for (qid = 0; qid < rxq_stat_cntrs; qid++) { 1265 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1266 snprintf(xstats_names[stat_idx].name, 1267 sizeof(xstats_names[stat_idx].name), 1268 "%.4s%d%s", 1269 qede_rxq_xstats_strings[i].name, qid, 1270 qede_rxq_xstats_strings[i].name + 4); 1271 stat_idx++; 1272 } 1273 } 1274 } 1275 1276 return stat_cnt; 1277 } 1278 1279 static int 1280 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1281 unsigned int n) 1282 { 1283 struct qede_dev *qdev = dev->data->dev_private; 1284 struct ecore_dev *edev = &qdev->edev; 1285 struct ecore_eth_stats stats; 1286 const unsigned int num = qede_get_xstats_count(qdev); 1287 unsigned int i, qid, stat_idx = 0; 1288 unsigned int rxq_stat_cntrs; 1289 1290 if (n < num) 1291 return num; 1292 1293 qdev->ops->get_vport_stats(edev, &stats); 1294 1295 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1296 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) + 1297 qede_xstats_strings[i].offset); 1298 xstats[stat_idx].id = stat_idx; 1299 stat_idx++; 1300 } 1301 1302 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1303 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1304 for (qid = 0; qid < rxq_stat_cntrs; qid++) { 1305 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) { 1306 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1307 xstats[stat_idx].value = *(uint64_t *)( 1308 ((char *)(qdev->fp_array[(qid)].rxq)) + 1309 qede_rxq_xstats_strings[i].offset); 1310 xstats[stat_idx].id = stat_idx; 1311 
				stat_idx++;
			}
		}
	}

	return stat_idx;
}

static void
qede_reset_xstats(struct rte_eth_dev *dev)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
}

int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "setting link state %d\n", link_up);
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = link_up;
	rc = qdev->ops->common->set_link(edev, &link_params);
	if (rc != ECORE_SUCCESS)
		DP_ERR(edev, "Unable to set link state %d\n", link_up);

	return rc;
}

static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, true);
}

static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	return qede_dev_set_link_state(eth_dev, false);
}

static void qede_reset_stats(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	ecore_reset_vport_stats(edev);
}

static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	enum qed_filter_rx_mode_type type =
	    QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;

	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		type |= QED_FILTER_RX_MODE_TYPE_PROMISC;

	qed_configure_filter_rx_mode(eth_dev, type);
}

static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_PROMISC);
	else
		qed_configure_filter_rx_mode(eth_dev,
				QED_FILTER_RX_MODE_TYPE_REGULAR);
}

static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;
	struct qed_link_params params;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
	if (fc_conf->autoneg) {
		if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) {
			DP_ERR(edev, "Autoneg not supported\n");
			return -EINVAL;
		}
		params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	}

	/* Pause is assumed to be supported (SUPPORTED_Pause) */
	if (fc_conf->mode == RTE_FC_FULL)
		params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE |
					QED_LINK_PAUSE_RX_ENABLE);
	if (fc_conf->mode == RTE_FC_TX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
	if (fc_conf->mode == RTE_FC_RX_PAUSE)
		params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;

	params.link_up = true;
	(void)qdev->ops->common->set_link(edev, &params);

	return 0;
}

static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev,
			      struct rte_eth_fc_conf *fc_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	qdev->ops->common->get_link(edev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		fc_conf->autoneg = true;

	if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE |
					 QED_LINK_PAUSE_TX_ENABLE))
		fc_conf->mode = RTE_FC_FULL;
	else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static const uint32_t *
qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (eth_dev->rx_pkt_burst == qede_recv_pkts)
		return ptypes;

	return NULL;
}

static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
	*rss_caps = 0;
	*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}

int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params rss_params;
	struct ecore_hwfn *p_hwfn;
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint64_t hf = rss_conf->rss_hf;
	uint8_t len = rss_conf->rss_key_len;
	uint8_t idx;
	uint8_t i;
	int rc;

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	memset(&rss_params, 0, sizeof(rss_params));

	DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
		(unsigned long)hf, len, key);

	if (hf != 0) {
		/* Enabling RSS */
		DP_INFO(edev, "Enabling rss\n");

		/* RSS caps */
		qede_init_rss_caps(&rss_params.rss_caps, hf);
		rss_params.update_rss_capabilities = 1;

		/* RSS hash key */
		if (key) {
			if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
				DP_ERR(edev, "RSS key length exceeds limit\n");
				return -EINVAL;
			}
			DP_INFO(edev, "Applying user supplied hash key\n");
			rss_params.update_rss_key = 1;
			memcpy(&rss_params.rss_key, key, len);
		}
		rss_params.rss_enable = 1;
	}

	rss_params.update_rss_config = 1;
	/* tbl_size has to be set with capabilities */
	rss_params.rss_table_size_log = 7;
	vport_update_params.vport_id = 0;
	/* pass the L2 handles instead of qids */
	for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
		idx = qdev->rss_ind_table[i];
		rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
	}
	vport_update_params.rss_params = &rss_params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			return rc;
		}
	}
	qdev->rss_enable = rss_params.rss_enable;

	/* Update local structure for hash query */
	qdev->rss_conf.rss_hf = hf;
	qdev->rss_conf.rss_key_len = len;
	if (qdev->rss_enable) {
		if (qdev->rss_conf.rss_key == NULL) {
			qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
			if (qdev->rss_conf.rss_key == NULL) {
				DP_ERR(edev, "No memory to store RSS key\n");
				return -ENOMEM;
			}
		}
		if (key && len) {
			DP_INFO(edev, "Storing RSS key\n");
			memcpy(qdev->rss_conf.rss_key, key, len);
		}
	} else if (!qdev->rss_enable && len == 0) {
		if (qdev->rss_conf.rss_key) {
			free(qdev->rss_conf.rss_key);
			qdev->rss_conf.rss_key = NULL;
			DP_INFO(edev, "Free RSS key\n");
		}
	}

	return 0;
}

static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
				  struct rte_eth_rss_conf *rss_conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	rss_conf->rss_hf = qdev->rss_conf.rss_hf;
	rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;

	if (rss_conf->rss_key && qdev->rss_conf.rss_key)
		memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
		       rss_conf->rss_key_len);
	return 0;
}

static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
				     struct ecore_rss_params *rss)
{
	int i, fn;
	bool rss_mode = 1; /* enable */
	struct ecore_queue_cid *cid;
	struct ecore_rss_params *t_rss;

	/* In regular scenario, we'd simply need to take input handlers.
	 * But in CMT, we'd have to split the handlers according to the
	 * engine they were configured on. We'd then have to understand
	 * whether RSS is really required, since 2-queues on CMT doesn't
	 * require RSS.
	 */

	/* CMT should be round-robin */
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
		cid = rss->rss_ind_table[i];

		if (cid->p_owner == ECORE_LEADING_HWFN(edev))
			t_rss = &rss[0];
		else
			t_rss = &rss[1];

		t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
	}

	t_rss = &rss[1];
	t_rss->update_rss_ind_table = 1;
	t_rss->rss_table_size_log = 7;
	t_rss->update_rss_config = 1;

	/* Make sure RSS is actually required */
	for_each_hwfn(edev, fn) {
		for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
		     i++) {
			if (rss[fn].rss_ind_table[i] !=
			    rss[fn].rss_ind_table[0])
				break;
		}

		if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
			DP_INFO(edev,
				"CMT - 1 queue per-hwfn; Disabling RSS\n");
			rss_mode = 0;
			goto out;
		}
	}

out:
	t_rss->rss_enable = rss_mode;

	return rss_mode;
}

int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_sp_vport_update_params vport_update_params;
	struct ecore_rss_params *params;
	struct ecore_hwfn *p_hwfn;
	uint16_t i, idx, shift;
	uint8_t entry;
	int rc = 0;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported by hardware\n",
		       reta_size);
		return -EINVAL;
	}

	memset(&vport_update_params, 0, sizeof(vport_update_params));
	params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
			     RTE_CACHE_LINE_SIZE);
	if (params == NULL) {
		DP_ERR(edev, "failed to allocate memory for RSS params\n");
		return -ENOMEM;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = reta_conf[idx].reta[shift];
			/* Pass rxq handles to ecore */
			params->rss_ind_table[i] =
				qdev->fp_array[entry].rxq->handle;
			/* Update the local copy for RETA query command */
			qdev->rss_ind_table[i] = entry;
		}
	}

	params->update_rss_ind_table = 1;
	params->rss_table_size_log = 7;
	params->update_rss_config = 1;

	/* Fix up RETA for CMT mode device */
	if (edev->num_hwfns > 1)
		qdev->rss_enable = qede_update_rss_parm_cmt(edev,
							    params);
	vport_update_params.vport_id = 0;
	/* Use the current value of rss_enable */
	params->rss_enable = qdev->rss_enable;
	vport_update_params.rss_params = params;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "vport-update for RSS failed\n");
			goto out;
		}
	}

out:
	rte_free(params);
	return rc;
}

static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
			       struct rte_eth_rss_reta_entry64 *reta_conf,
			       uint16_t reta_size)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint16_t i, idx, shift;
	uint8_t entry;

	if (reta_size > ETH_RSS_RETA_SIZE_128) {
		DP_ERR(edev, "reta_size %d is not supported\n",
		       reta_size);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift)) {
			entry = qdev->rss_ind_table[i];
			reta_conf[idx].reta[shift] = entry;
		}
	}

	return 0;
}

static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_dev_info dev_info = {0};
	struct qede_fastpath *fp;
	uint32_t frame_size;
	uint16_t rx_buf_size;
	uint16_t bufsz;
	int i;

	PMD_INIT_FUNC_TRACE(edev);
	qede_dev_info_get(dev, &dev_info);
	frame_size = mtu + QEDE_ETH_OVERHEAD;
	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
		DP_ERR(edev, "MTU %u out of range\n", mtu);
		return -EINVAL;
	}
	if (!dev->data->scattered_rx &&
	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
			dev->data->min_rx_buf_size);
		return -EINVAL;
	}
	/* Temporarily replace I/O functions with dummy ones. It cannot
	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
	 */
	dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
	dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
	qede_dev_stop(dev);
	rte_delay_ms(1000);
	qdev->mtu = mtu;
	/* Fix up RX buf size for all queues of the port */
	for_each_queue(i) {
		fp = &qdev->fp_array[i];
		if (fp->type & QEDE_FASTPATH_RX) {
			bufsz = (uint16_t)rte_pktmbuf_data_room_size(
				fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
			if (dev->data->scattered_rx)
				rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
			else
				rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
			rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
			fp->rxq->rx_buf_size = rx_buf_size;
			DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
		}
	}
	qede_dev_start(dev);
	if (frame_size > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;
	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
	/* Reassign back */
	dev->rx_pkt_burst = qede_recv_pkts;
	dev->tx_pkt_burst = qede_xmit_pkts;

	return 0;
}

static int
qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
		       struct rte_eth_udp_tunnel *tunnel_udp,
		       bool add)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn; /* @DPDK */
	struct ecore_hwfn *p_hwfn;
	int rc, i;

	PMD_INIT_FUNC_TRACE(edev);

	memset(&tunn, 0, sizeof(tunn));
	if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
					       QEDE_VXLAN_DEF_PORT;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
						ECORE_SPQ_MODE_CB, NULL);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Unable to config UDP port %u\n",
				       tunn.vxlan_port.port);
				return rc;
			}
		}
	}

	return 0;
}

static int
qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
}

static int
qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
		      struct rte_eth_udp_tunnel *tunnel_udp)
{
	return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
}

static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
				       uint32_t *clss, char *str)
{
	uint16_t j;
	*clss = MAX_ECORE_TUNN_CLSS;

	for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
		if (filter == qede_tunn_types[j].rte_filter_type) {
			*type = qede_tunn_types[j].qede_type;
			*clss = qede_tunn_types[j].qede_tunn_clss;
			strcpy(str, qede_tunn_types[j].string);
			return;
		}
	}
}

static int
qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
			      const struct rte_eth_tunnel_filter_conf *conf,
			      uint32_t type)
{
	/* Init common ucast params first */
	qede_set_ucast_cmn_params(ucast);

	/* Copy out the required fields based on classification type */
	ucast->type = type;

	switch (type) {
	case ECORE_FILTER_VNI:
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_VLAN:
		ucast->vlan = conf->inner_vlan;
		break;
	case ECORE_FILTER_MAC:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_INNER_MAC:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vlan = conf->inner_vlan;
		break;
	default:
		return -EINVAL;
	}

	return ECORE_SUCCESS;
}

static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
				  enum rte_filter_op filter_op,
				  const struct rte_eth_tunnel_filter_conf *conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn;
	struct ecore_hwfn *p_hwfn;
	enum ecore_filter_ucast_type type;
	enum ecore_tunn_clss clss;
	struct ecore_filter_ucast ucast;
	char str[80];
	uint16_t filter_type;
	int rc, i;

	filter_type = conf->filter_type | qdev->vxlan_filter_type;
	/* First determine if the given filter classification is supported */
	qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
	if (clss == MAX_ECORE_TUNN_CLSS) {
		DP_ERR(edev, "Wrong filter type\n");
		return -EINVAL;
	}
	/* Init tunnel ucast params */
	rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
		       conf->filter_type);
		return rc;
	}
static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
				       uint32_t *clss, char *str)
{
	uint16_t j;
	*clss = MAX_ECORE_TUNN_CLSS;

	for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
		if (filter == qede_tunn_types[j].rte_filter_type) {
			*type = qede_tunn_types[j].qede_type;
			*clss = qede_tunn_types[j].qede_tunn_clss;
			strcpy(str, qede_tunn_types[j].string);
			return;
		}
	}
}

static int
qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
			      const struct rte_eth_tunnel_filter_conf *conf,
			      uint32_t type)
{
	/* Init common ucast params first */
	qede_set_ucast_cmn_params(ucast);

	/* Copy out the required fields based on classification type */
	ucast->type = type;

	switch (type) {
	case ECORE_FILTER_VNI:
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_VLAN:
		ucast->vlan = conf->inner_vlan;
		break;
	case ECORE_FILTER_MAC:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_INNER_MAC:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vni = conf->tenant_id;
		break;
	case ECORE_FILTER_INNER_PAIR:
		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
		       ETHER_ADDR_LEN);
		ucast->vlan = conf->inner_vlan;
		break;
	default:
		return -EINVAL;
	}

	return ECORE_SUCCESS;
}

static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
				  enum rte_filter_op filter_op,
				  const struct rte_eth_tunnel_filter_conf *conf)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct ecore_tunnel_info tunn;
	struct ecore_hwfn *p_hwfn;
	enum ecore_filter_ucast_type type;
	enum ecore_tunn_clss clss;
	struct ecore_filter_ucast ucast;
	char str[80];
	uint16_t filter_type;
	int rc, i;

	filter_type = conf->filter_type | qdev->vxlan_filter_type;
	/* First determine if the given filter classification is supported */
	qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
	if (clss == MAX_ECORE_TUNN_CLSS) {
		DP_ERR(edev, "Wrong filter type\n");
		return -EINVAL;
	}
	/* Init tunnel ucast params */
	rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
		       conf->filter_type);
		return rc;
	}
	DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
		str, filter_op, ucast.type);
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ucast.opcode = ECORE_FILTER_ADD;

		/* Skip MAC/VLAN if filter is based on VNI */
		if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
			rc = qede_mac_int_ops(eth_dev, &ucast, 1);
			if (rc == 0) {
				/* Enable accept anyvlan */
				qede_config_accept_any_vlan(qdev, true);
			}
		} else {
			rc = qede_ucast_filter(eth_dev, &ucast, 1);
			if (rc == 0)
				rc = ecore_filter_ucast_cmd(edev, &ucast,
						ECORE_SPQ_MODE_CB, NULL);
		}

		if (rc != ECORE_SUCCESS)
			return rc;

		qdev->vxlan_filter_type = filter_type;

		DP_INFO(edev, "Enabling VXLAN tunneling\n");
		qede_set_cmn_tunn_param(&tunn, clss, true, true);
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
					&tunn, ECORE_SPQ_MODE_CB, NULL);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Failed to update tunn_clss %u\n",
				       tunn.vxlan.tun_cls);
			}
		}
		qdev->num_tunn_filters++; /* Filter added successfully */
		break;
	case RTE_ETH_FILTER_DELETE:
		ucast.opcode = ECORE_FILTER_REMOVE;

		if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
			rc = qede_mac_int_ops(eth_dev, &ucast, 0);
		} else {
			rc = qede_ucast_filter(eth_dev, &ucast, 0);
			if (rc == 0)
				rc = ecore_filter_ucast_cmd(edev, &ucast,
						ECORE_SPQ_MODE_CB, NULL);
		}
		if (rc != ECORE_SUCCESS)
			return rc;

		qdev->vxlan_filter_type = filter_type;
		qdev->num_tunn_filters--;

		/* Disable VXLAN if VXLAN filters become 0 */
		if (qdev->num_tunn_filters == 0) {
			DP_INFO(edev, "Disabling VXLAN tunneling\n");

			/* Use 0 as tunnel mode */
			qede_set_cmn_tunn_param(&tunn, clss, false, true);
			for_each_hwfn(edev, i) {
				p_hwfn = &edev->hwfns[i];
				rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
						ECORE_SPQ_MODE_CB, NULL);
				if (rc != ECORE_SUCCESS) {
					DP_ERR(edev,
					       "Failed to update tunn_clss %u\n",
					       tunn.vxlan.tun_cls);
					break;
				}
			}
		}
		break;
	default:
		DP_ERR(edev, "Unsupported operation %d\n", filter_op);
		return -EINVAL;
	}
	DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);

	return 0;
}
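/* Illustrative sketch only (not part of the driver): installing a VNI-based
 * VXLAN tunnel filter from an application.  rte_eth_dev_filter_ctrl() routes
 * the request to qede_dev_filter_ctrl() and from there to
 * qede_vxlan_tunn_config().  The function name, port id and VNI value are
 * arbitrary examples.
 */
#if 0
static int example_add_vxlan_vni_filter(uint8_t port_id)
{
	struct rte_eth_tunnel_filter_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
	conf.filter_type = ETH_TUNNEL_FILTER_TENID;
	conf.tenant_id = 100;	/* example VNI */

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
				       RTE_ETH_FILTER_ADD, &conf);
}
#endif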
int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
			 enum rte_filter_type filter_type,
			 enum rte_filter_op filter_op,
			 void *arg)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
	struct rte_eth_tunnel_filter_conf *filter_conf =
			(struct rte_eth_tunnel_filter_conf *)arg;

	switch (filter_type) {
	case RTE_ETH_FILTER_TUNNEL:
		switch (filter_conf->tunnel_type) {
		case RTE_TUNNEL_TYPE_VXLAN:
			DP_INFO(edev,
				"Packet steering to the specified Rx queue"
				" is not supported with VXLAN tunneling");
			return qede_vxlan_tunn_config(eth_dev, filter_op,
						      filter_conf);
		/* Place holders for future tunneling support */
		case RTE_TUNNEL_TYPE_GENEVE:
		case RTE_TUNNEL_TYPE_TEREDO:
		case RTE_TUNNEL_TYPE_NVGRE:
		case RTE_TUNNEL_TYPE_IP_IN_GRE:
		case RTE_L2_TUNNEL_TYPE_E_TAG:
			DP_ERR(edev, "Unsupported tunnel type %d\n",
			       filter_conf->tunnel_type);
			return -EINVAL;
		case RTE_TUNNEL_TYPE_NONE:
		default:
			return 0;
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		return qede_fdir_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_NTUPLE:
		return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
	case RTE_ETH_FILTER_MACVLAN:
	case RTE_ETH_FILTER_ETHERTYPE:
	case RTE_ETH_FILTER_FLEXIBLE:
	case RTE_ETH_FILTER_SYN:
	case RTE_ETH_FILTER_HASH:
	case RTE_ETH_FILTER_L2_TUNNEL:
	case RTE_ETH_FILTER_MAX:
	default:
		DP_ERR(edev, "Unsupported filter type %d\n",
		       filter_type);
		return -EINVAL;
	}

	return 0;
}

static const struct eth_dev_ops qede_eth_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.mac_addr_add = qede_mac_addr_add,
	.mac_addr_remove = qede_mac_addr_remove,
	.mac_addr_set = qede_mac_addr_set,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.flow_ctrl_set = qede_flow_ctrl_set,
	.flow_ctrl_get = qede_flow_ctrl_get,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
	.filter_ctrl = qede_dev_filter_ctrl,
	.udp_tunnel_port_add = qede_udp_dst_port_add,
	.udp_tunnel_port_del = qede_udp_dst_port_del,
};

static const struct eth_dev_ops qede_eth_vf_dev_ops = {
	.dev_configure = qede_dev_configure,
	.dev_infos_get = qede_dev_info_get,
	.rx_queue_setup = qede_rx_queue_setup,
	.rx_queue_release = qede_rx_queue_release,
	.tx_queue_setup = qede_tx_queue_setup,
	.tx_queue_release = qede_tx_queue_release,
	.dev_start = qede_dev_start,
	.dev_set_link_up = qede_dev_set_link_up,
	.dev_set_link_down = qede_dev_set_link_down,
	.link_update = qede_link_update,
	.promiscuous_enable = qede_promiscuous_enable,
	.promiscuous_disable = qede_promiscuous_disable,
	.allmulticast_enable = qede_allmulticast_enable,
	.allmulticast_disable = qede_allmulticast_disable,
	.dev_stop = qede_dev_stop,
	.dev_close = qede_dev_close,
	.stats_get = qede_get_stats,
	.stats_reset = qede_reset_stats,
	.xstats_get = qede_get_xstats,
	.xstats_reset = qede_reset_xstats,
	.xstats_get_names = qede_get_xstats_names,
	.vlan_offload_set = qede_vlan_offload_set,
	.vlan_filter_set = qede_vlan_filter_set,
	.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
	.rss_hash_update = qede_rss_hash_update,
	.rss_hash_conf_get = qede_rss_hash_conf_get,
	.reta_update = qede_rss_reta_update,
	.reta_query = qede_rss_reta_query,
	.mtu_set = qede_set_mtu,
};
static void qede_update_pf_params(struct ecore_dev *edev)
{
	struct ecore_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(edev, &pf_params);
}

static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
{
	struct rte_pci_device *pci_dev;
	struct rte_pci_addr pci_addr;
	struct qede_dev *adapter;
	struct ecore_dev *edev;
	struct qed_dev_eth_info dev_info;
	struct qed_slowpath_params params;
	static bool do_once = true;
	uint8_t bulletin_change;
	uint8_t vf_mac[ETHER_ADDR_LEN];
	uint8_t is_mac_forced;
	bool is_mac_exist;
	/* Fix up ecore debug level */
	uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
	uint8_t dp_level = ECORE_LEVEL_VERBOSE;
	int rc;

	/* Extract key data structures */
	adapter = eth_dev->data->dev_private;
	edev = &adapter->edev;
	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pci_addr = pci_dev->addr;

	PMD_INIT_FUNC_TRACE(edev);

	snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 pci_addr.bus, pci_addr.devid, pci_addr.function,
		 eth_dev->data->port_id);

	eth_dev->rx_pkt_burst = qede_recv_pkts;
	eth_dev->tx_pkt_burst = qede_xmit_pkts;
	eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DP_NOTICE(edev, false,
			  "Skipping device init from secondary process\n");
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* @DPDK */
	edev->vendor_id = pci_dev->id.vendor_id;
	edev->device_id = pci_dev->id.device_id;

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
		return -EINVAL;
	}

	DP_INFO(edev, "Starting qede probe\n");

	rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
				    dp_module, dp_level, is_vf);

	if (rc != 0) {
		DP_ERR(edev, "qede probe failed rc %d\n", rc);
		return -ENODEV;
	}

	qede_update_pf_params(edev);

	rte_intr_callback_register(&pci_dev->intr_handle,
				   qede_interrupt_handler, (void *)eth_dev);

	if (rte_intr_enable(&pci_dev->intr_handle)) {
		DP_ERR(edev, "rte_intr_enable() failed\n");
		return -ENODEV;
	}

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = ECORE_INT_MODE_MSIX;
	params.drv_major = QEDE_PMD_VERSION_MAJOR;
	params.drv_minor = QEDE_PMD_VERSION_MINOR;
	params.drv_rev = QEDE_PMD_VERSION_REVISION;
	params.drv_eng = QEDE_PMD_VERSION_PATCH;
	strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
		QEDE_PMD_DRV_VER_STR_SIZE);

	/* For CMT mode device do periodic polling for slowpath events.
	 * This is required since uio device uses only one MSI-x
	 * interrupt vector but we need one for each engine.
	 */
	if (edev->num_hwfns > 1 && IS_PF(edev)) {
		rc = rte_eal_alarm_set(timer_period * US_PER_S,
				       qede_poll_sp_sb_cb,
				       (void *)eth_dev);
		if (rc != 0) {
			DP_ERR(edev, "Unable to start periodic"
				     " timer rc %d\n", rc);
			return -EINVAL;
		}
	}

	rc = qed_ops->common->slowpath_start(edev, &params);
	if (rc) {
		DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	rc = qed_ops->fill_dev_info(edev, &dev_info);
	if (rc) {
		DP_ERR(edev, "Cannot get device_info rc %d\n", rc);
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENODEV;
	}

	qede_alloc_etherdev(adapter, &dev_info);

	adapter->ops->common->set_name(edev, edev->name);

	if (!is_vf)
		adapter->dev_info.num_mac_filters =
			(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
					   ECORE_MAC);
	else
		ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
				(uint32_t *)&adapter->dev_info.num_mac_filters);

	/* Allocate memory for storing MAC addr */
	eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
					(ETHER_ADDR_LEN *
					adapter->dev_info.num_mac_filters),
					RTE_CACHE_LINE_SIZE);

	if (eth_dev->data->mac_addrs == NULL) {
		DP_ERR(edev, "Failed to allocate MAC address\n");
		qed_ops->common->slowpath_stop(edev);
		qed_ops->common->remove(edev);
		rte_eal_alarm_cancel(qede_poll_sp_sb_cb,
				     (void *)eth_dev);
		return -ENOMEM;
	}

	if (!is_vf) {
		ether_addr_copy((struct ether_addr *)edev->hwfns[0].
				hw_info.hw_mac_addr,
				&eth_dev->data->mac_addrs[0]);
		ether_addr_copy(&eth_dev->data->mac_addrs[0],
				&adapter->primary_mac);
	} else {
		ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
				       &bulletin_change);
		if (bulletin_change) {
			is_mac_exist =
			    ecore_vf_bulletin_get_forced_mac(
						ECORE_LEADING_HWFN(edev),
						vf_mac,
						&is_mac_forced);
			if (is_mac_exist && is_mac_forced) {
				DP_INFO(edev, "VF macaddr received from PF\n");
				ether_addr_copy((struct ether_addr *)&vf_mac,
						&eth_dev->data->mac_addrs[0]);
				ether_addr_copy(&eth_dev->data->mac_addrs[0],
						&adapter->primary_mac);
			} else {
				DP_NOTICE(edev, false,
					  "No VF macaddr assigned\n");
			}
		}
	}

	eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
	if (do_once) {
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
		qede_print_adapter_info(adapter);
#endif
		do_once = false;
	}

	adapter->state = QEDE_DEV_INIT;

	DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
		  adapter->primary_mac.addr_bytes[0],
		  adapter->primary_mac.addr_bytes[1],
		  adapter->primary_mac.addr_bytes[2],
		  adapter->primary_mac.addr_bytes[3],
		  adapter->primary_mac.addr_bytes[4],
		  adapter->primary_mac.addr_bytes[5]);

	return rc;
}

static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 1);
}

static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	return qede_common_dev_init(eth_dev, 0);
}

static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	qede_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	if (eth_dev->data->mac_addrs)
		rte_free(eth_dev->data->mac_addrs);

	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	return qede_dev_common_uninit(eth_dev);
}

static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
	},
	{
		QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
	},
	{.vendor_id = 0,}
};

static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
	},
	{
		QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
	},
	{.vendor_id = 0,}
};

static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				    struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qedevf_eth_dev_init);
}

static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
}

static struct rte_pci_driver rte_qedevf_pmd = {
	.id_table = pci_id_qedevf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qedevf_eth_dev_pci_probe,
	.remove = qedevf_eth_dev_pci_remove,
};
static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct qede_dev), qede_eth_dev_init);
}

static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
}

static struct rte_pci_driver rte_qede_pmd = {
	.id_table = pci_id_qede_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = qede_eth_dev_pci_probe,
	.remove = qede_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");