1 /* 2 * Copyright (c) 2016 QLogic Corporation. 3 * All rights reserved. 4 * www.qlogic.com 5 * 6 * See LICENSE.qede_pmd for copyright and licensing details. 7 */ 8 9 #include "qede_ethdev.h" 10 #include <rte_alarm.h> 11 #include <rte_version.h> 12 13 /* Globals */ 14 static const struct qed_eth_ops *qed_ops; 15 static int64_t timer_period = 1; 16 17 /* VXLAN tunnel classification mapping */ 18 const struct _qede_vxlan_tunn_types { 19 uint16_t rte_filter_type; 20 enum ecore_filter_ucast_type qede_type; 21 enum ecore_tunn_clss qede_tunn_clss; 22 const char *string; 23 } qede_tunn_types[] = { 24 { 25 ETH_TUNNEL_FILTER_OMAC, 26 ECORE_FILTER_MAC, 27 ECORE_TUNN_CLSS_MAC_VLAN, 28 "outer-mac" 29 }, 30 { 31 ETH_TUNNEL_FILTER_TENID, 32 ECORE_FILTER_VNI, 33 ECORE_TUNN_CLSS_MAC_VNI, 34 "vni" 35 }, 36 { 37 ETH_TUNNEL_FILTER_IMAC, 38 ECORE_FILTER_INNER_MAC, 39 ECORE_TUNN_CLSS_INNER_MAC_VLAN, 40 "inner-mac" 41 }, 42 { 43 ETH_TUNNEL_FILTER_IVLAN, 44 ECORE_FILTER_INNER_VLAN, 45 ECORE_TUNN_CLSS_INNER_MAC_VLAN, 46 "inner-vlan" 47 }, 48 { 49 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID, 50 ECORE_FILTER_MAC_VNI_PAIR, 51 ECORE_TUNN_CLSS_MAC_VNI, 52 "outer-mac and vni" 53 }, 54 { 55 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC, 56 ECORE_FILTER_UNUSED, 57 MAX_ECORE_TUNN_CLSS, 58 "outer-mac and inner-mac" 59 }, 60 { 61 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN, 62 ECORE_FILTER_UNUSED, 63 MAX_ECORE_TUNN_CLSS, 64 "outer-mac and inner-vlan" 65 }, 66 { 67 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC, 68 ECORE_FILTER_INNER_MAC_VNI_PAIR, 69 ECORE_TUNN_CLSS_INNER_MAC_VNI, 70 "vni and inner-mac", 71 }, 72 { 73 ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN, 74 ECORE_FILTER_UNUSED, 75 MAX_ECORE_TUNN_CLSS, 76 "vni and inner-vlan", 77 }, 78 { 79 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN, 80 ECORE_FILTER_INNER_PAIR, 81 ECORE_TUNN_CLSS_INNER_MAC_VLAN, 82 "inner-mac and inner-vlan", 83 }, 84 { 85 ETH_TUNNEL_FILTER_OIP, 86 ECORE_FILTER_UNUSED, 87 MAX_ECORE_TUNN_CLSS, 88 "outer-IP" 89 }, 90 { 91 ETH_TUNNEL_FILTER_IIP, 92 ECORE_FILTER_UNUSED, 93 MAX_ECORE_TUNN_CLSS, 94 "inner-IP" 95 }, 96 { 97 RTE_TUNNEL_FILTER_IMAC_IVLAN, 98 ECORE_FILTER_UNUSED, 99 MAX_ECORE_TUNN_CLSS, 100 "IMAC_IVLAN" 101 }, 102 { 103 RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID, 104 ECORE_FILTER_UNUSED, 105 MAX_ECORE_TUNN_CLSS, 106 "IMAC_IVLAN_TENID" 107 }, 108 { 109 RTE_TUNNEL_FILTER_IMAC_TENID, 110 ECORE_FILTER_UNUSED, 111 MAX_ECORE_TUNN_CLSS, 112 "IMAC_TENID" 113 }, 114 { 115 RTE_TUNNEL_FILTER_OMAC_TENID_IMAC, 116 ECORE_FILTER_UNUSED, 117 MAX_ECORE_TUNN_CLSS, 118 "OMAC_TENID_IMAC" 119 }, 120 }; 121 122 struct rte_qede_xstats_name_off { 123 char name[RTE_ETH_XSTATS_NAME_SIZE]; 124 uint64_t offset; 125 }; 126 127 static const struct rte_qede_xstats_name_off qede_xstats_strings[] = { 128 {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)}, 129 {"rx_multicast_bytes", 130 offsetof(struct ecore_eth_stats, rx_mcast_bytes)}, 131 {"rx_broadcast_bytes", 132 offsetof(struct ecore_eth_stats, rx_bcast_bytes)}, 133 {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)}, 134 {"rx_multicast_packets", 135 offsetof(struct ecore_eth_stats, rx_mcast_pkts)}, 136 {"rx_broadcast_packets", 137 offsetof(struct ecore_eth_stats, rx_bcast_pkts)}, 138 139 {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)}, 140 {"tx_multicast_bytes", 141 offsetof(struct ecore_eth_stats, tx_mcast_bytes)}, 142 {"tx_broadcast_bytes", 143 offsetof(struct ecore_eth_stats, tx_bcast_bytes)}, 144 {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)}, 145 {"tx_multicast_packets", 146 offsetof(struct ecore_eth_stats, tx_mcast_pkts)}, 147 {"tx_broadcast_packets", 148 offsetof(struct ecore_eth_stats, tx_bcast_pkts)}, 149 150 {"rx_64_byte_packets", 151 offsetof(struct ecore_eth_stats, rx_64_byte_packets)}, 152 {"rx_65_to_127_byte_packets", 153 offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)}, 154 {"rx_128_to_255_byte_packets", 155 offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)}, 156 {"rx_256_to_511_byte_packets", 157 offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)}, 158 {"rx_512_to_1023_byte_packets", 159 offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)}, 160 {"rx_1024_to_1518_byte_packets", 161 offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)}, 162 {"rx_1519_to_1522_byte_packets", 163 offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)}, 164 {"rx_1519_to_2047_byte_packets", 165 offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)}, 166 {"rx_2048_to_4095_byte_packets", 167 offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)}, 168 {"rx_4096_to_9216_byte_packets", 169 offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)}, 170 {"rx_9217_to_16383_byte_packets", 171 offsetof(struct ecore_eth_stats, 172 rx_9217_to_16383_byte_packets)}, 173 {"tx_64_byte_packets", 174 offsetof(struct ecore_eth_stats, tx_64_byte_packets)}, 175 {"tx_65_to_127_byte_packets", 176 offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)}, 177 {"tx_128_to_255_byte_packets", 178 offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)}, 179 {"tx_256_to_511_byte_packets", 180 offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)}, 181 {"tx_512_to_1023_byte_packets", 182 offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)}, 183 {"tx_1024_to_1518_byte_packets", 184 offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)}, 185 {"trx_1519_to_1522_byte_packets", 186 offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)}, 187 {"tx_2048_to_4095_byte_packets", 188 offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)}, 189 {"tx_4096_to_9216_byte_packets", 190 offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)}, 191 {"tx_9217_to_16383_byte_packets", 192 offsetof(struct ecore_eth_stats, 193 tx_9217_to_16383_byte_packets)}, 194 195 {"rx_mac_crtl_frames", 196 offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)}, 197 {"tx_mac_control_frames", 198 offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)}, 199 {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)}, 200 {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)}, 201 {"rx_priority_flow_control_frames", 202 offsetof(struct ecore_eth_stats, rx_pfc_frames)}, 203 {"tx_priority_flow_control_frames", 204 offsetof(struct ecore_eth_stats, tx_pfc_frames)}, 205 206 {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)}, 207 {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)}, 208 {"rx_carrier_errors", 209 offsetof(struct ecore_eth_stats, rx_carrier_errors)}, 210 {"rx_oversize_packet_errors", 211 offsetof(struct ecore_eth_stats, rx_oversize_packets)}, 212 {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)}, 213 {"rx_undersize_packet_errors", 214 offsetof(struct ecore_eth_stats, rx_undersize_packets)}, 215 {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)}, 216 {"rx_host_buffer_not_available", 217 offsetof(struct ecore_eth_stats, no_buff_discards)}, 218 /* Number of packets discarded because they are bigger than MTU */ 219 {"rx_packet_too_big_discards", 220 offsetof(struct ecore_eth_stats, packet_too_big_discard)}, 221 {"rx_ttl_zero_discards", 222 offsetof(struct ecore_eth_stats, ttl0_discard)}, 223 {"rx_multi_function_tag_filter_discards", 224 offsetof(struct ecore_eth_stats, mftag_filter_discards)}, 225 {"rx_mac_filter_discards", 226 offsetof(struct ecore_eth_stats, mac_filter_discards)}, 227 {"rx_hw_buffer_truncates", 228 offsetof(struct ecore_eth_stats, brb_truncates)}, 229 {"rx_hw_buffer_discards", 230 offsetof(struct ecore_eth_stats, brb_discards)}, 231 {"tx_lpi_entry_count", 232 offsetof(struct ecore_eth_stats, tx_lpi_entry_count)}, 233 {"tx_total_collisions", 234 offsetof(struct ecore_eth_stats, tx_total_collisions)}, 235 {"tx_error_drop_packets", 236 offsetof(struct ecore_eth_stats, tx_err_drop_pkts)}, 237 238 {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)}, 239 {"rx_mac_unicast_packets", 240 offsetof(struct ecore_eth_stats, rx_mac_uc_packets)}, 241 {"rx_mac_multicast_packets", 242 offsetof(struct ecore_eth_stats, rx_mac_mc_packets)}, 243 {"rx_mac_broadcast_packets", 244 offsetof(struct ecore_eth_stats, rx_mac_bc_packets)}, 245 {"rx_mac_frames_ok", 246 offsetof(struct ecore_eth_stats, rx_mac_frames_ok)}, 247 {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)}, 248 {"tx_mac_unicast_packets", 249 offsetof(struct ecore_eth_stats, tx_mac_uc_packets)}, 250 {"tx_mac_multicast_packets", 251 offsetof(struct ecore_eth_stats, tx_mac_mc_packets)}, 252 {"tx_mac_broadcast_packets", 253 offsetof(struct ecore_eth_stats, tx_mac_bc_packets)}, 254 255 {"lro_coalesced_packets", 256 offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)}, 257 {"lro_coalesced_events", 258 offsetof(struct ecore_eth_stats, tpa_coalesced_events)}, 259 {"lro_aborts_num", 260 offsetof(struct ecore_eth_stats, tpa_aborts_num)}, 261 {"lro_not_coalesced_packets", 262 offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)}, 263 {"lro_coalesced_bytes", 264 offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)}, 265 }; 266 267 static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = { 268 {"rx_q_segments", 269 offsetof(struct qede_rx_queue, rx_segs)}, 270 {"rx_q_hw_errors", 271 offsetof(struct qede_rx_queue, rx_hw_errors)}, 272 {"rx_q_allocation_errors", 273 offsetof(struct qede_rx_queue, rx_alloc_errors)} 274 }; 275 276 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) 277 { 278 ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); 279 } 280 281 static void 282 qede_interrupt_handler(struct rte_intr_handle *handle, void *param) 283 { 284 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 285 struct qede_dev *qdev = eth_dev->data->dev_private; 286 struct ecore_dev *edev = &qdev->edev; 287 288 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 289 if (rte_intr_enable(handle)) 290 DP_ERR(edev, "rte_intr_enable failed\n"); 291 } 292 293 static void 294 qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info) 295 { 296 rte_memcpy(&qdev->dev_info, info, sizeof(*info)); 297 qdev->num_tc = qdev->dev_info.num_tc; 298 qdev->ops = qed_ops; 299 } 300 301 static void qede_print_adapter_info(struct qede_dev *qdev) 302 { 303 struct ecore_dev *edev = &qdev->edev; 304 struct qed_dev_info *info = &qdev->dev_info.common; 305 static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE]; 306 static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE]; 307 308 DP_INFO(edev, "*********************************\n"); 309 DP_INFO(edev, " DPDK version:%s\n", rte_version()); 310 DP_INFO(edev, " Chip details : %s%d\n", 311 ECORE_IS_BB(edev) ? "BB" : "AH", 312 CHIP_REV_IS_A0(edev) ? 0 : 1); 313 snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d", 314 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng); 315 snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s", 316 ver_str, QEDE_PMD_VERSION); 317 DP_INFO(edev, " Driver version : %s\n", drv_ver); 318 DP_INFO(edev, " Firmware version : %s\n", ver_str); 319 320 snprintf(ver_str, MCP_DRV_VER_STR_SIZE, 321 "%d.%d.%d.%d", 322 (info->mfw_rev >> 24) & 0xff, 323 (info->mfw_rev >> 16) & 0xff, 324 (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff); 325 DP_INFO(edev, " Management Firmware version : %s\n", ver_str); 326 DP_INFO(edev, " Firmware file : %s\n", fw_file); 327 DP_INFO(edev, "*********************************\n"); 328 } 329 330 static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast) 331 { 332 memset(ucast, 0, sizeof(struct ecore_filter_ucast)); 333 ucast->is_rx_filter = true; 334 ucast->is_tx_filter = true; 335 /* ucast->assert_on_error = true; - For debug */ 336 } 337 338 static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn, 339 uint8_t clss, bool mode, bool mask) 340 { 341 memset(p_tunn, 0, sizeof(struct ecore_tunnel_info)); 342 p_tunn->vxlan.b_update_mode = mode; 343 p_tunn->vxlan.b_mode_enabled = mask; 344 p_tunn->b_update_rx_cls = true; 345 p_tunn->b_update_tx_cls = true; 346 p_tunn->vxlan.tun_cls = clss; 347 } 348 349 static int 350 qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 351 bool add) 352 { 353 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 354 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 355 struct qede_ucast_entry *tmp = NULL; 356 struct qede_ucast_entry *u; 357 struct ether_addr *mac_addr; 358 359 mac_addr = (struct ether_addr *)ucast->mac; 360 if (add) { 361 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 362 if ((memcmp(mac_addr, &tmp->mac, 363 ETHER_ADDR_LEN) == 0) && 364 ucast->vlan == tmp->vlan) { 365 DP_ERR(edev, "Unicast MAC is already added" 366 " with vlan = %u, vni = %u\n", 367 ucast->vlan, ucast->vni); 368 return -EEXIST; 369 } 370 } 371 u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), 372 RTE_CACHE_LINE_SIZE); 373 if (!u) { 374 DP_ERR(edev, "Did not allocate memory for ucast\n"); 375 return -ENOMEM; 376 } 377 ether_addr_copy(mac_addr, &u->mac); 378 u->vlan = ucast->vlan; 379 u->vni = ucast->vni; 380 SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list); 381 qdev->num_uc_addr++; 382 } else { 383 SLIST_FOREACH(tmp, &qdev->uc_list_head, list) { 384 if ((memcmp(mac_addr, &tmp->mac, 385 ETHER_ADDR_LEN) == 0) && 386 ucast->vlan == tmp->vlan && 387 ucast->vni == tmp->vni) 388 break; 389 } 390 if (tmp == NULL) { 391 DP_INFO(edev, "Unicast MAC is not found\n"); 392 return -EINVAL; 393 } 394 SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list); 395 qdev->num_uc_addr--; 396 } 397 398 return 0; 399 } 400 401 static int 402 qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast, 403 bool add) 404 { 405 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 406 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 407 struct ether_addr *mac_addr; 408 struct qede_mcast_entry *tmp = NULL; 409 struct qede_mcast_entry *m; 410 411 mac_addr = (struct ether_addr *)mcast->mac; 412 if (add) { 413 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 414 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) { 415 DP_ERR(edev, 416 "Multicast MAC is already added\n"); 417 return -EEXIST; 418 } 419 } 420 m = rte_malloc(NULL, sizeof(struct qede_mcast_entry), 421 RTE_CACHE_LINE_SIZE); 422 if (!m) { 423 DP_ERR(edev, 424 "Did not allocate memory for mcast\n"); 425 return -ENOMEM; 426 } 427 ether_addr_copy(mac_addr, &m->mac); 428 SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list); 429 qdev->num_mc_addr++; 430 } else { 431 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 432 if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) 433 break; 434 } 435 if (tmp == NULL) { 436 DP_INFO(edev, "Multicast mac is not found\n"); 437 return -EINVAL; 438 } 439 SLIST_REMOVE(&qdev->mc_list_head, tmp, 440 qede_mcast_entry, list); 441 qdev->num_mc_addr--; 442 } 443 444 return 0; 445 } 446 447 static enum _ecore_status_t 448 qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, 449 bool add) 450 { 451 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 452 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 453 enum _ecore_status_t rc; 454 struct ecore_filter_mcast mcast; 455 struct qede_mcast_entry *tmp; 456 uint16_t j = 0; 457 458 /* Multicast */ 459 if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) { 460 if (add) { 461 if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) { 462 DP_ERR(edev, 463 "Mcast filter table limit exceeded, " 464 "Please enable mcast promisc mode\n"); 465 return -ECORE_INVAL; 466 } 467 } 468 rc = qede_mcast_filter(eth_dev, ucast, add); 469 if (rc == 0) { 470 DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr); 471 memset(&mcast, 0, sizeof(mcast)); 472 mcast.num_mc_addrs = qdev->num_mc_addr; 473 mcast.opcode = ECORE_FILTER_ADD; 474 SLIST_FOREACH(tmp, &qdev->mc_list_head, list) { 475 ether_addr_copy(&tmp->mac, 476 (struct ether_addr *)&mcast.mac[j]); 477 j++; 478 } 479 rc = ecore_filter_mcast_cmd(edev, &mcast, 480 ECORE_SPQ_MODE_CB, NULL); 481 } 482 if (rc != ECORE_SUCCESS) { 483 DP_ERR(edev, "Failed to add multicast filter" 484 " rc = %d, op = %d\n", rc, add); 485 } 486 } else { /* Unicast */ 487 if (add) { 488 if (qdev->num_uc_addr >= 489 qdev->dev_info.num_mac_filters) { 490 DP_ERR(edev, 491 "Ucast filter table limit exceeded," 492 " Please enable promisc mode\n"); 493 return -ECORE_INVAL; 494 } 495 } 496 rc = qede_ucast_filter(eth_dev, ucast, add); 497 if (rc == 0) 498 rc = ecore_filter_ucast_cmd(edev, ucast, 499 ECORE_SPQ_MODE_CB, NULL); 500 if (rc != ECORE_SUCCESS) { 501 DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n", 502 rc, add); 503 } 504 } 505 506 return rc; 507 } 508 509 static void 510 qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr, 511 uint32_t index, __rte_unused uint32_t pool) 512 { 513 struct ecore_filter_ucast ucast; 514 515 qede_set_ucast_cmn_params(&ucast); 516 ucast.type = ECORE_FILTER_MAC; 517 ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac); 518 (void)qede_mac_int_ops(eth_dev, &ucast, 1); 519 } 520 521 static void 522 qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) 523 { 524 struct qede_dev *qdev = eth_dev->data->dev_private; 525 struct ecore_dev *edev = &qdev->edev; 526 struct ether_addr mac_addr; 527 struct ecore_filter_ucast ucast; 528 int rc; 529 530 PMD_INIT_FUNC_TRACE(edev); 531 532 if (index >= qdev->dev_info.num_mac_filters) { 533 DP_ERR(edev, "Index %u is above MAC filter limit %u\n", 534 index, qdev->dev_info.num_mac_filters); 535 return; 536 } 537 538 qede_set_ucast_cmn_params(&ucast); 539 ucast.opcode = ECORE_FILTER_REMOVE; 540 ucast.type = ECORE_FILTER_MAC; 541 542 /* Use the index maintained by rte */ 543 ether_addr_copy(ð_dev->data->mac_addrs[index], 544 (struct ether_addr *)&ucast.mac); 545 546 ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL); 547 } 548 549 static void 550 qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr) 551 { 552 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 553 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 554 struct ecore_filter_ucast ucast; 555 int rc; 556 557 if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), 558 mac_addr->addr_bytes)) { 559 DP_ERR(edev, "Setting MAC address is not allowed\n"); 560 ether_addr_copy(&qdev->primary_mac, 561 ð_dev->data->mac_addrs[0]); 562 return; 563 } 564 565 /* First remove the primary mac */ 566 qede_set_ucast_cmn_params(&ucast); 567 ucast.opcode = ECORE_FILTER_REMOVE; 568 ucast.type = ECORE_FILTER_MAC; 569 ether_addr_copy(&qdev->primary_mac, 570 (struct ether_addr *)&ucast.mac); 571 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL); 572 if (rc != 0) { 573 DP_ERR(edev, "Unable to remove current macaddr" 574 " Reverting to previous default mac\n"); 575 ether_addr_copy(&qdev->primary_mac, 576 ð_dev->data->mac_addrs[0]); 577 return; 578 } 579 580 /* Add new MAC */ 581 ucast.opcode = ECORE_FILTER_ADD; 582 ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac); 583 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL); 584 if (rc != 0) 585 DP_ERR(edev, "Unable to add new default mac\n"); 586 else 587 ether_addr_copy(mac_addr, &qdev->primary_mac); 588 } 589 590 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action) 591 { 592 struct ecore_dev *edev = &qdev->edev; 593 struct qed_update_vport_params params = { 594 .vport_id = 0, 595 .accept_any_vlan = action, 596 .update_accept_any_vlan_flg = 1, 597 }; 598 int rc; 599 600 /* Proceed only if action actually needs to be performed */ 601 if (qdev->accept_any_vlan == action) 602 return; 603 604 rc = qdev->ops->vport_update(edev, ¶ms); 605 if (rc) { 606 DP_ERR(edev, "Failed to %s accept-any-vlan\n", 607 action ? "enable" : "disable"); 608 } else { 609 DP_INFO(edev, "%s accept-any-vlan\n", 610 action ? "enabled" : "disabled"); 611 qdev->accept_any_vlan = action; 612 } 613 } 614 615 static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping) 616 { 617 struct qed_update_vport_params vport_update_params; 618 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 619 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 620 int rc; 621 622 memset(&vport_update_params, 0, sizeof(vport_update_params)); 623 vport_update_params.vport_id = 0; 624 vport_update_params.update_inner_vlan_removal_flg = 1; 625 vport_update_params.inner_vlan_removal_flg = set_stripping; 626 rc = qdev->ops->vport_update(edev, &vport_update_params); 627 if (rc) { 628 DP_ERR(edev, "Update V-PORT failed %d\n", rc); 629 return rc; 630 } 631 632 return 0; 633 } 634 635 static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 636 { 637 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 638 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 639 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 640 641 if (mask & ETH_VLAN_STRIP_MASK) { 642 if (rxmode->hw_vlan_strip) 643 (void)qede_vlan_stripping(eth_dev, 1); 644 else 645 (void)qede_vlan_stripping(eth_dev, 0); 646 } 647 648 if (mask & ETH_VLAN_FILTER_MASK) { 649 /* VLAN filtering kicks in when a VLAN is added */ 650 if (rxmode->hw_vlan_filter) { 651 qede_vlan_filter_set(eth_dev, 0, 1); 652 } else { 653 if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */ 654 DP_ERR(edev, 655 " Please remove existing VLAN filters" 656 " before disabling VLAN filtering\n"); 657 /* Signal app that VLAN filtering is still 658 * enabled 659 */ 660 rxmode->hw_vlan_filter = true; 661 } else { 662 qede_vlan_filter_set(eth_dev, 0, 0); 663 } 664 } 665 } 666 667 if (mask & ETH_VLAN_EXTEND_MASK) 668 DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q" 669 " and classification is based on outer tag only\n"); 670 671 DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n", 672 mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter); 673 } 674 675 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, 676 uint16_t vlan_id, int on) 677 { 678 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 679 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 680 struct qed_dev_eth_info *dev_info = &qdev->dev_info; 681 struct qede_vlan_entry *tmp = NULL; 682 struct qede_vlan_entry *vlan; 683 struct ecore_filter_ucast ucast; 684 int rc; 685 686 if (on) { 687 if (qdev->configured_vlans == dev_info->num_vlan_filters) { 688 DP_ERR(edev, "Reached max VLAN filter limit" 689 " enabling accept_any_vlan\n"); 690 qede_config_accept_any_vlan(qdev, true); 691 return 0; 692 } 693 694 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 695 if (tmp->vid == vlan_id) { 696 DP_ERR(edev, "VLAN %u already configured\n", 697 vlan_id); 698 return -EEXIST; 699 } 700 } 701 702 vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry), 703 RTE_CACHE_LINE_SIZE); 704 705 if (!vlan) { 706 DP_ERR(edev, "Did not allocate memory for VLAN\n"); 707 return -ENOMEM; 708 } 709 710 qede_set_ucast_cmn_params(&ucast); 711 ucast.opcode = ECORE_FILTER_ADD; 712 ucast.type = ECORE_FILTER_VLAN; 713 ucast.vlan = vlan_id; 714 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 715 NULL); 716 if (rc != 0) { 717 DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id, 718 rc); 719 rte_free(vlan); 720 } else { 721 vlan->vid = vlan_id; 722 SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list); 723 qdev->configured_vlans++; 724 DP_INFO(edev, "VLAN %u added, configured_vlans %u\n", 725 vlan_id, qdev->configured_vlans); 726 } 727 } else { 728 SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { 729 if (tmp->vid == vlan_id) 730 break; 731 } 732 733 if (!tmp) { 734 if (qdev->configured_vlans == 0) { 735 DP_INFO(edev, 736 "No VLAN filters configured yet\n"); 737 return 0; 738 } 739 740 DP_ERR(edev, "VLAN %u not configured\n", vlan_id); 741 return -EINVAL; 742 } 743 744 SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list); 745 746 qede_set_ucast_cmn_params(&ucast); 747 ucast.opcode = ECORE_FILTER_REMOVE; 748 ucast.type = ECORE_FILTER_VLAN; 749 ucast.vlan = vlan_id; 750 rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, 751 NULL); 752 if (rc != 0) { 753 DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", 754 vlan_id, rc); 755 } else { 756 qdev->configured_vlans--; 757 DP_INFO(edev, "VLAN %u removed configured_vlans %u\n", 758 vlan_id, qdev->configured_vlans); 759 } 760 } 761 762 return rc; 763 } 764 765 static int qede_init_vport(struct qede_dev *qdev) 766 { 767 struct ecore_dev *edev = &qdev->edev; 768 struct qed_start_vport_params start = {0}; 769 int rc; 770 771 start.remove_inner_vlan = 1; 772 start.enable_lro = qdev->enable_lro; 773 start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD; 774 start.vport_id = 0; 775 start.drop_ttl0 = false; 776 start.clear_stats = 1; 777 start.handle_ptp_pkts = 0; 778 779 rc = qdev->ops->vport_start(edev, &start); 780 if (rc) { 781 DP_ERR(edev, "Start V-PORT failed %d\n", rc); 782 return rc; 783 } 784 785 DP_INFO(edev, 786 "Start vport ramrod passed, vport_id = %d, MTU = %u\n", 787 start.vport_id, ETHER_MTU); 788 789 return 0; 790 } 791 792 static void qede_prandom_bytes(uint32_t *buff) 793 { 794 uint8_t i; 795 796 srand((unsigned int)time(NULL)); 797 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 798 buff[i] = rand(); 799 } 800 801 int qede_config_rss(struct rte_eth_dev *eth_dev) 802 { 803 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 804 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 805 uint32_t def_rss_key[ECORE_RSS_KEY_SIZE]; 806 struct rte_eth_rss_reta_entry64 reta_conf[2]; 807 struct rte_eth_rss_conf rss_conf; 808 uint32_t i, id, pos, q; 809 810 rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 811 if (!rss_conf.rss_key) { 812 DP_INFO(edev, "Applying driver default key\n"); 813 rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 814 qede_prandom_bytes(&def_rss_key[0]); 815 rss_conf.rss_key = (uint8_t *)&def_rss_key[0]; 816 } 817 818 /* Configure RSS hash */ 819 if (qede_rss_hash_update(eth_dev, &rss_conf)) 820 return -EINVAL; 821 822 /* Configure default RETA */ 823 memset(reta_conf, 0, sizeof(reta_conf)); 824 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) 825 reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX; 826 827 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 828 id = i / RTE_RETA_GROUP_SIZE; 829 pos = i % RTE_RETA_GROUP_SIZE; 830 q = i % QEDE_RSS_COUNT(qdev); 831 reta_conf[id].reta[pos] = q; 832 } 833 if (qede_rss_reta_update(eth_dev, &reta_conf[0], 834 ECORE_RSS_IND_TABLE_SIZE)) 835 return -EINVAL; 836 837 return 0; 838 } 839 840 static int qede_dev_configure(struct rte_eth_dev *eth_dev) 841 { 842 struct qede_dev *qdev = eth_dev->data->dev_private; 843 struct ecore_dev *edev = &qdev->edev; 844 struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; 845 int rc, i, j; 846 847 PMD_INIT_FUNC_TRACE(edev); 848 849 /* Check requirements for 100G mode */ 850 if (edev->num_hwfns > 1) { 851 if (eth_dev->data->nb_rx_queues < 2 || 852 eth_dev->data->nb_tx_queues < 2) { 853 DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n"); 854 return -EINVAL; 855 } 856 857 if ((eth_dev->data->nb_rx_queues % 2 != 0) || 858 (eth_dev->data->nb_tx_queues % 2 != 0)) { 859 DP_ERR(edev, 860 "100G mode needs even no. of RX/TX queues\n"); 861 return -EINVAL; 862 } 863 } 864 865 /* Sanity checks and throw warnings */ 866 if (rxmode->enable_scatter == 1) 867 eth_dev->data->scattered_rx = 1; 868 869 if (!rxmode->hw_strip_crc) 870 DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n"); 871 872 if (!rxmode->hw_ip_checksum) 873 DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled " 874 "in hw\n"); 875 876 if (rxmode->enable_lro) { 877 qdev->enable_lro = true; 878 /* Enable scatter mode for LRO */ 879 if (!rxmode->enable_scatter) 880 eth_dev->data->scattered_rx = 1; 881 } 882 883 /* Check for the port restart case */ 884 if (qdev->state != QEDE_DEV_INIT) { 885 rc = qdev->ops->vport_stop(edev, 0); 886 if (rc != 0) 887 return rc; 888 qede_dealloc_fp_resc(eth_dev); 889 } 890 891 qdev->fp_num_tx = eth_dev->data->nb_tx_queues; 892 qdev->fp_num_rx = eth_dev->data->nb_rx_queues; 893 qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx; 894 895 /* Fastpath status block should be initialized before sending 896 * VPORT-START in the case of VF. Anyway, do it for both VF/PF. 897 */ 898 rc = qede_alloc_fp_resc(qdev); 899 if (rc != 0) 900 return rc; 901 902 /* Issue VPORT-START with default config values to allow 903 * other port configurations early on. 904 */ 905 rc = qede_init_vport(qdev); 906 if (rc != 0) 907 return rc; 908 909 if (!(rxmode->mq_mode == ETH_MQ_RX_RSS || 910 rxmode->mq_mode == ETH_MQ_RX_NONE)) { 911 DP_ERR(edev, "Unsupported RSS mode\n"); 912 qdev->ops->vport_stop(edev, 0); 913 qede_dealloc_fp_resc(eth_dev); 914 return -EINVAL; 915 } 916 917 /* Flow director mode check */ 918 rc = qede_check_fdir_support(eth_dev); 919 if (rc) { 920 qdev->ops->vport_stop(edev, 0); 921 qede_dealloc_fp_resc(eth_dev); 922 return -EINVAL; 923 } 924 SLIST_INIT(&qdev->fdir_info.fdir_list_head); 925 926 SLIST_INIT(&qdev->vlan_list_head); 927 928 /* Add primary mac for PF */ 929 if (IS_PF(edev)) 930 qede_mac_addr_set(eth_dev, &qdev->primary_mac); 931 932 /* Enable VLAN offloads by default */ 933 qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK | 934 ETH_VLAN_FILTER_MASK | 935 ETH_VLAN_EXTEND_MASK); 936 937 qdev->state = QEDE_DEV_CONFIG; 938 939 DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n", 940 (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev), 941 qdev->num_tc); 942 943 return 0; 944 } 945 946 /* Info about HW descriptor ring limitations */ 947 static const struct rte_eth_desc_lim qede_rx_desc_lim = { 948 .nb_max = NUM_RX_BDS_MAX, 949 .nb_min = 128, 950 .nb_align = 128 /* lowest common multiple */ 951 }; 952 953 static const struct rte_eth_desc_lim qede_tx_desc_lim = { 954 .nb_max = NUM_TX_BDS_MAX, 955 .nb_min = 256, 956 .nb_align = 256, 957 .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET, 958 .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 959 }; 960 961 static void 962 qede_dev_info_get(struct rte_eth_dev *eth_dev, 963 struct rte_eth_dev_info *dev_info) 964 { 965 struct qede_dev *qdev = eth_dev->data->dev_private; 966 struct ecore_dev *edev = &qdev->edev; 967 struct qed_link_output link; 968 uint32_t speed_cap = 0; 969 970 PMD_INIT_FUNC_TRACE(edev); 971 972 dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 973 dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE; 974 dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; 975 dev_info->rx_desc_lim = qede_rx_desc_lim; 976 dev_info->tx_desc_lim = qede_tx_desc_lim; 977 978 if (IS_PF(edev)) 979 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 980 QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2); 981 else 982 dev_info->max_rx_queues = (uint16_t)RTE_MIN( 983 QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF); 984 dev_info->max_tx_queues = dev_info->max_rx_queues; 985 986 dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters; 987 dev_info->max_vfs = 0; 988 dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; 989 dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t); 990 dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; 991 992 dev_info->default_txconf = (struct rte_eth_txconf) { 993 .txq_flags = QEDE_TXQ_FLAGS, 994 }; 995 996 dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP | 997 DEV_RX_OFFLOAD_IPV4_CKSUM | 998 DEV_RX_OFFLOAD_UDP_CKSUM | 999 DEV_RX_OFFLOAD_TCP_CKSUM | 1000 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | 1001 DEV_RX_OFFLOAD_TCP_LRO); 1002 1003 dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | 1004 DEV_TX_OFFLOAD_IPV4_CKSUM | 1005 DEV_TX_OFFLOAD_UDP_CKSUM | 1006 DEV_TX_OFFLOAD_TCP_CKSUM | 1007 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | 1008 DEV_TX_OFFLOAD_TCP_TSO | 1009 DEV_TX_OFFLOAD_VXLAN_TNL_TSO); 1010 1011 memset(&link, 0, sizeof(struct qed_link_output)); 1012 qdev->ops->common->get_link(edev, &link); 1013 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1014 speed_cap |= ETH_LINK_SPEED_1G; 1015 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1016 speed_cap |= ETH_LINK_SPEED_10G; 1017 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1018 speed_cap |= ETH_LINK_SPEED_25G; 1019 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1020 speed_cap |= ETH_LINK_SPEED_40G; 1021 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1022 speed_cap |= ETH_LINK_SPEED_50G; 1023 if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1024 speed_cap |= ETH_LINK_SPEED_100G; 1025 dev_info->speed_capa = speed_cap; 1026 } 1027 1028 /* return 0 means link status changed, -1 means not changed */ 1029 static int 1030 qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) 1031 { 1032 struct qede_dev *qdev = eth_dev->data->dev_private; 1033 struct ecore_dev *edev = &qdev->edev; 1034 uint16_t link_duplex; 1035 struct qed_link_output link; 1036 struct rte_eth_link *curr = ð_dev->data->dev_link; 1037 1038 memset(&link, 0, sizeof(struct qed_link_output)); 1039 qdev->ops->common->get_link(edev, &link); 1040 1041 /* Link Speed */ 1042 curr->link_speed = link.speed; 1043 1044 /* Link Mode */ 1045 switch (link.duplex) { 1046 case QEDE_DUPLEX_HALF: 1047 link_duplex = ETH_LINK_HALF_DUPLEX; 1048 break; 1049 case QEDE_DUPLEX_FULL: 1050 link_duplex = ETH_LINK_FULL_DUPLEX; 1051 break; 1052 case QEDE_DUPLEX_UNKNOWN: 1053 default: 1054 link_duplex = -1; 1055 } 1056 curr->link_duplex = link_duplex; 1057 1058 /* Link Status */ 1059 curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN; 1060 1061 /* AN */ 1062 curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? 1063 ETH_LINK_AUTONEG : ETH_LINK_FIXED; 1064 1065 DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", 1066 curr->link_speed, curr->link_duplex, 1067 curr->link_autoneg, curr->link_status); 1068 1069 /* return 0 means link status changed, -1 means not changed */ 1070 return ((curr->link_status == link.link_up) ? -1 : 0); 1071 } 1072 1073 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev) 1074 { 1075 struct qede_dev *qdev = eth_dev->data->dev_private; 1076 struct ecore_dev *edev = &qdev->edev; 1077 1078 PMD_INIT_FUNC_TRACE(edev); 1079 1080 enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; 1081 1082 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1083 type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1084 1085 qed_configure_filter_rx_mode(eth_dev, type); 1086 } 1087 1088 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev) 1089 { 1090 struct qede_dev *qdev = eth_dev->data->dev_private; 1091 struct ecore_dev *edev = &qdev->edev; 1092 1093 PMD_INIT_FUNC_TRACE(edev); 1094 1095 if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) 1096 qed_configure_filter_rx_mode(eth_dev, 1097 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC); 1098 else 1099 qed_configure_filter_rx_mode(eth_dev, 1100 QED_FILTER_RX_MODE_TYPE_REGULAR); 1101 } 1102 1103 static void qede_poll_sp_sb_cb(void *param) 1104 { 1105 struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; 1106 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1107 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1108 int rc; 1109 1110 qede_interrupt_action(ECORE_LEADING_HWFN(edev)); 1111 qede_interrupt_action(&edev->hwfns[1]); 1112 1113 rc = rte_eal_alarm_set(timer_period * US_PER_S, 1114 qede_poll_sp_sb_cb, 1115 (void *)eth_dev); 1116 if (rc != 0) { 1117 DP_ERR(edev, "Unable to start periodic" 1118 " timer rc %d\n", rc); 1119 assert(false && "Unable to start periodic timer"); 1120 } 1121 } 1122 1123 static void qede_dev_close(struct rte_eth_dev *eth_dev) 1124 { 1125 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 1126 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1127 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1128 int rc; 1129 1130 PMD_INIT_FUNC_TRACE(edev); 1131 1132 qede_fdir_dealloc_resc(eth_dev); 1133 1134 /* dev_stop() shall cleanup fp resources in hw but without releasing 1135 * dma memories and sw structures so that dev_start() can be called 1136 * by the app without reconfiguration. However, in dev_close() we 1137 * can release all the resources and device can be brought up newly 1138 */ 1139 if (qdev->state != QEDE_DEV_STOP) 1140 qede_dev_stop(eth_dev); 1141 else 1142 DP_INFO(edev, "Device is already stopped\n"); 1143 1144 rc = qdev->ops->vport_stop(edev, 0); 1145 if (rc != 0) 1146 DP_ERR(edev, "Failed to stop VPORT\n"); 1147 1148 qede_dealloc_fp_resc(eth_dev); 1149 1150 qdev->ops->common->slowpath_stop(edev); 1151 1152 qdev->ops->common->remove(edev); 1153 1154 rte_intr_disable(&pci_dev->intr_handle); 1155 1156 rte_intr_callback_unregister(&pci_dev->intr_handle, 1157 qede_interrupt_handler, (void *)eth_dev); 1158 1159 if (edev->num_hwfns > 1) 1160 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); 1161 1162 qdev->state = QEDE_DEV_INIT; /* Go back to init state */ 1163 } 1164 1165 static void 1166 qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) 1167 { 1168 struct qede_dev *qdev = eth_dev->data->dev_private; 1169 struct ecore_dev *edev = &qdev->edev; 1170 struct ecore_eth_stats stats; 1171 unsigned int i = 0, j = 0, qid; 1172 unsigned int rxq_stat_cntrs, txq_stat_cntrs; 1173 struct qede_tx_queue *txq; 1174 1175 qdev->ops->get_vport_stats(edev, &stats); 1176 1177 /* RX Stats */ 1178 eth_stats->ipackets = stats.rx_ucast_pkts + 1179 stats.rx_mcast_pkts + stats.rx_bcast_pkts; 1180 1181 eth_stats->ibytes = stats.rx_ucast_bytes + 1182 stats.rx_mcast_bytes + stats.rx_bcast_bytes; 1183 1184 eth_stats->ierrors = stats.rx_crc_errors + 1185 stats.rx_align_errors + 1186 stats.rx_carrier_errors + 1187 stats.rx_oversize_packets + 1188 stats.rx_jabbers + stats.rx_undersize_packets; 1189 1190 eth_stats->rx_nombuf = stats.no_buff_discards; 1191 1192 eth_stats->imissed = stats.mftag_filter_discards + 1193 stats.mac_filter_discards + 1194 stats.no_buff_discards + stats.brb_truncates + stats.brb_discards; 1195 1196 /* TX stats */ 1197 eth_stats->opackets = stats.tx_ucast_pkts + 1198 stats.tx_mcast_pkts + stats.tx_bcast_pkts; 1199 1200 eth_stats->obytes = stats.tx_ucast_bytes + 1201 stats.tx_mcast_bytes + stats.tx_bcast_bytes; 1202 1203 eth_stats->oerrors = stats.tx_err_drop_pkts; 1204 1205 /* Queue stats */ 1206 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1207 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1208 txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev), 1209 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1210 if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) || 1211 (txq_stat_cntrs != QEDE_TSS_COUNT(qdev))) 1212 DP_VERBOSE(edev, ECORE_MSG_DEBUG, 1213 "Not all the queue stats will be displayed. Set" 1214 " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" 1215 " appropriately and retry.\n"); 1216 1217 for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) { 1218 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) { 1219 eth_stats->q_ipackets[i] = 1220 *(uint64_t *)( 1221 ((char *)(qdev->fp_array[(qid)].rxq)) + 1222 offsetof(struct qede_rx_queue, 1223 rcv_pkts)); 1224 eth_stats->q_errors[i] = 1225 *(uint64_t *)( 1226 ((char *)(qdev->fp_array[(qid)].rxq)) + 1227 offsetof(struct qede_rx_queue, 1228 rx_hw_errors)) + 1229 *(uint64_t *)( 1230 ((char *)(qdev->fp_array[(qid)].rxq)) + 1231 offsetof(struct qede_rx_queue, 1232 rx_alloc_errors)); 1233 i++; 1234 } 1235 if (i == rxq_stat_cntrs) 1236 break; 1237 } 1238 1239 for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) { 1240 if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) { 1241 txq = qdev->fp_array[(qid)].txqs[0]; 1242 eth_stats->q_opackets[j] = 1243 *((uint64_t *)(uintptr_t) 1244 (((uint64_t)(uintptr_t)(txq)) + 1245 offsetof(struct qede_tx_queue, 1246 xmit_pkts))); 1247 j++; 1248 } 1249 if (j == txq_stat_cntrs) 1250 break; 1251 } 1252 } 1253 1254 static unsigned 1255 qede_get_xstats_count(struct qede_dev *qdev) { 1256 return RTE_DIM(qede_xstats_strings) + 1257 (RTE_DIM(qede_rxq_xstats_strings) * 1258 RTE_MIN(QEDE_RSS_COUNT(qdev), 1259 RTE_ETHDEV_QUEUE_STAT_CNTRS)); 1260 } 1261 1262 static int 1263 qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev, 1264 struct rte_eth_xstat_name *xstats_names, unsigned limit) 1265 { 1266 struct qede_dev *qdev = dev->data->dev_private; 1267 const unsigned int stat_cnt = qede_get_xstats_count(qdev); 1268 unsigned int i, qid, stat_idx = 0; 1269 unsigned int rxq_stat_cntrs; 1270 1271 if (xstats_names != NULL) { 1272 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1273 snprintf(xstats_names[stat_idx].name, 1274 sizeof(xstats_names[stat_idx].name), 1275 "%s", 1276 qede_xstats_strings[i].name); 1277 stat_idx++; 1278 } 1279 1280 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1281 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1282 for (qid = 0; qid < rxq_stat_cntrs; qid++) { 1283 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1284 snprintf(xstats_names[stat_idx].name, 1285 sizeof(xstats_names[stat_idx].name), 1286 "%.4s%d%s", 1287 qede_rxq_xstats_strings[i].name, qid, 1288 qede_rxq_xstats_strings[i].name + 4); 1289 stat_idx++; 1290 } 1291 } 1292 } 1293 1294 return stat_cnt; 1295 } 1296 1297 static int 1298 qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, 1299 unsigned int n) 1300 { 1301 struct qede_dev *qdev = dev->data->dev_private; 1302 struct ecore_dev *edev = &qdev->edev; 1303 struct ecore_eth_stats stats; 1304 const unsigned int num = qede_get_xstats_count(qdev); 1305 unsigned int i, qid, stat_idx = 0; 1306 unsigned int rxq_stat_cntrs; 1307 1308 if (n < num) 1309 return num; 1310 1311 qdev->ops->get_vport_stats(edev, &stats); 1312 1313 for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { 1314 xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) + 1315 qede_xstats_strings[i].offset); 1316 xstats[stat_idx].id = stat_idx; 1317 stat_idx++; 1318 } 1319 1320 rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), 1321 RTE_ETHDEV_QUEUE_STAT_CNTRS); 1322 for (qid = 0; qid < rxq_stat_cntrs; qid++) { 1323 if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) { 1324 for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { 1325 xstats[stat_idx].value = *(uint64_t *)( 1326 ((char *)(qdev->fp_array[(qid)].rxq)) + 1327 qede_rxq_xstats_strings[i].offset); 1328 xstats[stat_idx].id = stat_idx; 1329 stat_idx++; 1330 } 1331 } 1332 } 1333 1334 return stat_idx; 1335 } 1336 1337 static void 1338 qede_reset_xstats(struct rte_eth_dev *dev) 1339 { 1340 struct qede_dev *qdev = dev->data->dev_private; 1341 struct ecore_dev *edev = &qdev->edev; 1342 1343 ecore_reset_vport_stats(edev); 1344 } 1345 1346 int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) 1347 { 1348 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1349 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1350 struct qed_link_params link_params; 1351 int rc; 1352 1353 DP_INFO(edev, "setting link state %d\n", link_up); 1354 memset(&link_params, 0, sizeof(link_params)); 1355 link_params.link_up = link_up; 1356 rc = qdev->ops->common->set_link(edev, &link_params); 1357 if (rc != ECORE_SUCCESS) 1358 DP_ERR(edev, "Unable to set link state %d\n", link_up); 1359 1360 return rc; 1361 } 1362 1363 static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev) 1364 { 1365 return qede_dev_set_link_state(eth_dev, true); 1366 } 1367 1368 static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev) 1369 { 1370 return qede_dev_set_link_state(eth_dev, false); 1371 } 1372 1373 static void qede_reset_stats(struct rte_eth_dev *eth_dev) 1374 { 1375 struct qede_dev *qdev = eth_dev->data->dev_private; 1376 struct ecore_dev *edev = &qdev->edev; 1377 1378 ecore_reset_vport_stats(edev); 1379 } 1380 1381 static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev) 1382 { 1383 enum qed_filter_rx_mode_type type = 1384 QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; 1385 1386 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1387 type |= QED_FILTER_RX_MODE_TYPE_PROMISC; 1388 1389 qed_configure_filter_rx_mode(eth_dev, type); 1390 } 1391 1392 static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev) 1393 { 1394 if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) 1395 qed_configure_filter_rx_mode(eth_dev, 1396 QED_FILTER_RX_MODE_TYPE_PROMISC); 1397 else 1398 qed_configure_filter_rx_mode(eth_dev, 1399 QED_FILTER_RX_MODE_TYPE_REGULAR); 1400 } 1401 1402 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, 1403 struct rte_eth_fc_conf *fc_conf) 1404 { 1405 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1406 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1407 struct qed_link_output current_link; 1408 struct qed_link_params params; 1409 1410 memset(¤t_link, 0, sizeof(current_link)); 1411 qdev->ops->common->get_link(edev, ¤t_link); 1412 1413 memset(¶ms, 0, sizeof(params)); 1414 params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; 1415 if (fc_conf->autoneg) { 1416 if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) { 1417 DP_ERR(edev, "Autoneg not supported\n"); 1418 return -EINVAL; 1419 } 1420 params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 1421 } 1422 1423 /* Pause is assumed to be supported (SUPPORTED_Pause) */ 1424 if (fc_conf->mode == RTE_FC_FULL) 1425 params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE | 1426 QED_LINK_PAUSE_RX_ENABLE); 1427 if (fc_conf->mode == RTE_FC_TX_PAUSE) 1428 params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; 1429 if (fc_conf->mode == RTE_FC_RX_PAUSE) 1430 params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; 1431 1432 params.link_up = true; 1433 (void)qdev->ops->common->set_link(edev, ¶ms); 1434 1435 return 0; 1436 } 1437 1438 static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev, 1439 struct rte_eth_fc_conf *fc_conf) 1440 { 1441 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1442 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1443 struct qed_link_output current_link; 1444 1445 memset(¤t_link, 0, sizeof(current_link)); 1446 qdev->ops->common->get_link(edev, ¤t_link); 1447 1448 if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1449 fc_conf->autoneg = true; 1450 1451 if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE | 1452 QED_LINK_PAUSE_TX_ENABLE)) 1453 fc_conf->mode = RTE_FC_FULL; 1454 else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) 1455 fc_conf->mode = RTE_FC_RX_PAUSE; 1456 else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) 1457 fc_conf->mode = RTE_FC_TX_PAUSE; 1458 else 1459 fc_conf->mode = RTE_FC_NONE; 1460 1461 return 0; 1462 } 1463 1464 static const uint32_t * 1465 qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) 1466 { 1467 static const uint32_t ptypes[] = { 1468 RTE_PTYPE_L3_IPV4, 1469 RTE_PTYPE_L3_IPV6, 1470 RTE_PTYPE_UNKNOWN 1471 }; 1472 1473 if (eth_dev->rx_pkt_burst == qede_recv_pkts) 1474 return ptypes; 1475 1476 return NULL; 1477 } 1478 1479 static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf) 1480 { 1481 *rss_caps = 0; 1482 *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0; 1483 *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0; 1484 *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0; 1485 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; 1486 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0; 1487 *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0; 1488 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0; 1489 *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0; 1490 } 1491 1492 static int qede_rss_hash_update(struct rte_eth_dev *eth_dev, 1493 struct rte_eth_rss_conf *rss_conf) 1494 { 1495 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1496 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1497 struct ecore_sp_vport_update_params vport_update_params; 1498 struct ecore_rss_params rss_params; 1499 struct ecore_hwfn *p_hwfn; 1500 uint32_t *key = (uint32_t *)rss_conf->rss_key; 1501 uint64_t hf = rss_conf->rss_hf; 1502 uint8_t len = rss_conf->rss_key_len; 1503 uint8_t idx; 1504 uint8_t i; 1505 int rc; 1506 1507 memset(&vport_update_params, 0, sizeof(vport_update_params)); 1508 memset(&rss_params, 0, sizeof(rss_params)); 1509 1510 DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n", 1511 (unsigned long)hf, len, key); 1512 1513 if (hf != 0) { 1514 /* Enabling RSS */ 1515 DP_INFO(edev, "Enabling rss\n"); 1516 1517 /* RSS caps */ 1518 qede_init_rss_caps(&rss_params.rss_caps, hf); 1519 rss_params.update_rss_capabilities = 1; 1520 1521 /* RSS hash key */ 1522 if (key) { 1523 if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) { 1524 DP_ERR(edev, "RSS key length exceeds limit\n"); 1525 return -EINVAL; 1526 } 1527 DP_INFO(edev, "Applying user supplied hash key\n"); 1528 rss_params.update_rss_key = 1; 1529 memcpy(&rss_params.rss_key, key, len); 1530 } 1531 rss_params.rss_enable = 1; 1532 } 1533 1534 rss_params.update_rss_config = 1; 1535 /* tbl_size has to be set with capabilities */ 1536 rss_params.rss_table_size_log = 7; 1537 vport_update_params.vport_id = 0; 1538 /* pass the L2 handles instead of qids */ 1539 for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) { 1540 idx = qdev->rss_ind_table[i]; 1541 rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle; 1542 } 1543 vport_update_params.rss_params = &rss_params; 1544 1545 for_each_hwfn(edev, i) { 1546 p_hwfn = &edev->hwfns[i]; 1547 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 1548 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 1549 ECORE_SPQ_MODE_EBLOCK, NULL); 1550 if (rc) { 1551 DP_ERR(edev, "vport-update for RSS failed\n"); 1552 return rc; 1553 } 1554 } 1555 qdev->rss_enable = rss_params.rss_enable; 1556 1557 /* Update local structure for hash query */ 1558 qdev->rss_conf.rss_hf = hf; 1559 qdev->rss_conf.rss_key_len = len; 1560 if (qdev->rss_enable) { 1561 if (qdev->rss_conf.rss_key == NULL) { 1562 qdev->rss_conf.rss_key = (uint8_t *)malloc(len); 1563 if (qdev->rss_conf.rss_key == NULL) { 1564 DP_ERR(edev, "No memory to store RSS key\n"); 1565 return -ENOMEM; 1566 } 1567 } 1568 if (key && len) { 1569 DP_INFO(edev, "Storing RSS key\n"); 1570 memcpy(qdev->rss_conf.rss_key, key, len); 1571 } 1572 } else if (!qdev->rss_enable && len == 0) { 1573 if (qdev->rss_conf.rss_key) { 1574 free(qdev->rss_conf.rss_key); 1575 qdev->rss_conf.rss_key = NULL; 1576 DP_INFO(edev, "Free RSS key\n"); 1577 } 1578 } 1579 1580 return 0; 1581 } 1582 1583 static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev, 1584 struct rte_eth_rss_conf *rss_conf) 1585 { 1586 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1587 1588 rss_conf->rss_hf = qdev->rss_conf.rss_hf; 1589 rss_conf->rss_key_len = qdev->rss_conf.rss_key_len; 1590 1591 if (rss_conf->rss_key && qdev->rss_conf.rss_key) 1592 memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key, 1593 rss_conf->rss_key_len); 1594 return 0; 1595 } 1596 1597 static int qede_rss_reta_update(struct rte_eth_dev *eth_dev, 1598 struct rte_eth_rss_reta_entry64 *reta_conf, 1599 uint16_t reta_size) 1600 { 1601 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1602 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1603 struct ecore_sp_vport_update_params vport_update_params; 1604 struct ecore_rss_params params; 1605 struct ecore_hwfn *p_hwfn; 1606 uint16_t i, idx, shift; 1607 uint8_t entry; 1608 int rc; 1609 1610 if (reta_size > ETH_RSS_RETA_SIZE_128) { 1611 DP_ERR(edev, "reta_size %d is not supported by hardware\n", 1612 reta_size); 1613 return -EINVAL; 1614 } 1615 1616 memset(&vport_update_params, 0, sizeof(vport_update_params)); 1617 memset(¶ms, 0, sizeof(params)); 1618 1619 for (i = 0; i < reta_size; i++) { 1620 idx = i / RTE_RETA_GROUP_SIZE; 1621 shift = i % RTE_RETA_GROUP_SIZE; 1622 if (reta_conf[idx].mask & (1ULL << shift)) { 1623 entry = reta_conf[idx].reta[shift]; 1624 /* Pass rxq handles to ecore */ 1625 params.rss_ind_table[i] = 1626 qdev->fp_array[entry].rxq->handle; 1627 /* Update the local copy for RETA query command */ 1628 qdev->rss_ind_table[i] = entry; 1629 } 1630 } 1631 1632 /* Fix up RETA for CMT mode device */ 1633 if (edev->num_hwfns > 1) 1634 qdev->rss_enable = qed_update_rss_parm_cmt(edev, 1635 params.rss_ind_table[0]); 1636 params.update_rss_ind_table = 1; 1637 params.rss_table_size_log = 7; 1638 params.update_rss_config = 1; 1639 vport_update_params.vport_id = 0; 1640 /* Use the current value of rss_enable */ 1641 params.rss_enable = qdev->rss_enable; 1642 vport_update_params.rss_params = ¶ms; 1643 1644 for_each_hwfn(edev, i) { 1645 p_hwfn = &edev->hwfns[i]; 1646 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 1647 rc = ecore_sp_vport_update(p_hwfn, &vport_update_params, 1648 ECORE_SPQ_MODE_EBLOCK, NULL); 1649 if (rc) { 1650 DP_ERR(edev, "vport-update for RSS failed\n"); 1651 return rc; 1652 } 1653 } 1654 1655 return 0; 1656 } 1657 1658 static int qede_rss_reta_query(struct rte_eth_dev *eth_dev, 1659 struct rte_eth_rss_reta_entry64 *reta_conf, 1660 uint16_t reta_size) 1661 { 1662 struct qede_dev *qdev = eth_dev->data->dev_private; 1663 struct ecore_dev *edev = &qdev->edev; 1664 uint16_t i, idx, shift; 1665 uint8_t entry; 1666 1667 if (reta_size > ETH_RSS_RETA_SIZE_128) { 1668 DP_ERR(edev, "reta_size %d is not supported\n", 1669 reta_size); 1670 return -EINVAL; 1671 } 1672 1673 for (i = 0; i < reta_size; i++) { 1674 idx = i / RTE_RETA_GROUP_SIZE; 1675 shift = i % RTE_RETA_GROUP_SIZE; 1676 if (reta_conf[idx].mask & (1ULL << shift)) { 1677 entry = qdev->rss_ind_table[i]; 1678 reta_conf[idx].reta[shift] = entry; 1679 } 1680 } 1681 1682 return 0; 1683 } 1684 1685 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) 1686 { 1687 struct qede_dev *qdev = QEDE_INIT_QDEV(dev); 1688 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1689 struct rte_eth_dev_info dev_info = {0}; 1690 struct qede_fastpath *fp; 1691 uint32_t frame_size; 1692 uint16_t rx_buf_size; 1693 uint16_t bufsz; 1694 int i; 1695 1696 PMD_INIT_FUNC_TRACE(edev); 1697 qede_dev_info_get(dev, &dev_info); 1698 frame_size = mtu + QEDE_ETH_OVERHEAD; 1699 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) { 1700 DP_ERR(edev, "MTU %u out of range\n", mtu); 1701 return -EINVAL; 1702 } 1703 if (!dev->data->scattered_rx && 1704 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) { 1705 DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n", 1706 dev->data->min_rx_buf_size); 1707 return -EINVAL; 1708 } 1709 /* Temporarily replace I/O functions with dummy ones. It cannot 1710 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL. 1711 */ 1712 dev->rx_pkt_burst = qede_rxtx_pkts_dummy; 1713 dev->tx_pkt_burst = qede_rxtx_pkts_dummy; 1714 qede_dev_stop(dev); 1715 rte_delay_ms(1000); 1716 qdev->mtu = mtu; 1717 /* Fix up RX buf size for all queues of the port */ 1718 for_each_queue(i) { 1719 fp = &qdev->fp_array[i]; 1720 if (fp->type & QEDE_FASTPATH_RX) { 1721 bufsz = (uint16_t)rte_pktmbuf_data_room_size( 1722 fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; 1723 if (dev->data->scattered_rx) 1724 rx_buf_size = bufsz + QEDE_ETH_OVERHEAD; 1725 else 1726 rx_buf_size = mtu + QEDE_ETH_OVERHEAD; 1727 rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size); 1728 fp->rxq->rx_buf_size = rx_buf_size; 1729 DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size); 1730 } 1731 } 1732 qede_dev_start(dev); 1733 if (frame_size > ETHER_MAX_LEN) 1734 dev->data->dev_conf.rxmode.jumbo_frame = 1; 1735 else 1736 dev->data->dev_conf.rxmode.jumbo_frame = 0; 1737 /* update max frame size */ 1738 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; 1739 /* Reassign back */ 1740 dev->rx_pkt_burst = qede_recv_pkts; 1741 dev->tx_pkt_burst = qede_xmit_pkts; 1742 1743 return 0; 1744 } 1745 1746 static int 1747 qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev, 1748 struct rte_eth_udp_tunnel *tunnel_udp, 1749 bool add) 1750 { 1751 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1752 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1753 struct ecore_tunnel_info tunn; /* @DPDK */ 1754 struct ecore_hwfn *p_hwfn; 1755 int rc, i; 1756 1757 PMD_INIT_FUNC_TRACE(edev); 1758 1759 memset(&tunn, 0, sizeof(tunn)); 1760 if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) { 1761 tunn.vxlan_port.b_update_port = true; 1762 tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port : 1763 QEDE_VXLAN_DEF_PORT; 1764 for_each_hwfn(edev, i) { 1765 p_hwfn = &edev->hwfns[i]; 1766 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn, 1767 ECORE_SPQ_MODE_CB, NULL); 1768 if (rc != ECORE_SUCCESS) { 1769 DP_ERR(edev, "Unable to config UDP port %u\n", 1770 tunn.vxlan_port.port); 1771 return rc; 1772 } 1773 } 1774 } 1775 1776 return 0; 1777 } 1778 1779 int 1780 qede_udp_dst_port_del(struct rte_eth_dev *eth_dev, 1781 struct rte_eth_udp_tunnel *tunnel_udp) 1782 { 1783 return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false); 1784 } 1785 1786 int 1787 qede_udp_dst_port_add(struct rte_eth_dev *eth_dev, 1788 struct rte_eth_udp_tunnel *tunnel_udp) 1789 { 1790 return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true); 1791 } 1792 1793 static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type, 1794 uint32_t *clss, char *str) 1795 { 1796 uint16_t j; 1797 *clss = MAX_ECORE_TUNN_CLSS; 1798 1799 for (j = 0; j < RTE_DIM(qede_tunn_types); j++) { 1800 if (filter == qede_tunn_types[j].rte_filter_type) { 1801 *type = qede_tunn_types[j].qede_type; 1802 *clss = qede_tunn_types[j].qede_tunn_clss; 1803 strcpy(str, qede_tunn_types[j].string); 1804 return; 1805 } 1806 } 1807 } 1808 1809 static int 1810 qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast, 1811 const struct rte_eth_tunnel_filter_conf *conf, 1812 uint32_t type) 1813 { 1814 /* Init commmon ucast params first */ 1815 qede_set_ucast_cmn_params(ucast); 1816 1817 /* Copy out the required fields based on classification type */ 1818 ucast->type = type; 1819 1820 switch (type) { 1821 case ECORE_FILTER_VNI: 1822 ucast->vni = conf->tenant_id; 1823 break; 1824 case ECORE_FILTER_INNER_VLAN: 1825 ucast->vlan = conf->inner_vlan; 1826 break; 1827 case ECORE_FILTER_MAC: 1828 memcpy(ucast->mac, conf->outer_mac.addr_bytes, 1829 ETHER_ADDR_LEN); 1830 break; 1831 case ECORE_FILTER_INNER_MAC: 1832 memcpy(ucast->mac, conf->inner_mac.addr_bytes, 1833 ETHER_ADDR_LEN); 1834 break; 1835 case ECORE_FILTER_MAC_VNI_PAIR: 1836 memcpy(ucast->mac, conf->outer_mac.addr_bytes, 1837 ETHER_ADDR_LEN); 1838 ucast->vni = conf->tenant_id; 1839 break; 1840 case ECORE_FILTER_INNER_MAC_VNI_PAIR: 1841 memcpy(ucast->mac, conf->inner_mac.addr_bytes, 1842 ETHER_ADDR_LEN); 1843 ucast->vni = conf->tenant_id; 1844 break; 1845 case ECORE_FILTER_INNER_PAIR: 1846 memcpy(ucast->mac, conf->inner_mac.addr_bytes, 1847 ETHER_ADDR_LEN); 1848 ucast->vlan = conf->inner_vlan; 1849 break; 1850 default: 1851 return -EINVAL; 1852 } 1853 1854 return ECORE_SUCCESS; 1855 } 1856 1857 static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev, 1858 enum rte_filter_op filter_op, 1859 const struct rte_eth_tunnel_filter_conf *conf) 1860 { 1861 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1862 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1863 struct ecore_tunnel_info tunn; 1864 struct ecore_hwfn *p_hwfn; 1865 enum ecore_filter_ucast_type type; 1866 enum ecore_tunn_clss clss; 1867 struct ecore_filter_ucast ucast; 1868 char str[80]; 1869 uint16_t filter_type; 1870 int rc, i; 1871 1872 filter_type = conf->filter_type | qdev->vxlan_filter_type; 1873 /* First determine if the given filter classification is supported */ 1874 qede_get_ecore_tunn_params(filter_type, &type, &clss, str); 1875 if (clss == MAX_ECORE_TUNN_CLSS) { 1876 DP_ERR(edev, "Wrong filter type\n"); 1877 return -EINVAL; 1878 } 1879 /* Init tunnel ucast params */ 1880 rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type); 1881 if (rc != ECORE_SUCCESS) { 1882 DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n", 1883 conf->filter_type); 1884 return rc; 1885 } 1886 DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n", 1887 str, filter_op, ucast.type); 1888 switch (filter_op) { 1889 case RTE_ETH_FILTER_ADD: 1890 ucast.opcode = ECORE_FILTER_ADD; 1891 1892 /* Skip MAC/VLAN if filter is based on VNI */ 1893 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { 1894 rc = qede_mac_int_ops(eth_dev, &ucast, 1); 1895 if (rc == 0) { 1896 /* Enable accept anyvlan */ 1897 qede_config_accept_any_vlan(qdev, true); 1898 } 1899 } else { 1900 rc = qede_ucast_filter(eth_dev, &ucast, 1); 1901 if (rc == 0) 1902 rc = ecore_filter_ucast_cmd(edev, &ucast, 1903 ECORE_SPQ_MODE_CB, NULL); 1904 } 1905 1906 if (rc != ECORE_SUCCESS) 1907 return rc; 1908 1909 qdev->vxlan_filter_type = filter_type; 1910 1911 DP_INFO(edev, "Enabling VXLAN tunneling\n"); 1912 qede_set_cmn_tunn_param(&tunn, clss, true, true); 1913 for_each_hwfn(edev, i) { 1914 p_hwfn = &edev->hwfns[i]; 1915 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, 1916 &tunn, ECORE_SPQ_MODE_CB, NULL); 1917 if (rc != ECORE_SUCCESS) { 1918 DP_ERR(edev, "Failed to update tunn_clss %u\n", 1919 tunn.vxlan.tun_cls); 1920 } 1921 } 1922 qdev->num_tunn_filters++; /* Filter added successfully */ 1923 break; 1924 case RTE_ETH_FILTER_DELETE: 1925 ucast.opcode = ECORE_FILTER_REMOVE; 1926 1927 if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) { 1928 rc = qede_mac_int_ops(eth_dev, &ucast, 0); 1929 } else { 1930 rc = qede_ucast_filter(eth_dev, &ucast, 0); 1931 if (rc == 0) 1932 rc = ecore_filter_ucast_cmd(edev, &ucast, 1933 ECORE_SPQ_MODE_CB, NULL); 1934 } 1935 if (rc != ECORE_SUCCESS) 1936 return rc; 1937 1938 qdev->vxlan_filter_type = filter_type; 1939 qdev->num_tunn_filters--; 1940 1941 /* Disable VXLAN if VXLAN filters become 0 */ 1942 if (qdev->num_tunn_filters == 0) { 1943 DP_INFO(edev, "Disabling VXLAN tunneling\n"); 1944 1945 /* Use 0 as tunnel mode */ 1946 qede_set_cmn_tunn_param(&tunn, clss, false, true); 1947 for_each_hwfn(edev, i) { 1948 p_hwfn = &edev->hwfns[i]; 1949 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn, 1950 ECORE_SPQ_MODE_CB, NULL); 1951 if (rc != ECORE_SUCCESS) { 1952 DP_ERR(edev, 1953 "Failed to update tunn_clss %u\n", 1954 tunn.vxlan.tun_cls); 1955 break; 1956 } 1957 } 1958 } 1959 break; 1960 default: 1961 DP_ERR(edev, "Unsupported operation %d\n", filter_op); 1962 return -EINVAL; 1963 } 1964 DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters); 1965 1966 return 0; 1967 } 1968 1969 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev, 1970 enum rte_filter_type filter_type, 1971 enum rte_filter_op filter_op, 1972 void *arg) 1973 { 1974 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); 1975 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); 1976 struct rte_eth_tunnel_filter_conf *filter_conf = 1977 (struct rte_eth_tunnel_filter_conf *)arg; 1978 1979 switch (filter_type) { 1980 case RTE_ETH_FILTER_TUNNEL: 1981 switch (filter_conf->tunnel_type) { 1982 case RTE_TUNNEL_TYPE_VXLAN: 1983 DP_INFO(edev, 1984 "Packet steering to the specified Rx queue" 1985 " is not supported with VXLAN tunneling"); 1986 return(qede_vxlan_tunn_config(eth_dev, filter_op, 1987 filter_conf)); 1988 /* Place holders for future tunneling support */ 1989 case RTE_TUNNEL_TYPE_GENEVE: 1990 case RTE_TUNNEL_TYPE_TEREDO: 1991 case RTE_TUNNEL_TYPE_NVGRE: 1992 case RTE_TUNNEL_TYPE_IP_IN_GRE: 1993 case RTE_L2_TUNNEL_TYPE_E_TAG: 1994 DP_ERR(edev, "Unsupported tunnel type %d\n", 1995 filter_conf->tunnel_type); 1996 return -EINVAL; 1997 case RTE_TUNNEL_TYPE_NONE: 1998 default: 1999 return 0; 2000 } 2001 break; 2002 case RTE_ETH_FILTER_FDIR: 2003 return qede_fdir_filter_conf(eth_dev, filter_op, arg); 2004 case RTE_ETH_FILTER_NTUPLE: 2005 return qede_ntuple_filter_conf(eth_dev, filter_op, arg); 2006 case RTE_ETH_FILTER_MACVLAN: 2007 case RTE_ETH_FILTER_ETHERTYPE: 2008 case RTE_ETH_FILTER_FLEXIBLE: 2009 case RTE_ETH_FILTER_SYN: 2010 case RTE_ETH_FILTER_HASH: 2011 case RTE_ETH_FILTER_L2_TUNNEL: 2012 case RTE_ETH_FILTER_MAX: 2013 default: 2014 DP_ERR(edev, "Unsupported filter type %d\n", 2015 filter_type); 2016 return -EINVAL; 2017 } 2018 2019 return 0; 2020 } 2021 2022 static const struct eth_dev_ops qede_eth_dev_ops = { 2023 .dev_configure = qede_dev_configure, 2024 .dev_infos_get = qede_dev_info_get, 2025 .rx_queue_setup = qede_rx_queue_setup, 2026 .rx_queue_release = qede_rx_queue_release, 2027 .tx_queue_setup = qede_tx_queue_setup, 2028 .tx_queue_release = qede_tx_queue_release, 2029 .dev_start = qede_dev_start, 2030 .dev_set_link_up = qede_dev_set_link_up, 2031 .dev_set_link_down = qede_dev_set_link_down, 2032 .link_update = qede_link_update, 2033 .promiscuous_enable = qede_promiscuous_enable, 2034 .promiscuous_disable = qede_promiscuous_disable, 2035 .allmulticast_enable = qede_allmulticast_enable, 2036 .allmulticast_disable = qede_allmulticast_disable, 2037 .dev_stop = qede_dev_stop, 2038 .dev_close = qede_dev_close, 2039 .stats_get = qede_get_stats, 2040 .stats_reset = qede_reset_stats, 2041 .xstats_get = qede_get_xstats, 2042 .xstats_reset = qede_reset_xstats, 2043 .xstats_get_names = qede_get_xstats_names, 2044 .mac_addr_add = qede_mac_addr_add, 2045 .mac_addr_remove = qede_mac_addr_remove, 2046 .mac_addr_set = qede_mac_addr_set, 2047 .vlan_offload_set = qede_vlan_offload_set, 2048 .vlan_filter_set = qede_vlan_filter_set, 2049 .flow_ctrl_set = qede_flow_ctrl_set, 2050 .flow_ctrl_get = qede_flow_ctrl_get, 2051 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, 2052 .rss_hash_update = qede_rss_hash_update, 2053 .rss_hash_conf_get = qede_rss_hash_conf_get, 2054 .reta_update = qede_rss_reta_update, 2055 .reta_query = qede_rss_reta_query, 2056 .mtu_set = qede_set_mtu, 2057 .filter_ctrl = qede_dev_filter_ctrl, 2058 .udp_tunnel_port_add = qede_udp_dst_port_add, 2059 .udp_tunnel_port_del = qede_udp_dst_port_del, 2060 }; 2061 2062 static const struct eth_dev_ops qede_eth_vf_dev_ops = { 2063 .dev_configure = qede_dev_configure, 2064 .dev_infos_get = qede_dev_info_get, 2065 .rx_queue_setup = qede_rx_queue_setup, 2066 .rx_queue_release = qede_rx_queue_release, 2067 .tx_queue_setup = qede_tx_queue_setup, 2068 .tx_queue_release = qede_tx_queue_release, 2069 .dev_start = qede_dev_start, 2070 .dev_set_link_up = qede_dev_set_link_up, 2071 .dev_set_link_down = qede_dev_set_link_down, 2072 .link_update = qede_link_update, 2073 .promiscuous_enable = qede_promiscuous_enable, 2074 .promiscuous_disable = qede_promiscuous_disable, 2075 .allmulticast_enable = qede_allmulticast_enable, 2076 .allmulticast_disable = qede_allmulticast_disable, 2077 .dev_stop = qede_dev_stop, 2078 .dev_close = qede_dev_close, 2079 .stats_get = qede_get_stats, 2080 .stats_reset = qede_reset_stats, 2081 .xstats_get = qede_get_xstats, 2082 .xstats_reset = qede_reset_xstats, 2083 .xstats_get_names = qede_get_xstats_names, 2084 .vlan_offload_set = qede_vlan_offload_set, 2085 .vlan_filter_set = qede_vlan_filter_set, 2086 .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, 2087 .rss_hash_update = qede_rss_hash_update, 2088 .rss_hash_conf_get = qede_rss_hash_conf_get, 2089 .reta_update = qede_rss_reta_update, 2090 .reta_query = qede_rss_reta_query, 2091 .mtu_set = qede_set_mtu, 2092 }; 2093 2094 static void qede_update_pf_params(struct ecore_dev *edev) 2095 { 2096 struct ecore_pf_params pf_params; 2097 2098 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 2099 pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS; 2100 pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; 2101 qed_ops->common->update_pf_params(edev, &pf_params); 2102 } 2103 2104 static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) 2105 { 2106 struct rte_pci_device *pci_dev; 2107 struct rte_pci_addr pci_addr; 2108 struct qede_dev *adapter; 2109 struct ecore_dev *edev; 2110 struct qed_dev_eth_info dev_info; 2111 struct qed_slowpath_params params; 2112 static bool do_once = true; 2113 uint8_t bulletin_change; 2114 uint8_t vf_mac[ETHER_ADDR_LEN]; 2115 uint8_t is_mac_forced; 2116 bool is_mac_exist; 2117 /* Fix up ecore debug level */ 2118 uint32_t dp_module = ~0 & ~ECORE_MSG_HW; 2119 uint8_t dp_level = ECORE_LEVEL_VERBOSE; 2120 uint32_t max_mac_addrs; 2121 int rc; 2122 2123 /* Extract key data structures */ 2124 adapter = eth_dev->data->dev_private; 2125 edev = &adapter->edev; 2126 pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 2127 pci_addr = pci_dev->addr; 2128 2129 PMD_INIT_FUNC_TRACE(edev); 2130 2131 snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u", 2132 pci_addr.bus, pci_addr.devid, pci_addr.function, 2133 eth_dev->data->port_id); 2134 2135 eth_dev->rx_pkt_burst = qede_recv_pkts; 2136 eth_dev->tx_pkt_burst = qede_xmit_pkts; 2137 eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts; 2138 2139 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 2140 DP_NOTICE(edev, false, 2141 "Skipping device init from secondary process\n"); 2142 return 0; 2143 } 2144 2145 rte_eth_copy_pci_info(eth_dev, pci_dev); 2146 2147 /* @DPDK */ 2148 edev->vendor_id = pci_dev->id.vendor_id; 2149 edev->device_id = pci_dev->id.device_id; 2150 2151 qed_ops = qed_get_eth_ops(); 2152 if (!qed_ops) { 2153 DP_ERR(edev, "Failed to get qed_eth_ops_pass\n"); 2154 return -EINVAL; 2155 } 2156 2157 DP_INFO(edev, "Starting qede probe\n"); 2158 2159 rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH, 2160 dp_module, dp_level, is_vf); 2161 2162 if (rc != 0) { 2163 DP_ERR(edev, "qede probe failed rc %d\n", rc); 2164 return -ENODEV; 2165 } 2166 2167 qede_update_pf_params(edev); 2168 2169 rte_intr_callback_register(&pci_dev->intr_handle, 2170 qede_interrupt_handler, (void *)eth_dev); 2171 2172 if (rte_intr_enable(&pci_dev->intr_handle)) { 2173 DP_ERR(edev, "rte_intr_enable() failed\n"); 2174 return -ENODEV; 2175 } 2176 2177 /* Start the Slowpath-process */ 2178 memset(¶ms, 0, sizeof(struct qed_slowpath_params)); 2179 params.int_mode = ECORE_INT_MODE_MSIX; 2180 params.drv_major = QEDE_PMD_VERSION_MAJOR; 2181 params.drv_minor = QEDE_PMD_VERSION_MINOR; 2182 params.drv_rev = QEDE_PMD_VERSION_REVISION; 2183 params.drv_eng = QEDE_PMD_VERSION_PATCH; 2184 strncpy((char *)params.name, QEDE_PMD_VER_PREFIX, 2185 QEDE_PMD_DRV_VER_STR_SIZE); 2186 2187 /* For CMT mode device do periodic polling for slowpath events. 2188 * This is required since uio device uses only one MSI-x 2189 * interrupt vector but we need one for each engine. 2190 */ 2191 if (edev->num_hwfns > 1 && IS_PF(edev)) { 2192 rc = rte_eal_alarm_set(timer_period * US_PER_S, 2193 qede_poll_sp_sb_cb, 2194 (void *)eth_dev); 2195 if (rc != 0) { 2196 DP_ERR(edev, "Unable to start periodic" 2197 " timer rc %d\n", rc); 2198 return -EINVAL; 2199 } 2200 } 2201 2202 rc = qed_ops->common->slowpath_start(edev, ¶ms); 2203 if (rc) { 2204 DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc); 2205 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2206 (void *)eth_dev); 2207 return -ENODEV; 2208 } 2209 2210 rc = qed_ops->fill_dev_info(edev, &dev_info); 2211 if (rc) { 2212 DP_ERR(edev, "Cannot get device_info rc %d\n", rc); 2213 qed_ops->common->slowpath_stop(edev); 2214 qed_ops->common->remove(edev); 2215 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2216 (void *)eth_dev); 2217 return -ENODEV; 2218 } 2219 2220 qede_alloc_etherdev(adapter, &dev_info); 2221 2222 adapter->ops->common->set_name(edev, edev->name); 2223 2224 if (!is_vf) 2225 adapter->dev_info.num_mac_filters = 2226 (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev), 2227 ECORE_MAC); 2228 else 2229 ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev), 2230 (uint32_t *)&adapter->dev_info.num_mac_filters); 2231 2232 /* Allocate memory for storing MAC addr */ 2233 eth_dev->data->mac_addrs = rte_zmalloc(edev->name, 2234 (ETHER_ADDR_LEN * 2235 adapter->dev_info.num_mac_filters), 2236 RTE_CACHE_LINE_SIZE); 2237 2238 if (eth_dev->data->mac_addrs == NULL) { 2239 DP_ERR(edev, "Failed to allocate MAC address\n"); 2240 qed_ops->common->slowpath_stop(edev); 2241 qed_ops->common->remove(edev); 2242 rte_eal_alarm_cancel(qede_poll_sp_sb_cb, 2243 (void *)eth_dev); 2244 return -ENOMEM; 2245 } 2246 2247 if (!is_vf) { 2248 ether_addr_copy((struct ether_addr *)edev->hwfns[0]. 2249 hw_info.hw_mac_addr, 2250 ð_dev->data->mac_addrs[0]); 2251 ether_addr_copy(ð_dev->data->mac_addrs[0], 2252 &adapter->primary_mac); 2253 } else { 2254 ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev), 2255 &bulletin_change); 2256 if (bulletin_change) { 2257 is_mac_exist = 2258 ecore_vf_bulletin_get_forced_mac( 2259 ECORE_LEADING_HWFN(edev), 2260 vf_mac, 2261 &is_mac_forced); 2262 if (is_mac_exist && is_mac_forced) { 2263 DP_INFO(edev, "VF macaddr received from PF\n"); 2264 ether_addr_copy((struct ether_addr *)&vf_mac, 2265 ð_dev->data->mac_addrs[0]); 2266 ether_addr_copy(ð_dev->data->mac_addrs[0], 2267 &adapter->primary_mac); 2268 } else { 2269 DP_NOTICE(edev, false, 2270 "No VF macaddr assigned\n"); 2271 } 2272 } 2273 } 2274 2275 eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops; 2276 2277 if (do_once) { 2278 qede_print_adapter_info(adapter); 2279 do_once = false; 2280 } 2281 2282 adapter->state = QEDE_DEV_INIT; 2283 2284 DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", 2285 adapter->primary_mac.addr_bytes[0], 2286 adapter->primary_mac.addr_bytes[1], 2287 adapter->primary_mac.addr_bytes[2], 2288 adapter->primary_mac.addr_bytes[3], 2289 adapter->primary_mac.addr_bytes[4], 2290 adapter->primary_mac.addr_bytes[5]); 2291 2292 return rc; 2293 } 2294 2295 static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev) 2296 { 2297 return qede_common_dev_init(eth_dev, 1); 2298 } 2299 2300 static int qede_eth_dev_init(struct rte_eth_dev *eth_dev) 2301 { 2302 return qede_common_dev_init(eth_dev, 0); 2303 } 2304 2305 static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev) 2306 { 2307 /* only uninitialize in the primary process */ 2308 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 2309 return 0; 2310 2311 /* safe to close dev here */ 2312 qede_dev_close(eth_dev); 2313 2314 eth_dev->dev_ops = NULL; 2315 eth_dev->rx_pkt_burst = NULL; 2316 eth_dev->tx_pkt_burst = NULL; 2317 2318 if (eth_dev->data->mac_addrs) 2319 rte_free(eth_dev->data->mac_addrs); 2320 2321 eth_dev->data->mac_addrs = NULL; 2322 2323 return 0; 2324 } 2325 2326 static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2327 { 2328 return qede_dev_common_uninit(eth_dev); 2329 } 2330 2331 static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev) 2332 { 2333 return qede_dev_common_uninit(eth_dev); 2334 } 2335 2336 static const struct rte_pci_id pci_id_qedevf_map[] = { 2337 #define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2338 { 2339 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF) 2340 }, 2341 { 2342 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV) 2343 }, 2344 { 2345 QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV) 2346 }, 2347 {.vendor_id = 0,} 2348 }; 2349 2350 static const struct rte_pci_id pci_id_qede_map[] = { 2351 #define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) 2352 { 2353 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E) 2354 }, 2355 { 2356 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S) 2357 }, 2358 { 2359 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40) 2360 }, 2361 { 2362 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25) 2363 }, 2364 { 2365 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100) 2366 }, 2367 { 2368 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50) 2369 }, 2370 { 2371 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G) 2372 }, 2373 { 2374 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G) 2375 }, 2376 { 2377 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G) 2378 }, 2379 { 2380 QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G) 2381 }, 2382 {.vendor_id = 0,} 2383 }; 2384 2385 static struct eth_driver rte_qedevf_pmd = { 2386 .pci_drv = { 2387 .id_table = pci_id_qedevf_map, 2388 .drv_flags = 2389 RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2390 .probe = rte_eth_dev_pci_probe, 2391 .remove = rte_eth_dev_pci_remove, 2392 }, 2393 .eth_dev_init = qedevf_eth_dev_init, 2394 .eth_dev_uninit = qedevf_eth_dev_uninit, 2395 .dev_private_size = sizeof(struct qede_dev), 2396 }; 2397 2398 static struct eth_driver rte_qede_pmd = { 2399 .pci_drv = { 2400 .id_table = pci_id_qede_map, 2401 .drv_flags = 2402 RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 2403 .probe = rte_eth_dev_pci_probe, 2404 .remove = rte_eth_dev_pci_remove, 2405 }, 2406 .eth_dev_init = qede_eth_dev_init, 2407 .eth_dev_uninit = qede_eth_dev_uninit, 2408 .dev_private_size = sizeof(struct qede_dev), 2409 }; 2410 2411 RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv); 2412 RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map); 2413 RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio"); 2414 RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv); 2415 RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map); 2416 RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio"); 2417