/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"

#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

struct ecore_l2_info {
	u32 queues;
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	osal_mutex_t lock;
};

enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_l2_info *p_l2_info;
	unsigned long **pp_qids;
	u32 i;

	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
		return ECORE_SUCCESS;

	p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
	if (!p_l2_info)
		return ECORE_NOMEM;
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->p_dev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
	} else {
		u8 rx = 0, tx = 0;

		ecore_vf_get_num_rxqs(p_hwfn, &rx);
		ecore_vf_get_num_txqs(p_hwfn, &tx);

		p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
	}

	pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
			       sizeof(unsigned long *) *
			       p_l2_info->queues);
	if (pp_qids == OSAL_NULL)
		return ECORE_NOMEM;
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
					  MAX_QUEUES_PER_QZONE / 8);
		if (pp_qids[i] == OSAL_NULL)
			return ECORE_NOMEM;
	}

#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
#endif

	return ECORE_SUCCESS;
}

void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
{
	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
		return;

	OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
}

void ecore_l2_free(struct ecore_hwfn *p_hwfn)
{
	u32 i;

	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
		return;

	if (p_hwfn->p_l2_info == OSAL_NULL)
		return;

	if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
		goto out_l2_info;

	/* Free until hit first uninitialized entry */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
			break;
		OSAL_VFREE(p_hwfn->p_dev,
			   p_hwfn->p_l2_info->pp_qid_usage[i]);
	}

#ifdef CONFIG_ECORE_LOCK_ALLOC
	/* Lock is last to initialize, if everything else was */
	if (i == p_hwfn->p_l2_info->queues)
		OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
#endif

	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = OSAL_NULL;
}

/* TODO - we'll need locking around these... */
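
/* Illustrative sketch (not part of the driver): pp_qid_usage keeps one
 * bitmap of MAX_QUEUES_PER_QZONE bits per queue-zone, so acquiring a qid
 * inside a zone is "find first zero bit, set it", and releasing it is a
 * clear. Assuming a hypothetical local bitmap:
 *
 *	unsigned long usage[MAX_QUEUES_PER_QZONE / (8 * sizeof(long))];
 *	u8 idx;
 *
 *	idx = (u8)OSAL_FIND_FIRST_ZERO_BIT(usage, MAX_QUEUES_PER_QZONE);
 *	if (idx < MAX_QUEUES_PER_QZONE)
 *		OSAL_SET_BIT(idx, usage);	(acquire)
 *	...
 *	OSAL_CLEAR_BIT(idx, usage);		(release)
 */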

static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
					  struct ecore_queue_cid *p_cid)
{
	struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
	u16 queue_id = p_cid->rel.queue_id;
	bool b_rc = true;
	u8 first;

	OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);

	if (queue_id >= p_l2_info->queues) {
		DP_NOTICE(p_hwfn, true,
			  "Requested to increase usage for qzone %04x out of %08x\n",
			  queue_id, p_l2_info->queues);
		b_rc = false;
		goto out;
	}

	first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
					     MAX_QUEUES_PER_QZONE);
	if (first >= MAX_QUEUES_PER_QZONE) {
		b_rc = false;
		goto out;
	}

	OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
	p_cid->qid_usage_idx = first;

out:
	OSAL_MUTEX_RELEASE(&p_l2_info->lock);
	return b_rc;
}

static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
					  struct ecore_queue_cid *p_cid)
{
	OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);

	OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
		       p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

	OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
}

void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
				 struct ecore_queue_cid *p_cid)
{
	bool b_legacy_vf = !!(p_cid->vf_legacy &
			      ECORE_QCID_LEGACY_VF_CID);

	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
	 * For legacy vf-queues, the CID doesn't go through here.
	 */
	if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
		_ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

	/* VFs maintain the index inside queue-zone on their own */
	if (p_cid->vfid == ECORE_QUEUE_CID_PF)
		ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);

	OSAL_VFREE(p_hwfn->p_dev, p_cid);
}

/* This internal function is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
			u16 opaque_fid, u32 cid,
			struct ecore_queue_start_common_params *p_params,
			bool b_is_rx,
			struct ecore_queue_cid_vf_params *p_vf_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
	if (p_cid == OSAL_NULL)
		return OSAL_NULL;

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->b_is_rx = b_is_rx;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill-in bits related to VFs' queues if information was provided */
	if (p_vf_params != OSAL_NULL) {
		p_cid->vfid = p_vf_params->vfid;
		p_cid->vf_qid = p_vf_params->vf_qid;
		p_cid->vf_legacy = p_vf_params->vf_legacy;
	} else {
		p_cid->vfid = ECORE_QUEUE_CID_PF;
	}

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->p_dev)) {
		p_cid->abs = p_cid->rel;

		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
240 */ 241 rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id); 242 if (rc != ECORE_SUCCESS) 243 goto fail; 244 245 rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, 246 &p_cid->abs.queue_id); 247 if (rc != ECORE_SUCCESS) 248 goto fail; 249 250 /* In case of a PF configuring its VF's queues, the stats-id is already 251 * absolute [since there's a single index that's suitable per-VF]. 252 */ 253 if (p_cid->vfid == ECORE_QUEUE_CID_PF) { 254 rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id, 255 &p_cid->abs.stats_id); 256 if (rc != ECORE_SUCCESS) 257 goto fail; 258 } else { 259 p_cid->abs.stats_id = p_cid->rel.stats_id; 260 } 261 262 out: 263 /* VF-images have provided the qid_usage_idx on their own. 264 * Otherwise, we need to allocate a unique one. 265 */ 266 if (!p_vf_params) { 267 if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid)) 268 goto fail; 269 } else { 270 p_cid->qid_usage_idx = p_vf_params->qid_usage_idx; 271 } 272 273 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 274 "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n", 275 p_cid->opaque_fid, p_cid->cid, 276 p_cid->rel.vport_id, p_cid->abs.vport_id, 277 p_cid->rel.queue_id, p_cid->qid_usage_idx, 278 p_cid->abs.queue_id, 279 p_cid->rel.stats_id, p_cid->abs.stats_id, 280 p_cid->sb_igu_id, p_cid->sb_idx); 281 282 return p_cid; 283 284 fail: 285 OSAL_VFREE(p_hwfn->p_dev, p_cid); 286 return OSAL_NULL; 287 } 288 289 struct ecore_queue_cid * 290 ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid, 291 struct ecore_queue_start_common_params *p_params, 292 bool b_is_rx, 293 struct ecore_queue_cid_vf_params *p_vf_params) 294 { 295 struct ecore_queue_cid *p_cid; 296 u8 vfid = ECORE_CXT_PF_CID; 297 bool b_legacy_vf = false; 298 u32 cid = 0; 299 300 /* In case of legacy VFs, The CID can be derived from the additional 301 * VF parameters - the VF assumes queue X uses CID X, so we can simply 302 * use the vf_qid for this purpose as well. 303 */ 304 if (p_vf_params) { 305 vfid = p_vf_params->vfid; 306 307 if (p_vf_params->vf_legacy & 308 ECORE_QCID_LEGACY_VF_CID) { 309 b_legacy_vf = true; 310 cid = p_vf_params->vf_qid; 311 } 312 } 313 314 /* Get a unique firmware CID for this queue, in case it's a PF. 315 * VF's don't need a CID as the queue configuration will be done 316 * by PF. 
317 */ 318 if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) { 319 if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, 320 &cid, vfid) != ECORE_SUCCESS) { 321 DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n"); 322 return OSAL_NULL; 323 } 324 } 325 326 p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 327 p_params, b_is_rx, p_vf_params); 328 if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf) 329 _ecore_cxt_release_cid(p_hwfn, cid, vfid); 330 331 return p_cid; 332 } 333 334 static struct ecore_queue_cid * 335 ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid, 336 bool b_is_rx, 337 struct ecore_queue_start_common_params *p_params) 338 { 339 return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx, 340 OSAL_NULL); 341 } 342 343 enum _ecore_status_t 344 ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn, 345 struct ecore_sp_vport_start_params *p_params) 346 { 347 struct vport_start_ramrod_data *p_ramrod = OSAL_NULL; 348 struct ecore_spq_entry *p_ent = OSAL_NULL; 349 struct ecore_sp_init_data init_data; 350 struct eth_vport_tpa_param *p_tpa; 351 u16 rx_mode = 0, tx_err = 0; 352 u8 abs_vport_id = 0; 353 enum _ecore_status_t rc = ECORE_NOTIMPL; 354 355 rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); 356 if (rc != ECORE_SUCCESS) 357 return rc; 358 359 /* Get SPQ entry */ 360 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 361 init_data.cid = ecore_spq_get_cid(p_hwfn); 362 init_data.opaque_fid = p_params->opaque_fid; 363 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 364 365 rc = ecore_sp_init_request(p_hwfn, &p_ent, 366 ETH_RAMROD_VPORT_START, 367 PROTOCOLID_ETH, &init_data); 368 if (rc != ECORE_SUCCESS) 369 return rc; 370 371 p_ramrod = &p_ent->ramrod.vport_start; 372 p_ramrod->vport_id = abs_vport_id; 373 374 p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu); 375 p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts; 376 p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; 377 p_ramrod->drop_ttl0_en = p_params->drop_ttl0; 378 p_ramrod->untagged = p_params->only_untagged; 379 p_ramrod->zero_placement_offset = p_params->zero_placement_offset; 380 381 SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); 382 SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); 383 384 p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode); 385 386 /* Handle requests for strict behavior on transmission errors */ 387 SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE, 388 p_params->b_err_illegal_vlan_mode ? 389 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 390 SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL, 391 p_params->b_err_small_pkt ? 392 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 393 SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR, 394 p_params->b_err_anti_spoof ? 395 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 396 SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS, 397 p_params->b_err_illegal_inband_mode ? 398 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 399 SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG, 400 p_params->b_err_vlan_insert_with_inband ? 401 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 402 SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION, 403 p_params->b_err_big_pkt ? 404 ETH_TX_ERR_ASSERT_MALICIOUS : 0); 405 SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME, 406 p_params->b_err_ctrl_frame ? 

static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct ecore_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	u16 capabilities = 0;
	int i, table_size;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
			  ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;

	p_config->capabilities = 0;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;
	p_config->capabilities = OSAL_CPU_TO_LE16(capabilities);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
				1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return ECORE_INVAL;

		p_config->indirection_table[i] =
		    OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

	return rc;
}
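
/* Worked example for the sizing above (hypothetical caller-side values):
 * with rss_table_size_log == 7, table_size is
 * min(ECORE_RSS_IND_TABLE_SIZE, 1 << 7) == 128, and every indirection
 * entry carries the absolute queue-id behind the caller's queue-cid:
 *
 *	p_rss->rss_table_size_log = 7;
 *	for (i = 0; i < 128; i++)
 *		p_rss->rss_ind_table[i] = my_cids[i % my_num_rxqs];
 *
 * my_cids/my_num_rxqs are assumed caller-side names; this function only
 * sees the resulting ecore_queue_cid pointers.
 */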

static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct ecore_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
					accept_flags.update_rx_mode_config;
	p_ramrod->common.update_tx_mode_flg =
					accept_flags.update_tx_mode_config;

#ifndef ASIC_ONLY
	/* On B0 emulation we cannot enable Tx, since this would cause writes
	 * to PVFC HW block which isn't implemented in emulation.
	 */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Non-Asic - prevent Tx mode in vport update\n");
		p_ramrod->common.update_tx_mode_flg = 0;
	}
#endif

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & ECORE_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
			   p_ramrod->common.vport_id, state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & ECORE_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & ECORE_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & ECORE_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
			   p_ramrod->common.vport_id, state);
	}
}
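
/* Worked example for the Rx mapping above (hypothetical configuration):
 * classic promiscuous mode accepts everything,
 *
 *	accept_flags.update_rx_mode_config = 1;
 *	accept_flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
 *					ECORE_ACCEPT_UCAST_UNMATCHED |
 *					ECORE_ACCEPT_MCAST_MATCHED |
 *					ECORE_ACCEPT_MCAST_UNMATCHED |
 *					ECORE_ACCEPT_BCAST;
 *
 * which clears UCAST_DROP_ALL and MCAST_DROP_ALL, and sets
 * UCAST_ACCEPT_UNMATCHED, MCAST_ACCEPT_ALL and BCAST_ACCEPT_ALL in
 * rx_mode.state.
 */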
579 */ 580 if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { 581 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 582 "Non-Asic - prevent Tx mode in vport update\n"); 583 p_ramrod->common.update_tx_mode_flg = 0; 584 } 585 #endif 586 587 /* Set Rx mode accept flags */ 588 if (p_ramrod->common.update_rx_mode_flg) { 589 u8 accept_filter = accept_flags.rx_accept_filter; 590 u16 state = 0; 591 592 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 593 !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) || 594 !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED))); 595 596 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED, 597 !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)); 598 599 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 600 !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) || 601 !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 602 603 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, 604 (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && 605 !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 606 607 SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, 608 !!(accept_filter & ECORE_ACCEPT_BCAST)); 609 610 p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state); 611 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 612 "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n", 613 p_ramrod->common.vport_id, state); 614 } 615 616 /* Set Tx mode accept flags */ 617 if (p_ramrod->common.update_tx_mode_flg) { 618 u8 accept_filter = accept_flags.tx_accept_filter; 619 u16 state = 0; 620 621 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, 622 !!(accept_filter & ECORE_ACCEPT_NONE)); 623 624 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, 625 !!(accept_filter & ECORE_ACCEPT_NONE)); 626 627 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL, 628 (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && 629 !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); 630 631 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, 632 !!(accept_filter & ECORE_ACCEPT_BCAST)); 633 634 p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state); 635 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 636 "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n", 637 p_ramrod->common.vport_id, state); 638 } 639 } 640 641 static void 642 ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod, 643 struct ecore_sge_tpa_params *p_params) 644 { 645 struct eth_vport_tpa_param *p_tpa; 646 u16 val; 647 648 if (!p_params) { 649 p_ramrod->common.update_tpa_param_flg = 0; 650 p_ramrod->common.update_tpa_en_flg = 0; 651 p_ramrod->common.update_tpa_param_flg = 0; 652 return; 653 } 654 655 p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg; 656 p_tpa = &p_ramrod->tpa_param; 657 p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg; 658 p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg; 659 p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg; 660 p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg; 661 662 p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg; 663 p_tpa->max_buff_num = p_params->max_buffers_per_cqe; 664 p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg; 665 p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg; 666 p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg; 667 p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num; 668 val = p_params->tpa_max_size; 669 p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val); 670 val = p_params->tpa_min_size_to_start; 671 p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val); 672 val = p_params->tpa_min_size_to_cont; 673 p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val); 674 } 

enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
		      struct ecore_sp_vport_update_params *p_params,
		      enum spq_mode comp_mode,
		      struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct ecore_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	u8 abs_vport_id = 0, val;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev)) {
		rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;

	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		if (p_ramrod->common.tx_switching_en ||
		    p_ramrod->common.update_tx_switching_en_flg) {
			DP_NOTICE(p_hwfn, false,
				  "FPGA - why are we seeing tx-switching? Overriding it\n");
			p_ramrod->common.tx_switching_en = 0;
			p_ramrod->common.update_tx_switching_en_flg = 1;
		}
#endif
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc != ECORE_SUCCESS) {
		/* Return spq entry which is taken in ecore_sp_init_request()*/
		ecore_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	ecore_sp_update_mcast_bin(p_ramrod, p_params);

	ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
	if (p_params->mtu) {
		p_ramrod->common.update_mtu_flg = 1;
		p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
	}

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
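
/* A minimal usage sketch (assumed caller, not part of the driver):
 * activating a vport once its queues are started. Only fields whose
 * update_* flag is set are applied by firmware.
 *
 *	struct ecore_sp_vport_update_params params = { 0 };
 *
 *	params.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	params.vport_id = 0;
 *	params.update_vport_active_rx_flg = 1;
 *	params.vport_active_rx_flg = 1;
 *	params.update_vport_active_tx_flg = 1;
 *	params.vport_active_tx_flg = 1;
 *
 *	rc = ecore_sp_vport_update(p_hwfn, &params,
 *				   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 */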
Overriding it\n"); 768 p_ramrod->common.tx_switching_en = 0; 769 p_ramrod->common.update_tx_switching_en_flg = 1; 770 } 771 #endif 772 p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; 773 774 p_cmn->anti_spoofing_en = p_params->anti_spoofing_en; 775 val = p_params->update_anti_spoofing_en_flg; 776 p_ramrod->common.update_anti_spoofing_en_flg = val; 777 778 rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); 779 if (rc != ECORE_SUCCESS) { 780 /* Return spq entry which is taken in ecore_sp_init_request()*/ 781 ecore_spq_return_entry(p_hwfn, p_ent); 782 return rc; 783 } 784 785 /* Update mcast bins for VFs, PF doesn't use this functionality */ 786 ecore_sp_update_mcast_bin(p_ramrod, p_params); 787 788 ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); 789 ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params); 790 if (p_params->mtu) { 791 p_ramrod->common.update_mtu_flg = 1; 792 p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu); 793 } 794 795 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 796 } 797 798 enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn, 799 u16 opaque_fid, u8 vport_id) 800 { 801 struct vport_stop_ramrod_data *p_ramrod; 802 struct ecore_sp_init_data init_data; 803 struct ecore_spq_entry *p_ent; 804 u8 abs_vport_id = 0; 805 enum _ecore_status_t rc; 806 807 if (IS_VF(p_hwfn->p_dev)) 808 return ecore_vf_pf_vport_stop(p_hwfn); 809 810 rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); 811 if (rc != ECORE_SUCCESS) 812 return rc; 813 814 /* Get SPQ entry */ 815 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 816 init_data.cid = ecore_spq_get_cid(p_hwfn); 817 init_data.opaque_fid = opaque_fid; 818 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 819 820 rc = ecore_sp_init_request(p_hwfn, &p_ent, 821 ETH_RAMROD_VPORT_STOP, 822 PROTOCOLID_ETH, &init_data); 823 if (rc != ECORE_SUCCESS) 824 return rc; 825 826 p_ramrod = &p_ent->ramrod.vport_stop; 827 p_ramrod->vport_id = abs_vport_id; 828 829 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 830 } 831 832 static enum _ecore_status_t 833 ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn, 834 struct ecore_filter_accept_flags *p_accept_flags) 835 { 836 struct ecore_sp_vport_update_params s_params; 837 838 OSAL_MEMSET(&s_params, 0, sizeof(s_params)); 839 OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags, 840 sizeof(struct ecore_filter_accept_flags)); 841 842 return ecore_vf_pf_vport_update(p_hwfn, &s_params); 843 } 844 845 enum _ecore_status_t 846 ecore_filter_accept_cmd(struct ecore_dev *p_dev, 847 u8 vport, 848 struct ecore_filter_accept_flags accept_flags, 849 u8 update_accept_any_vlan, 850 u8 accept_any_vlan, 851 enum spq_mode comp_mode, 852 struct ecore_spq_comp_cb *p_comp_data) 853 { 854 struct ecore_sp_vport_update_params vport_update_params; 855 int i, rc; 856 857 /* Prepare and send the vport rx_mode change */ 858 OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params)); 859 vport_update_params.vport_id = vport; 860 vport_update_params.accept_flags = accept_flags; 861 vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan; 862 vport_update_params.accept_any_vlan = accept_any_vlan; 863 864 for_each_hwfn(p_dev, i) { 865 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; 866 867 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 868 869 if (IS_VF(p_dev)) { 870 rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags); 871 if (rc != ECORE_SUCCESS) 872 return rc; 873 continue; 874 } 875 876 rc = ecore_sp_vport_update(p_hwfn, 

enum _ecore_status_t
ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   u16 bd_max_bytes,
			   dma_addr_t bd_chain_phys_addr,
			   dma_addr_t cqe_pbl_addr,
			   u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
		   p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      ECORE_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "",
			   p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
	}

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
			    struct ecore_queue_cid *p_cid,
			    u16 bd_max_bytes,
			    dma_addr_t bd_chain_phys_addr,
			    dma_addr_t cqe_pbl_addr,
			    u16 cqe_pbl_size,
			    void OSAL_IOMEM * *pp_prod)
{
	u32 init_prod_val = 0;

	*pp_prod = (u8 OSAL_IOMEM *)
		   p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
					  bd_max_bytes,
					  bd_chain_phys_addr,
					  cqe_pbl_addr, cqe_pbl_size);
}
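
/* Note on pp_prod above - the returned pointer is the BAR0 (GTT) address
 * of this queue's Rx producers in MSTORM RAM. A hedged caller-side sketch
 * (the packing of the producer word is owned by the ETH driver proper,
 * not this file): after posting new Rx buffers, the producer value is
 * written back through the same internal-ram helper used for the
 * zero-init above, e.g.
 *
 *	u32 prod_val = ...;	(packed bd/cqe producer word)
 *
 *	__internal_ram_wr(p_hwfn, p_prod, sizeof(prod_val), &prod_val);
 */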
" [legacy]" : "", 951 p_cid->vf_qid); 952 p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf; 953 } 954 955 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 956 } 957 958 static enum _ecore_status_t 959 ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn, 960 struct ecore_queue_cid *p_cid, 961 u16 bd_max_bytes, 962 dma_addr_t bd_chain_phys_addr, 963 dma_addr_t cqe_pbl_addr, 964 u16 cqe_pbl_size, 965 void OSAL_IOMEM * *pp_prod) 966 { 967 u32 init_prod_val = 0; 968 969 *pp_prod = (u8 OSAL_IOMEM *) 970 p_hwfn->regview + 971 GTT_BAR0_MAP_REG_MSDM_RAM + 972 MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id); 973 974 /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ 975 __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), 976 (u32 *)(&init_prod_val)); 977 978 return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid, 979 bd_max_bytes, 980 bd_chain_phys_addr, 981 cqe_pbl_addr, cqe_pbl_size); 982 } 983 984 enum _ecore_status_t 985 ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn, 986 u16 opaque_fid, 987 struct ecore_queue_start_common_params *p_params, 988 u16 bd_max_bytes, 989 dma_addr_t bd_chain_phys_addr, 990 dma_addr_t cqe_pbl_addr, 991 u16 cqe_pbl_size, 992 struct ecore_rxq_start_ret_params *p_ret_params) 993 { 994 struct ecore_queue_cid *p_cid; 995 enum _ecore_status_t rc; 996 997 /* Allocate a CID for the queue */ 998 p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params); 999 if (p_cid == OSAL_NULL) 1000 return ECORE_NOMEM; 1001 1002 if (IS_PF(p_hwfn->p_dev)) 1003 rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid, 1004 bd_max_bytes, 1005 bd_chain_phys_addr, 1006 cqe_pbl_addr, cqe_pbl_size, 1007 &p_ret_params->p_prod); 1008 else 1009 rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid, 1010 bd_max_bytes, 1011 bd_chain_phys_addr, 1012 cqe_pbl_addr, 1013 cqe_pbl_size, 1014 &p_ret_params->p_prod); 1015 1016 /* Provide the caller with a reference to as handler */ 1017 if (rc != ECORE_SUCCESS) 1018 ecore_eth_queue_cid_release(p_hwfn, p_cid); 1019 else 1020 p_ret_params->p_handle = (void *)p_cid; 1021 1022 return rc; 1023 } 1024 1025 enum _ecore_status_t 1026 ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn, 1027 void **pp_rxq_handles, 1028 u8 num_rxqs, 1029 u8 complete_cqe_flg, 1030 u8 complete_event_flg, 1031 enum spq_mode comp_mode, 1032 struct ecore_spq_comp_cb *p_comp_data) 1033 { 1034 struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL; 1035 struct ecore_spq_entry *p_ent = OSAL_NULL; 1036 struct ecore_sp_init_data init_data; 1037 struct ecore_queue_cid *p_cid; 1038 enum _ecore_status_t rc = ECORE_NOTIMPL; 1039 u8 i; 1040 1041 if (IS_VF(p_hwfn->p_dev)) 1042 return ecore_vf_pf_rxqs_update(p_hwfn, 1043 (struct ecore_queue_cid **) 1044 pp_rxq_handles, 1045 num_rxqs, 1046 complete_cqe_flg, 1047 complete_event_flg); 1048 1049 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1050 init_data.comp_mode = comp_mode; 1051 init_data.p_comp_data = p_comp_data; 1052 1053 for (i = 0; i < num_rxqs; i++) { 1054 p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i]; 1055 1056 /* Get SPQ entry */ 1057 init_data.cid = p_cid->cid; 1058 init_data.opaque_fid = p_cid->opaque_fid; 1059 1060 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1061 ETH_RAMROD_RX_QUEUE_UPDATE, 1062 PROTOCOLID_ETH, &init_data); 1063 if (rc != ECORE_SUCCESS) 1064 return rc; 1065 1066 p_ramrod = &p_ent->ramrod.rx_queue_update; 1067 p_ramrod->vport_id = p_cid->abs.vport_id; 1068 1069 p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id); 1070 p_ramrod->complete_cqe_flg = complete_cqe_flg; 1071 p_ramrod->complete_event_flg = 

static enum _ecore_status_t
ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   bool b_eq_completion_only,
			   bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
				       b_eq_completion_only;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_rxq,
					     bool eq_completion_only,
					     bool cqe_completion)
{
	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
						eq_completion_only,
						cqe_completion);
	else
		rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (rc == ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

enum _ecore_status_t
ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   dma_addr_t pbl_addr, u16 pbl_size,
			   u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_TX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
			    struct ecore_queue_cid *p_cid,
			    u8 tc,
			    dma_addr_t pbl_addr, u16 pbl_size,
			    void OSAL_IOMEM * *pp_doorbell)
{
	enum _ecore_status_t rc;

	/* TODO - set tc in the pq_params for multi-cos */
	rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
					pbl_addr, pbl_size,
					ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = (u8 OSAL_IOMEM *)
		       p_hwfn->doorbells +
		       DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);

	return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u8 tc,
			 dma_addr_t pbl_addr, u16 pbl_size,
			 struct ecore_txq_start_ret_params *p_ret_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
	if (p_cid == OSAL_NULL)
		return ECORE_INVAL;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
						 pbl_addr, pbl_size,
						 &p_ret_params->p_doorbell);
	else
		rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
					   pbl_addr, pbl_size,
					   &p_ret_params->p_doorbell);

	if (rc != ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_TX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_handle)
{
	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
	enum _ecore_status_t rc;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);

	if (rc == ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}
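
/* A hedged caller-side sketch (names assumed, not part of the driver):
 * the p_doorbell returned by ecore_eth_tx_queue_start() is where the Tx
 * path rings the queue after posting BDs to the PBL-backed chain:
 *
 *	struct ecore_txq_start_ret_params ret = { 0 };
 *
 *	rc = ecore_eth_tx_queue_start(p_hwfn, opaque_fid, &params,
 *				      0 (tc), pbl_addr, pbl_size, &ret);
 *	...
 *	DIRECT_REG_WR(p_hwfn, ret.p_doorbell, db_value);
 *
 * db_value encodes the new producer; its layout is defined by the DQ HW
 * block and built by the Tx path, not here.
 */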

static enum eth_filter_action
ecore_filter_action(enum ecore_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case ECORE_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case ECORE_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case ECORE_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

static enum _ecore_status_t
ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  struct vport_filter_update_ramrod_data **pp_ramrod,
			  struct ecore_spq_entry **pp_ent,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct ecore_sp_init_data init_data;
	enum eth_filter_action action;
	enum _ecore_status_t rc;

	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			    &vport_to_remove_from);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			    &vport_to_add_to);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, pp_ent,
				   ETH_RAMROD_FILTERS_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Non-Asic - prevent Tx filters\n");
		p_ramrod->filter_cmd_hdr.tx = 0;
	}
#endif

	switch (p_filter_cmd->opcode) {
	case ECORE_FILTER_REPLACE:
	case ECORE_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
		break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
		break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case ECORE_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC;
		break;
	case ECORE_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN;
		break;
	case ECORE_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR;
		break;
	case ECORE_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
		break;
	case ECORE_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
		break;
	case ECORE_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
		break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
		break;
	case ECORE_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI;
		break;
	case ECORE_FILTER_UNUSED: /* @DPDK */
		p_first_filter->type = MAX_ETH_FILTER_TYPE;
		break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
		ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
				      &p_first_filter->mac_mid,
				      &p_first_filter->mac_lsb,
				      (u8 *)p_filter_cmd->mac);

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		OSAL_MEMCPY(p_second_filter, p_first_filter,
			    sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = ecore_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn, true,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return ECORE_NOTIMPL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id =
		    (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
		    vport_to_remove_from : vport_to_add_to;
	}

	return ECORE_SUCCESS;
}
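
/* Worked example for the two-command opcodes above: a MOVE for
 * vport_to_remove_from == 1 and vport_to_add_to == 2 yields cmd_cnt == 2
 * with, conceptually,
 *
 *	filter_cmds[0]: action = ETH_FILTER_ACTION_REMOVE, vport_id = 1
 *	filter_cmds[1]: action = ETH_FILTER_ACTION_ADD,    vport_id = 2
 *
 * while a REPLACE keeps both commands on vport_to_add_to, first
 * ETH_FILTER_ACTION_REMOVE_ALL and then ETH_FILTER_ACTION_ADD of the new
 * filter. Both entries share the same type/MAC/VLAN/VNI fields.
 */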

enum _ecore_status_t
ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct eth_filter_cmd_header *p_header;
	enum _ecore_status_t rc;

	rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				       &p_ramrod, &p_ent,
				       comp_mode, p_comp_data);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0], p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2], p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4], p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return ECORE_SUCCESS;
}
1492 "VLAN" : "MAC & VLAN"), 1493 p_ramrod->filter_cmd_hdr.cmd_cnt, 1494 p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter); 1495 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 1496 "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", 1497 p_filter_cmd->vport_to_add_to, 1498 p_filter_cmd->vport_to_remove_from, 1499 p_filter_cmd->mac[0], p_filter_cmd->mac[1], 1500 p_filter_cmd->mac[2], p_filter_cmd->mac[3], 1501 p_filter_cmd->mac[4], p_filter_cmd->mac[5], 1502 p_filter_cmd->vlan); 1503 1504 return ECORE_SUCCESS; 1505 } 1506 1507 /******************************************************************************* 1508 * Description: 1509 * Calculates crc 32 on a buffer 1510 * Note: crc32_length MUST be aligned to 8 1511 * Return: 1512 ******************************************************************************/ 1513 static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed) 1514 { 1515 u32 byte = 0, bit = 0, crc32_result = crc32_seed; 1516 u8 msb = 0, current_byte = 0; 1517 1518 if ((crc32_packet == OSAL_NULL) || 1519 (crc32_length == 0) || ((crc32_length % 8) != 0)) { 1520 return crc32_result; 1521 } 1522 1523 for (byte = 0; byte < crc32_length; byte++) { 1524 current_byte = crc32_packet[byte]; 1525 for (bit = 0; bit < 8; bit++) { 1526 msb = (u8)(crc32_result >> 31); 1527 crc32_result = crc32_result << 1; 1528 if (msb != (0x1 & (current_byte >> bit))) { 1529 crc32_result = crc32_result ^ CRC32_POLY; 1530 crc32_result |= 1; 1531 } 1532 } 1533 } 1534 1535 return crc32_result; 1536 } 1537 1538 static u32 ecore_crc32c_le(u32 seed, u8 *mac) 1539 { 1540 u32 packet_buf[2] = { 0 }; 1541 1542 OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6); 1543 return ecore_calc_crc32c((u8 *)packet_buf, 8, seed); 1544 } 1545 1546 u8 ecore_mcast_bin_from_mac(u8 *mac) 1547 { 1548 u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac); 1549 1550 return crc & 0xff; 1551 } 1552 1553 static enum _ecore_status_t 1554 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, 1555 struct ecore_filter_mcast *p_filter_cmd, 1556 enum spq_mode comp_mode, 1557 struct ecore_spq_comp_cb *p_comp_data) 1558 { 1559 unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; 1560 struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; 1561 struct ecore_spq_entry *p_ent = OSAL_NULL; 1562 struct ecore_sp_init_data init_data; 1563 u8 abs_vport_id = 0; 1564 enum _ecore_status_t rc; 1565 int i; 1566 1567 if (p_filter_cmd->opcode == ECORE_FILTER_ADD) 1568 rc = ecore_fw_vport(p_hwfn, 1569 p_filter_cmd->vport_to_add_to, 1570 &abs_vport_id); 1571 else 1572 rc = ecore_fw_vport(p_hwfn, 1573 p_filter_cmd->vport_to_remove_from, 1574 &abs_vport_id); 1575 if (rc != ECORE_SUCCESS) 1576 return rc; 1577 1578 /* Get SPQ entry */ 1579 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1580 init_data.cid = ecore_spq_get_cid(p_hwfn); 1581 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1582 init_data.comp_mode = comp_mode; 1583 init_data.p_comp_data = p_comp_data; 1584 1585 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1586 ETH_RAMROD_VPORT_UPDATE, 1587 PROTOCOLID_ETH, &init_data); 1588 if (rc != ECORE_SUCCESS) { 1589 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); 1590 return rc; 1591 } 1592 1593 p_ramrod = &p_ent->ramrod.vport_update; 1594 p_ramrod->common.update_approx_mcast_flg = 1; 1595 1596 /* explicitly clear out the entire vector */ 1597 OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 1598 0, sizeof(p_ramrod->approx_mcast.bins)); 1599 OSAL_MEMSET(bins, 0, sizeof(unsigned long) * 1600 

static enum _ecore_status_t
ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
			  struct ecore_filter_mcast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data)
{
	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc;
	int i;

	if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
		rc = ecore_fw_vport(p_hwfn,
				    p_filter_cmd->vport_to_add_to,
				    &abs_vport_id);
	else
		rc = ecore_fw_vport(p_hwfn,
				    p_filter_cmd->vport_to_remove_from,
				    &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
		    0, sizeof(p_ramrod->approx_mcast.bins));
	OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
		    ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* filter ADD op is explicit set op and it removes
	 * any existing filters for the vport.
	 */
	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			OSAL_SET_BIT(bit, bins);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;
			u32 *p_bins = (u32 *)bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);

	return rc;
}

enum _ecore_status_t
ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_mcast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
	     (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
		return ECORE_INVAL;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		if (IS_VF(p_dev)) {
			ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		rc = ecore_sp_eth_filter_mcast(p_hwfn,
					       p_filter_cmd,
					       comp_mode, p_comp_data);
		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

enum _ecore_status_t
ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_ucast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(p_dev)) {
			rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_eth_filter_ucast(p_hwfn,
					       opaque_fid,
					       p_filter_cmd,
					       comp_mode, p_comp_data);
		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}

/* Statistics related code */
static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
					     u32 *p_addr, u32 *p_len,
					     u16 statistics_bin)
{
	if (IS_PF(p_hwfn->p_dev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}

static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
					 statistics_bin);

	OSAL_MEMSET(&pstats, 0, sizeof(pstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->common.tx_ucast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->common.tx_mcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->common.tx_bcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->common.tx_ucast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->common.tx_mcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->common.tx_bcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->common.tx_err_drop_pkts +=
	    HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->p_dev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	OSAL_MEMSET(&tstats, 0, sizeof(tstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->common.mftag_filter_discards +=
	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->common.mac_filter_discards +=
	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
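
/* The storm statistics above are 64-bit counters kept by firmware as
 * {hi, lo} register pairs; HILO_64_REGPAIR() folds a pair into one u64,
 * conceptually (a sketch of the semantics, not the macro's definition):
 *
 *	((u64)pair.hi << 32) + (u64)pair.lo
 *
 * The += accumulation lets a caller sum the M/U/T/P-storm contributions
 * of several hwfns into one ecore_eth_stats without zeroing in between.
 */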
static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->p_dev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
			      TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	OSAL_MEMSET(&tstats, 0, sizeof(tstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->common.mftag_filter_discards +=
		HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->common.mac_filter_discards +=
		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
					     u32 *p_addr, u32 *p_len,
					     u16 statistics_bin)
{
	if (IS_PF(p_hwfn->p_dev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
			  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}

static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
					 statistics_bin);

	OSAL_MEMSET(&ustats, 0, sizeof(ustats));
	ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->common.rx_ucast_bytes +=
		HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->common.rx_mcast_bytes +=
		HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->common.rx_bcast_bytes +=
		HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->common.rx_ucast_pkts +=
		HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->common.rx_mcast_pkts +=
		HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->common.rx_bcast_pkts +=
		HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
					     u32 *p_addr, u32 *p_len,
					     u16 statistics_bin)
{
	if (IS_PF(p_hwfn->p_dev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
			  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}

static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
					 statistics_bin);

	OSAL_MEMSET(&mstats, 0, sizeof(mstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->common.no_buff_discards +=
		HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->common.packet_too_big_discard +=
		HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->common.ttl0_discard +=
		HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->common.tpa_coalesced_pkts +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->common.tpa_coalesced_events +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->common.tpa_aborts_num +=
		HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->common.tpa_coalesced_bytes +=
		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
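/* Summary of the per-storm split used above (descriptive only): the Pstorm
 * supplies Tx counters, the Ustorm Rx counters, the Mstorm discard/TPA
 * counters, and the Tstorm per-port filter-discard counters. PFs read them
 * directly from the BAR0-mapped SDM RAM, while VFs use the addresses the
 * PF advertised in the ACQUIRE response TLV.
 */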
static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_eth_stats *p_stats)
{
	struct ecore_eth_stats_common *p_common = &p_stats->common;
	struct port_stats port_stats;
	int j;

	OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));

	ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
			  p_hwfn->mcp_info->port_addr +
			  OFFSETOF(struct public_port, stats),
			  sizeof(port_stats));

	p_common->rx_64_byte_packets += port_stats.eth.r64;
	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_common->rx_crc_errors += port_stats.eth.rfcs;
	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_common->rx_pause_frames += port_stats.eth.rxpf;
	p_common->rx_pfc_frames += port_stats.eth.rxpp;
	p_common->rx_align_errors += port_stats.eth.raln;
	p_common->rx_carrier_errors += port_stats.eth.rfcr;
	p_common->rx_oversize_packets += port_stats.eth.rovr;
	p_common->rx_jabbers += port_stats.eth.rjbr;
	p_common->rx_undersize_packets += port_stats.eth.rund;
	p_common->rx_fragments += port_stats.eth.rfrg;
	p_common->tx_64_byte_packets += port_stats.eth.t64;
	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_common->tx_pause_frames += port_stats.eth.txpf;
	p_common->tx_pfc_frames += port_stats.eth.txpp;
	p_common->rx_mac_bytes += port_stats.eth.rbyte;
	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_common->tx_mac_bytes += port_stats.eth.tbyte;
	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
		p_common->brb_discards += port_stats.brb.brb_discard[j];
	}

	if (ECORE_IS_BB(p_hwfn->p_dev)) {
		struct ecore_eth_stats_bb *p_bb = &p_stats->bb;

		p_bb->rx_1519_to_1522_byte_packets +=
			port_stats.eth.u0.bb0.r1522;
		p_bb->rx_1519_to_2047_byte_packets +=
			port_stats.eth.u0.bb0.r2047;
		p_bb->rx_2048_to_4095_byte_packets +=
			port_stats.eth.u0.bb0.r4095;
		p_bb->rx_4096_to_9216_byte_packets +=
			port_stats.eth.u0.bb0.r9216;
		p_bb->rx_9217_to_16383_byte_packets +=
			port_stats.eth.u0.bb0.r16383;
		p_bb->tx_1519_to_2047_byte_packets +=
			port_stats.eth.u1.bb1.t2047;
		p_bb->tx_2048_to_4095_byte_packets +=
			port_stats.eth.u1.bb1.t4095;
		p_bb->tx_4096_to_9216_byte_packets +=
			port_stats.eth.u1.bb1.t9216;
		p_bb->tx_9217_to_16383_byte_packets +=
			port_stats.eth.u1.bb1.t16383;
		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
	} else {
		struct ecore_eth_stats_ah *p_ah = &p_stats->ah;

		p_ah->rx_1519_to_max_byte_packets +=
			port_stats.eth.u0.ah0.r1519_to_max;
		/* Accumulate (+=) like every other counter here; a plain
		 * assignment would silently drop the other hwfn's reading.
		 */
		p_ah->tx_1519_to_max_byte_packets +=
			port_stats.eth.u1.ah1.t1519_to_max;
	}
}

void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     struct ecore_eth_stats *stats,
			     u16 statistics_bin, bool b_get_port_stats)
{
	__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
	__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

#ifndef ASIC_ONLY
	/* Avoid getting PORT stats for emulation. */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return;
#endif

	if (b_get_port_stats && p_hwfn->mcp_info)
		__ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
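/* The MAC/port counters above are not read from storm RAM; they come from
 * the management firmware's public_port shared-memory block (the
 * port_addr + OFFSETOF(struct public_port, stats) read). The BB/AH split
 * at the end exists because the two chip families bucket large frames
 * differently: BB keeps discrete 1519..16383 ranges while AH folds
 * everything above 1518 bytes into a single 1519-to-max counter.
 */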
static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
				   struct ecore_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	OSAL_MEMSET(stats, 0, sizeof(*stats));

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
			ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
		bool b_get_port_stats;

		if (IS_PF(p_dev)) {
			/* The main vport's index is the first relative one */
			if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(p_dev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn);
		__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
					b_get_port_stats);

out:
		if (IS_PF(p_dev) && p_ptt)
			ecore_ptt_release(p_hwfn, p_ptt);
	}
}

void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats)
{
	u32 i;

	if (!p_dev) {
		OSAL_MEMSET(stats, 0, sizeof(*stats));
		return;
	}

	_ecore_get_vport_stats(p_dev, stats);

	if (!p_dev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
}

/* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
void ecore_reset_vport_stats(struct ecore_dev *p_dev)
{
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
			ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(p_dev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		OSAL_MEMSET(&mstats, 0, sizeof(mstats));
		__ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		OSAL_MEMSET(&ustats, 0, sizeof(ustats));
		__ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		OSAL_MEMSET(&pstats, 0, sizeof(pstats));
		__ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(p_dev))
			ecore_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 */
	if (!p_dev->reset_stats)
		DP_INFO(p_dev, "Reset stats not allocated\n");
	else
		_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
}
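/* A note on the baseline scheme above: hardware port counters cannot always
 * be cleared, so "resetting" is emulated by snapshotting the current
 * readings into p_dev->reset_stats and subtracting that snapshot from every
 * subsequent ecore_get_vport_stats() result. In other words (sketch):
 *
 *	reported[i] = raw[i] - baseline[i];
 *
 * where both sides are viewed as flat arrays of u64 counters, which is why
 * ecore_eth_stats must remain composed purely of u64 fields.
 */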
"Enable" : "Disable"); 2096 } 2097 2098 enum _ecore_status_t 2099 ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn, 2100 struct ecore_spq_comp_cb *p_cb, 2101 dma_addr_t p_addr, u16 length, 2102 u16 qid, u8 vport_id, 2103 bool b_is_add) 2104 { 2105 struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL; 2106 struct ecore_spq_entry *p_ent = OSAL_NULL; 2107 struct ecore_sp_init_data init_data; 2108 u16 abs_rx_q_id = 0; 2109 u8 abs_vport_id = 0; 2110 enum _ecore_status_t rc = ECORE_NOTIMPL; 2111 2112 rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); 2113 if (rc != ECORE_SUCCESS) 2114 return rc; 2115 2116 rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); 2117 if (rc != ECORE_SUCCESS) 2118 return rc; 2119 2120 /* Get SPQ entry */ 2121 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 2122 init_data.cid = ecore_spq_get_cid(p_hwfn); 2123 2124 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 2125 2126 if (p_cb) { 2127 init_data.comp_mode = ECORE_SPQ_MODE_CB; 2128 init_data.p_comp_data = p_cb; 2129 } else { 2130 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 2131 } 2132 2133 rc = ecore_sp_init_request(p_hwfn, &p_ent, 2134 ETH_RAMROD_GFT_UPDATE_FILTER, 2135 PROTOCOLID_ETH, &init_data); 2136 if (rc != ECORE_SUCCESS) 2137 return rc; 2138 2139 p_ramrod = &p_ent->ramrod.rx_update_gft; 2140 2141 DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr); 2142 p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length); 2143 2144 p_ramrod->action_icid_valid = 0; 2145 p_ramrod->action_icid = 0; 2146 2147 p_ramrod->rx_qid_valid = 1; 2148 p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id); 2149 2150 p_ramrod->flow_id_valid = 0; 2151 p_ramrod->flow_id = 0; 2152 2153 p_ramrod->vport_id = abs_vport_id; 2154 p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER 2155 : GFT_DELETE_FILTER; 2156 2157 DP_VERBOSE(p_hwfn, ECORE_MSG_SP, 2158 "V[%0x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n", 2159 abs_vport_id, abs_rx_q_id, 2160 b_is_add ? 
"Adding" : "Removing", 2161 (unsigned long)p_addr, length); 2162 2163 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 2164 } 2165 2166 int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn, 2167 struct ecore_ptt *p_ptt, 2168 struct ecore_queue_cid *p_cid, 2169 u16 *p_rx_coal) 2170 { 2171 u32 coalesce, address, is_valid; 2172 struct cau_sb_entry sb_entry; 2173 u8 timer_res; 2174 enum _ecore_status_t rc; 2175 2176 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2177 p_cid->sb_igu_id * sizeof(u64), 2178 (u64)(osal_uintptr_t)&sb_entry, 2, 0); 2179 if (rc != ECORE_SUCCESS) { 2180 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2181 return rc; 2182 } 2183 2184 timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0); 2185 2186 address = BAR0_MAP_REG_USDM_RAM + 2187 USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 2188 coalesce = ecore_rd(p_hwfn, p_ptt, address); 2189 2190 is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); 2191 if (!is_valid) 2192 return ECORE_INVAL; 2193 2194 coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); 2195 *p_rx_coal = (u16)(coalesce << timer_res); 2196 2197 return ECORE_SUCCESS; 2198 } 2199 2200 int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn, 2201 struct ecore_ptt *p_ptt, 2202 struct ecore_queue_cid *p_cid, 2203 u16 *p_tx_coal) 2204 { 2205 u32 coalesce, address, is_valid; 2206 struct cau_sb_entry sb_entry; 2207 u8 timer_res; 2208 enum _ecore_status_t rc; 2209 2210 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + 2211 p_cid->sb_igu_id * sizeof(u64), 2212 (u64)(osal_uintptr_t)&sb_entry, 2, 0); 2213 if (rc != ECORE_SUCCESS) { 2214 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); 2215 return rc; 2216 } 2217 2218 timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1); 2219 2220 address = BAR0_MAP_REG_XSDM_RAM + 2221 XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); 2222 coalesce = ecore_rd(p_hwfn, p_ptt, address); 2223 2224 is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); 2225 if (!is_valid) 2226 return ECORE_INVAL; 2227 2228 coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); 2229 *p_tx_coal = (u16)(coalesce << timer_res); 2230 2231 return ECORE_SUCCESS; 2232 } 2233 2234 enum _ecore_status_t 2235 ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal, 2236 void *handle) 2237 { 2238 struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle; 2239 enum _ecore_status_t rc = ECORE_SUCCESS; 2240 struct ecore_ptt *p_ptt; 2241 2242 if (IS_VF(p_hwfn->p_dev)) { 2243 rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid); 2244 if (rc != ECORE_SUCCESS) 2245 DP_NOTICE(p_hwfn, false, 2246 "Unable to read queue calescing\n"); 2247 2248 return rc; 2249 } 2250 2251 p_ptt = ecore_ptt_acquire(p_hwfn); 2252 if (!p_ptt) 2253 return ECORE_AGAIN; 2254 2255 if (p_cid->b_is_rx) { 2256 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); 2257 if (rc != ECORE_SUCCESS) 2258 goto out; 2259 } else { 2260 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); 2261 if (rc != ECORE_SUCCESS) 2262 goto out; 2263 } 2264 2265 out: 2266 ecore_ptt_release(p_hwfn, p_ptt); 2267 2268 return rc; 2269 } 2270