/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_dcbx.h"
#include "ecore_sriov.h"

enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
					   struct ecore_spq_entry **pp_ent,
					   u8 cmd,
					   u8 protocol,
					   struct ecore_sp_init_data *p_data)
{
	u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc;

	if (!pp_ent)
		return ECORE_INVAL;

	/* Get an SPQ entry */
	rc = ecore_spq_get_entry(p_hwfn, pp_ent);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill the SPQ entry */
	p_ent = *pp_ent;
	p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
	p_ent->elem.hdr.cmd_id = cmd;
	p_ent->elem.hdr.protocol_id = protocol;
	p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
	p_ent->comp_mode = p_data->comp_mode;
	p_ent->comp_done.done = 0;

	switch (p_ent->comp_mode) {
	case ECORE_SPQ_MODE_EBLOCK:
		p_ent->comp_cb.cookie = &p_ent->comp_done;
		break;

	case ECORE_SPQ_MODE_BLOCK:
		if (!p_data->p_comp_data)
			return ECORE_INVAL;

		p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
		break;

	case ECORE_SPQ_MODE_CB:
		if (!p_data->p_comp_data)
			p_ent->comp_cb.function = OSAL_NULL;
		else
			p_ent->comp_cb = *p_data->p_comp_data;
		break;

	default:
		DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return ECORE_INVAL;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
		   opaque_cid, cmd, protocol,
		   (unsigned long)&p_ent->ramrod,
		   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
			   ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

	return ECORE_SUCCESS;
}

static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
{
	switch (type) {
	case ECORE_TUNN_CLSS_MAC_VLAN:
		return TUNNEL_CLSS_MAC_VLAN;
	case ECORE_TUNN_CLSS_MAC_VNI:
		return TUNNEL_CLSS_MAC_VNI;
	case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
		return TUNNEL_CLSS_INNER_MAC_VLAN;
	case ECORE_TUNN_CLSS_INNER_MAC_VNI:
		return TUNNEL_CLSS_INNER_MAC_VNI;
	case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
		return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
	default:
		return TUNNEL_CLSS_MAC_VLAN;
	}
}
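/* Merge the requested tunnel-mode bits into the mode currently cached in the
 * device: bits selected by p_src->tunn_mode_update_mask are taken from
 * p_src->tunn_mode, while the remaining bits keep their cached value from
 * p_dev->tunn_mode. The merged bitmask is written back into p_src->tunn_mode.
 * BB A0 hardware lacks Geneve support, so any Geneve UDP port update is
 * dropped there with a notice.
 */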
static void
ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
				struct ecore_tunn_update_params *p_src,
				struct pf_update_tunnel_config *p_tunn_cfg)
{
	unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
	unsigned long update_mask = p_src->tunn_mode_update_mask;
	unsigned long tunn_mode = p_src->tunn_mode;
	unsigned long new_tunn_mode = 0;

	if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
		if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
			OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
	} else {
		if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
			OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
	}

	if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
		if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
			OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
	} else {
		if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
			OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
	}

	if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
		if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
			OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
	} else {
		if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
			OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
	}

	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		if (p_src->update_geneve_udp_port)
			DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
		p_src->update_geneve_udp_port = 0;
		p_src->tunn_mode = new_tunn_mode;
		return;
	}

	if (p_src->update_geneve_udp_port) {
		p_tunn_cfg->set_geneve_udp_port_flg = 1;
		p_tunn_cfg->geneve_udp_port =
		    OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
	}

	if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
		if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
			OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
	} else {
		if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
			OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
	}

	if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
		if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
			OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
	} else {
		if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
			OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
	}

	p_src->tunn_mode = new_tunn_mode;
}

static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
				struct ecore_tunn_update_params *p_src,
				struct pf_update_tunnel_config *p_tunn_cfg)
{
	unsigned long tunn_mode = p_src->tunn_mode;
	enum tunnel_clss type;

	ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
	p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
	p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;

	type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
	p_tunn_cfg->tunnel_clss_vxlan = type;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
	p_tunn_cfg->tunnel_clss_l2gre = type;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
	p_tunn_cfg->tunnel_clss_ipgre = type;

	if (p_src->update_vxlan_udp_port) {
		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
		p_tunn_cfg->vxlan_udp_port =
		    OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
	}

	if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2gre = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgre = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_vxlan = 1;

	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		if (p_src->update_geneve_udp_port)
			DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
		p_src->update_geneve_udp_port = 0;
		return;
	}

	if (p_src->update_geneve_udp_port) {
		p_tunn_cfg->set_geneve_udp_port_flg = 1;
		p_tunn_cfg->geneve_udp_port =
		    OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
	}

	if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2geneve = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgeneve = 1;

	type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
	p_tunn_cfg->tunnel_clss_l2geneve = type;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
	p_tunn_cfg->tunnel_clss_ipgeneve = type;
}
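/* Translate the tunn_mode bitmask into per-protocol enable bits and program
 * them into the hardware: GRE and VXLAN always, Geneve only on devices newer
 * than BB A0.
 */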
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   unsigned long tunn_mode)
{
	u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
	u8 l2geneve_enable = 0, ipgeneve_enable = 0;

	if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
		l2gre_enable = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
		ipgre_enable = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
		vxlan_enable = 1;

	ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
	ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);

	if (ECORE_IS_BB_A0(p_hwfn->p_dev))
		return;

	if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
		l2geneve_enable = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
		ipgeneve_enable = 1;

	ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
				ipgeneve_enable);
}

static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
			       struct ecore_tunn_start_params *p_src,
			       struct pf_start_tunnel_config *p_tunn_cfg)
{
	unsigned long tunn_mode;
	enum tunnel_clss type;

	if (!p_src)
		return;

	tunn_mode = p_src->tunn_mode;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
	p_tunn_cfg->tunnel_clss_vxlan = type;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
	p_tunn_cfg->tunnel_clss_l2gre = type;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
	p_tunn_cfg->tunnel_clss_ipgre = type;

	if (p_src->update_vxlan_udp_port) {
		p_tunn_cfg->set_vxlan_udp_port_flg = 1;
		p_tunn_cfg->vxlan_udp_port =
		    OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
	}

	if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2gre = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgre = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_vxlan = 1;

	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		if (p_src->update_geneve_udp_port)
			DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
		p_src->update_geneve_udp_port = 0;
		return;
	}

	if (p_src->update_geneve_udp_port) {
		p_tunn_cfg->set_geneve_udp_port_flg = 1;
		p_tunn_cfg->geneve_udp_port =
		    OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
	}

	if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_l2geneve = 1;

	if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
		p_tunn_cfg->tx_enable_ipgeneve = 1;

	type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
	p_tunn_cfg->tunnel_clss_l2geneve = type;
	type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
	p_tunn_cfg->tunnel_clss_ipgeneve = type;
}
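/* Post the COMMON_RAMROD_PF_START ramrod that brings this PF up, describing
 * the event queue, consolidation queue, MF mode and tunnel configuration to
 * the firmware. A minimal caller sketch (hypothetical values, shown only for
 * illustration):
 *
 *	struct ecore_tunn_start_params tunn_params;
 *	enum _ecore_status_t rc;
 *
 *	OSAL_MEMSET(&tunn_params, 0, sizeof(tunn_params));
 *	rc = ecore_sp_pf_start(p_hwfn, &tunn_params, ECORE_MF_NPAR, true);
 *	if (rc != ECORE_SUCCESS)
 *		return rc;
 */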
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
				       struct ecore_tunn_start_params *p_tunn,
				       enum ecore_mf_mode mode,
				       bool allow_npar_tx_switch)
{
	struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
	u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
	u8 sb_index = p_hwfn->p_eq->eq_sb_index;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 page_cnt;

	/* update initial eq producer */
	ecore_eq_prod_update(p_hwfn,
			     ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

	/* Initialize the SPQ entry for the ramrod */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Fill the ramrod data */
	p_ramrod = &p_ent->ramrod.pf_start;
	p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
	p_ramrod->event_ring_sb_index = sb_index;
	p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);

	/* For easier debugging */
	p_ramrod->dont_log_ramrods = 0;
	p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);

	switch (mode) {
	case ECORE_MF_DEFAULT:
	case ECORE_MF_NPAR:
		p_ramrod->mf_mode = MF_NPAR;
		break;
	case ECORE_MF_OVLAN:
		p_ramrod->mf_mode = MF_OVLAN;
		break;
	default:
		DP_NOTICE(p_hwfn, true,
			  "Unsupported MF mode, init as DEFAULT\n");
		p_ramrod->mf_mode = MF_NPAR;
	}
	p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

	/* Place EQ address in RAMROD */
	DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
		       p_hwfn->p_eq->chain.pbl.p_phys_table);
	page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
	p_ramrod->event_ring_num_pages = page_cnt;
	DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
		       p_hwfn->p_consq->chain.pbl.p_phys_table);

	ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
				       &p_ramrod->tunnel_config);

	if (IS_MF_SI(p_hwfn))
		p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
			  p_hwfn->hw_info.personality);
		p_ramrod->personality = PERSONALITY_ETH;
	}

	if (p_hwfn->p_dev->p_iov_info) {
		struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

		p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
		p_ramrod->num_vfs = (u8)p_iov->total_vfs;
	}
	/* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
	 * version is available.
	 */
	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
		   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
		   sb, sb_index, p_ramrod->outer_tag);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

	if (p_tunn) {
		ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
				       p_tunn->tunn_mode);
		p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
	}

	return rc;
}
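/* Post a COMMON_RAMROD_PF_UPDATE ramrod carrying the current DCBx results.
 * Completion is reported through the SPQ callback (ECORE_SPQ_MODE_CB) rather
 * than by blocking.
 */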
enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_CB;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
					&p_ent->ramrod.pf_update);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
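/* Post a COMMON_RAMROD_RL_UPDATE ramrod that updates the QCN/DCQCN rate
 * limiter parameters for the rate limiters in the [rl_id_first, rl_id_last]
 * range, converting multi-byte fields to little-endian for the firmware.
 */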
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
					struct ecore_rl_update_params *params)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	struct rl_update_ramrod_data *rl_update;
	struct ecore_sp_init_data init_data;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	rl_update = &p_ent->ramrod.rl_update;

	rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
	rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
	rl_update->rl_init_flg = params->rl_init_flg;
	rl_update->rl_start_flg = params->rl_start_flg;
	rl_update->rl_stop_flg = params->rl_stop_flg;
	rl_update->rl_id_first = params->rl_id_first;
	rl_update->rl_id_last = params->rl_id_last;
	rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
	rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
	rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
	rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
	rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
	rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
	rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
	rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
		params->dcqcn_timeuot_us);
	rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
			    struct ecore_tunn_update_params *p_tunn,
			    enum spq_mode comp_mode,
			    struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
					&p_ent->ramrod.pf_update.tunnel_config);

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	if (p_tunn->update_vxlan_udp_port)
		ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
					  p_tunn->vxlan_udp_port);
	if (p_tunn->update_geneve_udp_port)
		ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
					   p_tunn->geneve_udp_port);

	ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
	p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;

	return rc;
}

enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
				   &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}