/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hsi_eth.h"
#include "ecore_sriov.h"
#include "ecore_l2_api.h"
#include "ecore_vf.h"
#include "ecore_vfpf_if.h"
#include "ecore_status.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_l2.h"
#include "ecore_mcp_api.h"
#include "ecore_vf_api.h"

static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released when we receive PF's response
	 * in ecore_send_msg2pf().
	 * So, ecore_vf_pf_prep() and ecore_send_msg2pf()
	 * must come in sequence.
	 */
	OSAL_MUTEX_ACQUIRE(&p_iov->mutex);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "preparing to send %s tlv over vf pf channel\n",
		   qede_ecore_channel_tlvs_string[type]);

	/* Reset Request offset */
	p_iov->offset = (u8 *)(p_iov->vf2pf_request);

	/* Clear mailbox - both request and reply */
	OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = ecore_add_tlv(&p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}

static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
				enum _ecore_status_t req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
}
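/* Every request helper in this file follows the same prep/send/end pattern
 * built around the two primitives above.  The sketch below is illustrative
 * only (it is not part of the driver); it assumes a request with no extra
 * TLVs, similar to the CHANNEL_TLV_VPORT_TEARDOWN handling later in this
 * file:
 *
 *	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
 *	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
 *	enum _ecore_status_t rc;
 *
 *	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
 *			 sizeof(struct vfpf_first_tlv));
 *	ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 *	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
 *		rc = ECORE_INVAL;
 *	ecore_vf_pf_req_end(p_hwfn, rc);   /+ always releases the mutex +/
 */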

#ifdef CONFIG_ECORE_SW_CHANNEL
/* The SW channel implementation of Windows needs to know the 'exact'
 * response size of any given message. That means that for future
 * messages we would be unable to send TLVs to the PF if it cannot
 * answer them when |response| != |default response|.
 * We would need to handshake any such message in the acquire
 * capabilities.
 */
#endif
static enum _ecore_status_t
ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
		  u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	ecore_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	/* Send TLVs over HW channel */
	OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p,"
		   " %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent the trigger from
	 * arriving before the data is written.
	 */
	OSAL_WMB(p_hwfn->p_dev);

	REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger,
	       *((u32 *)&trigger));

	/* When the PF is done with the response, it writes back to the
	 * `done' address. Poll until then (up to 100 * 25ms = 2.5 seconds).
	 */
	while ((!*done) && time) {
		OSAL_MSLEEP(25);
		time--;
	}

	if (!*done) {
		DP_NOTICE(p_hwfn, true,
			  "VF <-- PF Timeout [Type %d]\n",
			  p_req->first_tlv.tl.type);
		rc = ECORE_TIMEOUT;
	} else {
		if ((*done != PFVF_STATUS_SUCCESS) &&
		    (*done != PFVF_STATUS_NO_RESOURCE))
			DP_NOTICE(p_hwfn, false,
				  "PF response: %d [Type %d]\n",
				  *done, p_req->first_tlv.tl.type);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "PF response: %d [Type %d]\n",
				   *done, p_req->first_tlv.tl.type);
	}

	return rc;
}

static void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn,
				struct ecore_queue_cid *p_cid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Only add QIDs for the queue if it was negotiated with PF */
	if (!(p_iov->acquire_resp.pfdev_info.capabilities &
	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	p_qid_tlv = ecore_add_tlv(&p_iov->offset,
				  CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
	p_qid_tlv->qid = p_cid->qid_usage_idx;
}
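/* Queue-id TLVs only make sense once the PF has granted the queue-qids
 * capability during acquire.  A minimal sketch of the check a caller could
 * perform (illustrative only; the helpers in this file already do this
 * internally via ecore_vf_pf_add_qid()):
 *
 *	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
 *	bool b_has_qids = !!(p_iov->acquire_resp.pfdev_info.capabilities &
 *			     PFVF_ACQUIRE_CAP_QUEUE_QIDS);
 *
 *	if (b_has_qids)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 *			   "PF supports explicit queue-ids per CID\n");
 */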

enum _ecore_status_t _ecore_vf_pf_release(struct ecore_hwfn *p_hwfn,
					  bool b_final)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = ECORE_AGAIN;

	ecore_vf_pf_req_end(p_hwfn, rc);

	if (!b_final)
		return rc;

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->vf2pf_request,
				       p_iov->vf2pf_request_phys,
				       sizeof(union vfpf_tlvs));
	if (p_iov->pf2vf_reply)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->pf2vf_reply,
				       p_iov->pf2vf_reply_phys,
				       sizeof(union pfvf_tlvs));

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct ecore_bulletin_content);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->bulletin.p_virt,
				       p_iov->bulletin.phys,
				       size);
	}

#ifdef CONFIG_ECORE_LOCK_ALLOC
	OSAL_MUTEX_DEALLOC(&p_iov->mutex);
#endif

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = OSAL_NULL;

	return rc;
}

enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
{
	return _ecore_vf_pf_release(p_hwfn, true);
}

static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
					    struct vf_pf_resc_request *p_req,
					    struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
		   p_req->num_rxqs, p_resp->num_rxqs,
		   p_req->num_txqs, p_resp->num_txqs,
		   p_req->num_sbs, p_resp->num_sbs,
		   p_req->num_mac_filters, p_resp->num_mac_filters,
		   p_req->num_vlan_filters, p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters,
		   p_req->num_cids, p_resp->num_cids);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
	p_req->num_cids = p_resp->num_cids;
}

static enum _ecore_status_t
ecore_vf_pf_soft_flr_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_soft_flr_tlv *req;
	enum _ecore_status_t rc;

	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_SOFT_FLR, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "rc=0x%x\n", rc);

	/* Release the mutex here, since ecore_vf_pf_acquire() takes it again */
	ecore_vf_pf_req_end(p_hwfn, ECORE_AGAIN);

	/* As of today, there is no mechanism in place for the VF to learn the
	 * FLR status, so wait long enough (worst-case time) for the FLR to
	 * complete, as the mailbox request to the MFW by the PF for initiating
	 * the VF FLR, and the PF processing of the VF FLR, can take time.
	 */
	OSAL_MSLEEP(3000);

	return ecore_vf_pf_acquire(p_hwfn);
}

enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct ecore_vf_acquire_sw_info vf_sw_info;
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u8 retry_cnt = p_iov->acquire_retry_cnt;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int attempts = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
	p_resc->num_cids = ECORE_ETH_VF_DEFAULT_NUM_CIDS;

	OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
	OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);

	req->vfdev_info.os_type = vf_sw_info.os_type;
	req->vfdev_info.driver_version = vf_sw_info.driver_version;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* If we've mapped the doorbell bar, try using queue qids */
	if (p_iov->b_doorbell_bar)
		req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
						VFPF_ACQUIRE_CAP_QUEUE_QIDS;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		OSAL_MEMSET(p_iov->pf2vf_reply, 0,
			    sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = ecore_send_msg2pf(p_hwfn,
				       &resp->hdr.status, sizeof(*resp));

		if (retry_cnt && rc == ECORE_TIMEOUT) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF retrying to acquire due to VPC timeout\n");
			retry_cnt--;
			continue;
		}

		if (rc != ECORE_SUCCESS)
			goto exit;

		/* copy acquire response from buffer to p_hwfn */
		OSAL_MEMCPY(&p_iov->acquire_resp,
			    resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "resources acquired\n");
			resources_acquired = true;
		} /* PF refuses to allocate our resources */
		else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			 attempts < ECORE_VF_ACQUIRE_THRESH) {
			ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
							&resp->resc);

		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn, false,
					  "PF uses an incompatible fastpath HSI"
					  " %02x.%02x [VF requires %02x.%02x]."
					  " Please change to a VF driver using"
					  " %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = ECORE_INVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn, false,
						  "PF uses very old drivers."
						  " Please change to a VF"
						  " driver using no later than"
						  " 8.8.x.x.\n");
					rc = ECORE_INVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to"
						" see if it supports FW-version"
						" override\n");
					req->vfdev_info.capabilities |=
					    VFPF_ACQUIRE_CAP_PRE_FP_HSI;
					continue;
				}
			}

			/* If PF/VF are using the same Major version, the PF
			 * must have had its reasons. Simply fail.
			 */
			DP_NOTICE(p_hwfn, false,
				  "PF rejected acquisition by VF\n");
			rc = ECORE_INVAL;
			goto exit;
		} else if (resp->hdr.status == PFVF_STATUS_ACQUIRED) {
			ecore_vf_pf_req_end(p_hwfn, ECORE_AGAIN);
			return ecore_vf_pf_soft_flr_acquire(p_hwfn);
		} else {
			DP_ERR(p_hwfn,
			       "PF returned err %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = ECORE_AGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	/* In case PF doesn't support multi-queue Tx, update the number of
	 * CIDs to reflect the number of queues [older PFs didn't fill that
	 * field].
	 */
	if (!(resp->pfdev_info.capabilities &
	      PFVF_ACQUIRE_CAP_QUEUE_QIDS))
		resp->resc.num_cids = resp->resc.num_rxqs +
				      resp->resc.num_txqs;

	rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "VF_UPDATE_ACQUIRE_RESC_RESP Failed:"
			  " status = 0x%x.\n",
			  rc);
		rc = ECORE_AGAIN;
		goto exit;
	}

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_dev->type = resp->pfdev_info.dev_type;
	p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;

	DP_INFO(p_hwfn, "Chip details - %s%d\n",
		ECORE_IS_BB(p_dev) ? "BB" : "AH",
		CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);

	p_dev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_INFO(p_hwfn, "100g VF\n");
			p_dev->num_hwfns = 2;
		}
	}

	/* @DPDK */
	if (((p_iov->b_pre_fp_hsi == true) &
	     ETH_HSI_VER_MINOR) &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR))
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI;"
			" %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR,
			resp->pfdev_info.minor_fp_hsi);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
			 enum BAR_ID bar_id)
{
	u32 bar_size;

	/* Regview size is fixed */
	if (bar_id == BAR_ID_0)
		return 1 << 17;

	/* Doorbell is received from PF */
	bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
	if (bar_size)
		return 1 << bar_size;
	return 0;
}

enum _ecore_status_t
ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn,
		    struct ecore_hw_prepare_params *p_params)
{
	struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev);
	struct ecore_vf_iov *p_iov;
	u32 reg;
	enum _ecore_status_t rc;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->p_dev->num_hwfns = 1;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov));
	if (!p_iov) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	/* Doorbells are tricky; the upper layer has already set the hwfn
	 * doorbell value, but there are several incompatibility scenarios
	 * where that would be incorrect and we'd need to override it.
	 */
	if (p_hwfn->doorbells == OSAL_NULL) {
		p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
				    PXP_VF_BAR0_START_DQ;
	} else if (p_hwfn == p_lead) {
		/* For leading hw-function, value is always correct, but need
		 * to handle scenario where legacy PF would not support 100g
		 * mapped bars later.
		 */
		p_iov->b_doorbell_bar = true;
	} else {
		/* here, value would be correct ONLY if the leading hwfn
		 * received indication that mapped-bars are supported.
		 */
		if (p_lead->vf_iov_info->b_doorbell_bar)
			p_iov->b_doorbell_bar = true;
		else
			p_hwfn->doorbells = (u8 OSAL_IOMEM *)
					    p_hwfn->regview +
					    PXP_VF_BAR0_START_DQ;
	}

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						       &p_iov->vf2pf_request_phys,
						       sizeof(union vfpf_tlvs));
	if (!p_iov->vf2pf_request) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `vf2pf_request' DMA memory\n");
		goto free_p_iov;
	}

	p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						     &p_iov->pf2vf_reply_phys,
						     sizeof(union pfvf_tlvs));
	if (!p_iov->pf2vf_reply) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `pf2vf_reply' DMA memory\n");
		goto free_vf2pf_request;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys]\n",
		   p_iov->vf2pf_request,
		   (unsigned long)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply,
		   (unsigned long)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct ecore_bulletin_content);
	p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
							 &p_iov->bulletin.phys,
							 p_iov->bulletin.size);
	if (!p_iov->bulletin.p_virt) {
		DP_NOTICE(p_hwfn, false, "Failed to alloc bulletin memory\n");
		goto free_pf2vf_reply;
	}
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
		   p_iov->bulletin.size);

#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex)) {
		DP_NOTICE(p_hwfn, false, "Failed to allocate p_iov->mutex\n");
		goto free_bulletin_mem;
	}
#endif
	OSAL_MUTEX_INIT(&p_iov->mutex);

	p_iov->acquire_retry_cnt = p_params->acquire_retry_cnt;
	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = ECORE_PCI_ETH;

	rc = ecore_vf_pf_acquire(p_hwfn);

	/* If VF is 100g using a mapped bar and PF is too old to support that,
	 * acquisition would succeed - but the VF would have no way of knowing
	 * the size of the doorbell bar configured in HW and thus will not
	 * know how to split it for the 2nd hw-function.
	 * In this case we re-try without the indication of the mapped
	 * doorbell.
	 */
	if (rc == ECORE_SUCCESS &&
	    p_iov->b_doorbell_bar &&
	    !ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
	    ECORE_IS_CMT(p_hwfn->p_dev)) {
		rc = _ecore_vf_pf_release(p_hwfn, false);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_iov->b_doorbell_bar = false;
		p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
				    PXP_VF_BAR0_START_DQ;
		rc = ecore_vf_pf_acquire(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
		   p_hwfn->regview, p_hwfn->doorbells,
		   p_hwfn->p_dev->doorbells);

	return rc;

#ifdef CONFIG_ECORE_LOCK_ALLOC
free_bulletin_mem:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->bulletin.p_virt,
			       p_iov->bulletin.phys,
			       p_iov->bulletin.size);
#endif
free_pf2vf_reply:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->pf2vf_reply,
			       p_iov->pf2vf_reply_phys,
			       sizeof(union pfvf_tlvs));
free_vf2pf_request:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
			       p_iov->vf2pf_request_phys,
			       sizeof(union vfpf_tlvs));
free_p_iov:
	OSAL_FREE(p_hwfn->p_dev, p_iov);

	return ECORE_NOMEM;
}

/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
static void
__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			     struct ecore_tunn_update_type *p_src,
			     enum ecore_tunn_mode mask, u8 *p_cls)
{
	if (p_src->b_update_mode) {
		p_req->tun_mode_update_mask |= (1 << mask);

		if (p_src->b_mode_enabled)
			p_req->tunn_mode |= (1 << mask);
	}

	*p_cls = p_src->tun_cls;
}

/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
static void
ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
			   struct ecore_tunn_update_type *p_src,
			   enum ecore_tunn_mode mask, u8 *p_cls,
			   struct ecore_tunn_update_udp_port *p_port,
			   u8 *p_update_port, u16 *p_udp_port)
{
	if (p_port->b_update_port) {
		*p_update_port = 1;
		*p_udp_port = p_port->port;
	}

	__ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
}

void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun)
{
	if (p_tun->vxlan.b_mode_enabled)
		p_tun->vxlan.b_update_mode = true;
	if (p_tun->l2_geneve.b_mode_enabled)
		p_tun->l2_geneve.b_update_mode = true;
	if (p_tun->ip_geneve.b_mode_enabled)
		p_tun->ip_geneve.b_update_mode = true;
	if (p_tun->l2_gre.b_mode_enabled)
		p_tun->l2_gre.b_update_mode = true;
	if (p_tun->ip_gre.b_mode_enabled)
		p_tun->ip_gre.b_update_mode = true;

	p_tun->b_update_rx_cls = true;
	p_tun->b_update_tx_cls = true;
}

static void
__ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun,
			     u16 feature_mask, u8 tunn_mode, u8 tunn_cls,
			     enum ecore_tunn_mode val)
{
	if (feature_mask & (1 << val)) {
		p_tun->b_mode_enabled = tunn_mode;
		p_tun->tun_cls = tunn_cls;
	} else {
		p_tun->b_mode_enabled = false;
	}
}

static void
ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn,
			   struct ecore_tunnel_info *p_tun,
			   struct pfvf_update_tunn_param_tlv *p_resp)
{
	/* Update mode and classes provided by PF */
	u16 feat_mask = p_resp->tunn_feature_mask;

	__ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
				     p_resp->vxlan_mode, p_resp->vxlan_clss,
				     ECORE_MODE_VXLAN_TUNN);
	__ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
				     p_resp->l2geneve_mode,
				     p_resp->l2geneve_clss,
				     ECORE_MODE_L2GENEVE_TUNN);
	__ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
				     p_resp->ipgeneve_mode,
				     p_resp->ipgeneve_clss,
				     ECORE_MODE_IPGENEVE_TUNN);
	__ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
				     p_resp->l2gre_mode, p_resp->l2gre_clss,
				     ECORE_MODE_L2GRE_TUNN);
	__ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
				     p_resp->ipgre_mode, p_resp->ipgre_clss,
				     ECORE_MODE_IPGRE_TUNN);
	p_tun->geneve_port.port = p_resp->geneve_udp_port;
	p_tun->vxlan_port.port = p_resp->vxlan_udp_port;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
		   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
		   p_tun->ip_geneve.b_mode_enabled,
		   p_tun->l2_gre.b_mode_enabled,
		   p_tun->ip_gre.b_mode_enabled);
}

enum _ecore_status_t
ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
				struct ecore_tunnel_info *p_src)
{
	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	enum _ecore_status_t rc;

	p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
				 sizeof(*p_req));

	if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
		p_req->update_tun_cls = 1;

	ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, ECORE_MODE_VXLAN_TUNN,
				   &p_req->vxlan_clss, &p_src->vxlan_port,
				   &p_req->update_vxlan_port,
				   &p_req->vxlan_port);
	ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
				   ECORE_MODE_L2GENEVE_TUNN,
				   &p_req->l2geneve_clss, &p_src->geneve_port,
				   &p_req->update_geneve_port,
				   &p_req->geneve_port);
	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
				     ECORE_MODE_IPGENEVE_TUNN,
				     &p_req->ipgeneve_clss);
	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
				     ECORE_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
				     ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
	rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));

	if (rc)
		goto exit;

	if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Failed to update tunnel parameters\n");
		rc = ECORE_INVAL;
	}

	ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
exit:
	ecore_vf_pf_req_end(p_hwfn, rc);
	return rc;
}
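/* A VF typically re-sends its current tunnel configuration after a vport
 * start, letting the PF confirm or trim it.  Minimal usage sketch
 * (illustrative only; assumes the device-wide tunnel state is the source):
 *
 *	struct ecore_tunnel_info tunn = p_hwfn->p_dev->tunnel;
 *	enum _ecore_status_t rc;
 *
 *	ecore_vf_set_vf_start_tunn_update_param(&tunn);
 *	rc = ecore_vf_pf_tunnel_param_update(p_hwfn, &tunn);
 *	if (rc != ECORE_SUCCESS)
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
 *			   "tunnel config not accepted by PF\n");
 */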

enum _ecore_status_t
ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
		      struct ecore_queue_cid *p_cid,
		      u16 bd_max_bytes,
		      dma_addr_t bd_chain_phys_addr,
		      dma_addr_t cqe_pbl_addr,
		      u16 cqe_pbl_size,
		      void OSAL_IOMEM **pp_prod)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	u16 rx_qid = p_cid->rel.queue_id;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1; /* Keep initialized, for future compatibility */

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
			   MSTORM_QZONE_START(p_hwfn->p_dev) +
			   (hw_qid) * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	ecore_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (!p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0.
		 * It was actually the PF's responsibility, but since some
		 * old PFs might fail to do so, we do this as well.
		 */
		OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
					  struct ecore_queue_cid *p_cid,
					  bool cqe_completion)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = p_cid->rel.queue_id;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	ecore_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
	ecore_add_tlv(&p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t
ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
		      struct ecore_queue_cid *p_cid,
		      dma_addr_t pbl_addr, u16 pbl_size,
		      void OSAL_IOMEM **pp_doorbell)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	u16 qid = p_cid->rel.queue_id;
	enum _ecore_status_t rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = qid;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = p_cid->sb_igu_id;
	req->sb_index = p_cid->sb_idx;

	ecore_vf_pf_add_qid(p_hwfn, p_cid);

	/* add list termination tlv */
990 ecore_add_tlv(&p_iov->offset, 991 CHANNEL_TLV_LIST_END, 992 sizeof(struct channel_list_end_tlv)); 993 994 resp = &p_iov->pf2vf_reply->queue_start; 995 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 996 if (rc) 997 goto exit; 998 999 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1000 rc = ECORE_INVAL; 1001 goto exit; 1002 } 1003 1004 /* Modern PFs provide the actual offsets, while legacy 1005 * provided only the queue id. 1006 */ 1007 if (!p_iov->b_pre_fp_hsi) { 1008 *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells + 1009 resp->offset; 1010 } else { 1011 u8 cid = p_iov->acquire_resp.resc.cid[qid]; 1012 1013 *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells + 1014 DB_ADDR_VF(cid, DQ_DEMS_LEGACY); 1015 } 1016 1017 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1018 "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", 1019 qid, *pp_doorbell, resp->offset); 1020 exit: 1021 ecore_vf_pf_req_end(p_hwfn, rc); 1022 1023 return rc; 1024 } 1025 1026 enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, 1027 struct ecore_queue_cid *p_cid) 1028 { 1029 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1030 struct vfpf_stop_txqs_tlv *req; 1031 struct pfvf_def_resp_tlv *resp; 1032 enum _ecore_status_t rc; 1033 1034 /* clear mailbox and prep first tlv */ 1035 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req)); 1036 1037 req->tx_qid = p_cid->rel.queue_id; 1038 req->num_txqs = 1; 1039 1040 ecore_vf_pf_add_qid(p_hwfn, p_cid); 1041 1042 /* add list termination tlv */ 1043 ecore_add_tlv(&p_iov->offset, 1044 CHANNEL_TLV_LIST_END, 1045 sizeof(struct channel_list_end_tlv)); 1046 1047 resp = &p_iov->pf2vf_reply->default_resp; 1048 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1049 if (rc) 1050 goto exit; 1051 1052 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1053 rc = ECORE_INVAL; 1054 goto exit; 1055 } 1056 1057 exit: 1058 ecore_vf_pf_req_end(p_hwfn, rc); 1059 1060 return rc; 1061 } 1062 1063 enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn, 1064 struct ecore_queue_cid **pp_cid, 1065 u8 num_rxqs, 1066 u8 comp_cqe_flg, 1067 u8 comp_event_flg) 1068 { 1069 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1070 struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; 1071 struct vfpf_update_rxq_tlv *req; 1072 enum _ecore_status_t rc; 1073 1074 /* Starting with CHANNEL_TLV_QID and the need for additional queue 1075 * information, this API stopped supporting multiple rxqs. 1076 * TODO - remove this and change the API to accept a single queue-cid 1077 * in a follow-up patch. 
1078 */ 1079 if (num_rxqs != 1) { 1080 DP_NOTICE(p_hwfn, true, 1081 "VFs can no longer update more than a single queue\n"); 1082 return ECORE_INVAL; 1083 } 1084 1085 /* clear mailbox and prep first tlv */ 1086 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req)); 1087 1088 req->rx_qid = (*pp_cid)->rel.queue_id; 1089 req->num_rxqs = 1; 1090 1091 if (comp_cqe_flg) 1092 req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG; 1093 if (comp_event_flg) 1094 req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG; 1095 1096 ecore_vf_pf_add_qid(p_hwfn, *pp_cid); 1097 1098 /* add list termination tlv */ 1099 ecore_add_tlv(&p_iov->offset, 1100 CHANNEL_TLV_LIST_END, 1101 sizeof(struct channel_list_end_tlv)); 1102 1103 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1104 if (rc) 1105 goto exit; 1106 1107 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1108 rc = ECORE_INVAL; 1109 goto exit; 1110 } 1111 1112 exit: 1113 ecore_vf_pf_req_end(p_hwfn, rc); 1114 return rc; 1115 } 1116 1117 enum _ecore_status_t 1118 ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id, 1119 u16 mtu, u8 inner_vlan_removal, 1120 enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe, 1121 u8 only_untagged) 1122 { 1123 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1124 struct vfpf_vport_start_tlv *req; 1125 struct pfvf_def_resp_tlv *resp; 1126 enum _ecore_status_t rc; 1127 int i; 1128 1129 /* clear mailbox and prep first tlv */ 1130 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req)); 1131 1132 req->mtu = mtu; 1133 req->vport_id = vport_id; 1134 req->inner_vlan_removal = inner_vlan_removal; 1135 req->tpa_mode = tpa_mode; 1136 req->max_buffers_per_cqe = max_buffers_per_cqe; 1137 req->only_untagged = only_untagged; 1138 1139 /* status blocks */ 1140 for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) { 1141 struct ecore_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i]; 1142 1143 if (p_sb) 1144 req->sb_addr[i] = p_sb->sb_phys; 1145 } 1146 1147 /* add list termination tlv */ 1148 ecore_add_tlv(&p_iov->offset, 1149 CHANNEL_TLV_LIST_END, 1150 sizeof(struct channel_list_end_tlv)); 1151 1152 resp = &p_iov->pf2vf_reply->default_resp; 1153 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1154 if (rc) 1155 goto exit; 1156 1157 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1158 rc = ECORE_INVAL; 1159 goto exit; 1160 } 1161 1162 exit: 1163 ecore_vf_pf_req_end(p_hwfn, rc); 1164 1165 return rc; 1166 } 1167 1168 enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn) 1169 { 1170 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1171 struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; 1172 enum _ecore_status_t rc; 1173 1174 /* clear mailbox and prep first tlv */ 1175 ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN, 1176 sizeof(struct vfpf_first_tlv)); 1177 1178 /* add list termination tlv */ 1179 ecore_add_tlv(&p_iov->offset, 1180 CHANNEL_TLV_LIST_END, 1181 sizeof(struct channel_list_end_tlv)); 1182 1183 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1184 if (rc) 1185 goto exit; 1186 1187 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1188 rc = ECORE_INVAL; 1189 goto exit; 1190 } 1191 1192 exit: 1193 ecore_vf_pf_req_end(p_hwfn, rc); 1194 1195 return rc; 1196 } 1197 1198 static bool 1199 ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn, 1200 struct ecore_sp_vport_update_params *p_data, 1201 u16 tlv) 1202 { 1203 switch (tlv) { 1204 case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE: 1205 return 
!!(p_data->update_vport_active_rx_flg || 1206 p_data->update_vport_active_tx_flg); 1207 case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH: 1208 #ifndef ASIC_ONLY 1209 /* FPGA doesn't have PVFC and so can't support tx-switching */ 1210 return !!(p_data->update_tx_switching_flg && 1211 !CHIP_REV_IS_FPGA(p_hwfn->p_dev)); 1212 #else 1213 return !!p_data->update_tx_switching_flg; 1214 #endif 1215 case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP: 1216 return !!p_data->update_inner_vlan_removal_flg; 1217 case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN: 1218 return !!p_data->update_accept_any_vlan_flg; 1219 case CHANNEL_TLV_VPORT_UPDATE_MCAST: 1220 return !!p_data->update_approx_mcast_flg; 1221 case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM: 1222 return !!(p_data->accept_flags.update_rx_mode_config || 1223 p_data->accept_flags.update_tx_mode_config); 1224 case CHANNEL_TLV_VPORT_UPDATE_RSS: 1225 return !!p_data->rss_params; 1226 case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA: 1227 return !!p_data->sge_tpa_params; 1228 default: 1229 DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n", 1230 tlv, qede_ecore_channel_tlvs_string[tlv]); 1231 return false; 1232 } 1233 } 1234 1235 static void 1236 ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn, 1237 struct ecore_sp_vport_update_params *p_data) 1238 { 1239 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1240 struct pfvf_def_resp_tlv *p_resp; 1241 u16 tlv; 1242 1243 for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; 1244 tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; 1245 tlv++) { 1246 if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv)) 1247 continue; 1248 1249 p_resp = (struct pfvf_def_resp_tlv *) 1250 ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv); 1251 if (p_resp && p_resp->hdr.status) 1252 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1253 "TLV[%d] type %s Configuration %s\n", 1254 tlv, qede_ecore_channel_tlvs_string[tlv], 1255 (p_resp && p_resp->hdr.status) ? 
"succeeded" 1256 : "failed"); 1257 } 1258 } 1259 1260 enum _ecore_status_t 1261 ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn, 1262 struct ecore_sp_vport_update_params *p_params) 1263 { 1264 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1265 struct vfpf_vport_update_tlv *req; 1266 struct pfvf_def_resp_tlv *resp; 1267 u8 update_rx, update_tx; 1268 u32 resp_size = 0; 1269 u16 size, tlv; 1270 enum _ecore_status_t rc; 1271 1272 resp = &p_iov->pf2vf_reply->default_resp; 1273 resp_size = sizeof(*resp); 1274 1275 update_rx = p_params->update_vport_active_rx_flg; 1276 update_tx = p_params->update_vport_active_tx_flg; 1277 1278 /* clear mailbox and prep header tlv */ 1279 ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req)); 1280 1281 /* Prepare extended tlvs */ 1282 if (update_rx || update_tx) { 1283 struct vfpf_vport_update_activate_tlv *p_act_tlv; 1284 1285 size = sizeof(struct vfpf_vport_update_activate_tlv); 1286 p_act_tlv = ecore_add_tlv(&p_iov->offset, 1287 CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, 1288 size); 1289 resp_size += sizeof(struct pfvf_def_resp_tlv); 1290 1291 if (update_rx) { 1292 p_act_tlv->update_rx = update_rx; 1293 p_act_tlv->active_rx = p_params->vport_active_rx_flg; 1294 } 1295 1296 if (update_tx) { 1297 p_act_tlv->update_tx = update_tx; 1298 p_act_tlv->active_tx = p_params->vport_active_tx_flg; 1299 } 1300 } 1301 1302 if (p_params->update_inner_vlan_removal_flg) { 1303 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; 1304 1305 size = sizeof(struct vfpf_vport_update_vlan_strip_tlv); 1306 p_vlan_tlv = ecore_add_tlv(&p_iov->offset, 1307 CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP, 1308 size); 1309 resp_size += sizeof(struct pfvf_def_resp_tlv); 1310 1311 p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg; 1312 } 1313 1314 if (p_params->update_tx_switching_flg) { 1315 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; 1316 1317 size = sizeof(struct vfpf_vport_update_tx_switch_tlv); 1318 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; 1319 p_tx_switch_tlv = ecore_add_tlv(&p_iov->offset, 1320 tlv, size); 1321 resp_size += sizeof(struct pfvf_def_resp_tlv); 1322 1323 p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg; 1324 } 1325 1326 if (p_params->update_approx_mcast_flg) { 1327 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; 1328 1329 size = sizeof(struct vfpf_vport_update_mcast_bin_tlv); 1330 p_mcast_tlv = ecore_add_tlv(&p_iov->offset, 1331 CHANNEL_TLV_VPORT_UPDATE_MCAST, 1332 size); 1333 resp_size += sizeof(struct pfvf_def_resp_tlv); 1334 1335 OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins, 1336 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); 1337 } 1338 1339 update_rx = p_params->accept_flags.update_rx_mode_config; 1340 update_tx = p_params->accept_flags.update_tx_mode_config; 1341 1342 if (update_rx || update_tx) { 1343 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; 1344 1345 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; 1346 size = sizeof(struct vfpf_vport_update_accept_param_tlv); 1347 p_accept_tlv = ecore_add_tlv(&p_iov->offset, tlv, size); 1348 resp_size += sizeof(struct pfvf_def_resp_tlv); 1349 1350 if (update_rx) { 1351 p_accept_tlv->update_rx_mode = update_rx; 1352 p_accept_tlv->rx_accept_filter = 1353 p_params->accept_flags.rx_accept_filter; 1354 } 1355 1356 if (update_tx) { 1357 p_accept_tlv->update_tx_mode = update_tx; 1358 p_accept_tlv->tx_accept_filter = 1359 p_params->accept_flags.tx_accept_filter; 1360 } 1361 } 1362 1363 if (p_params->rss_params) { 1364 struct ecore_rss_params *rss_params = p_params->rss_params; 1365 struct 
vfpf_vport_update_rss_tlv *p_rss_tlv; 1366 int i, table_size; 1367 1368 size = sizeof(struct vfpf_vport_update_rss_tlv); 1369 p_rss_tlv = ecore_add_tlv(&p_iov->offset, 1370 CHANNEL_TLV_VPORT_UPDATE_RSS, size); 1371 resp_size += sizeof(struct pfvf_def_resp_tlv); 1372 1373 if (rss_params->update_rss_config) 1374 p_rss_tlv->update_rss_flags |= 1375 VFPF_UPDATE_RSS_CONFIG_FLAG; 1376 if (rss_params->update_rss_capabilities) 1377 p_rss_tlv->update_rss_flags |= 1378 VFPF_UPDATE_RSS_CAPS_FLAG; 1379 if (rss_params->update_rss_ind_table) 1380 p_rss_tlv->update_rss_flags |= 1381 VFPF_UPDATE_RSS_IND_TABLE_FLAG; 1382 if (rss_params->update_rss_key) 1383 p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG; 1384 1385 p_rss_tlv->rss_enable = rss_params->rss_enable; 1386 p_rss_tlv->rss_caps = rss_params->rss_caps; 1387 p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log; 1388 1389 table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE, 1390 1 << p_rss_tlv->rss_table_size_log); 1391 for (i = 0; i < table_size; i++) { 1392 struct ecore_queue_cid *p_queue; 1393 1394 p_queue = rss_params->rss_ind_table[i]; 1395 p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id; 1396 } 1397 1398 OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key, 1399 sizeof(rss_params->rss_key)); 1400 } 1401 1402 if (p_params->update_accept_any_vlan_flg) { 1403 struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv; 1404 1405 size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv); 1406 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; 1407 p_any_vlan_tlv = ecore_add_tlv(&p_iov->offset, tlv, size); 1408 1409 resp_size += sizeof(struct pfvf_def_resp_tlv); 1410 p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan; 1411 p_any_vlan_tlv->update_accept_any_vlan_flg = 1412 p_params->update_accept_any_vlan_flg; 1413 } 1414 1415 if (p_params->sge_tpa_params) { 1416 struct ecore_sge_tpa_params *sge_tpa_params; 1417 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; 1418 1419 sge_tpa_params = p_params->sge_tpa_params; 1420 size = sizeof(struct vfpf_vport_update_sge_tpa_tlv); 1421 p_sge_tpa_tlv = ecore_add_tlv(&p_iov->offset, 1422 CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, 1423 size); 1424 resp_size += sizeof(struct pfvf_def_resp_tlv); 1425 1426 if (sge_tpa_params->update_tpa_en_flg) 1427 p_sge_tpa_tlv->update_sge_tpa_flags |= 1428 VFPF_UPDATE_TPA_EN_FLAG; 1429 if (sge_tpa_params->update_tpa_param_flg) 1430 p_sge_tpa_tlv->update_sge_tpa_flags |= 1431 VFPF_UPDATE_TPA_PARAM_FLAG; 1432 1433 if (sge_tpa_params->tpa_ipv4_en_flg) 1434 p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG; 1435 if (sge_tpa_params->tpa_ipv6_en_flg) 1436 p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG; 1437 if (sge_tpa_params->tpa_pkt_split_flg) 1438 p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG; 1439 if (sge_tpa_params->tpa_hdr_data_split_flg) 1440 p_sge_tpa_tlv->sge_tpa_flags |= 1441 VFPF_TPA_HDR_DATA_SPLIT_FLAG; 1442 if (sge_tpa_params->tpa_gro_consistent_flg) 1443 p_sge_tpa_tlv->sge_tpa_flags |= 1444 VFPF_TPA_GRO_CONSIST_FLAG; 1445 if (sge_tpa_params->tpa_ipv4_tunn_en_flg) 1446 p_sge_tpa_tlv->sge_tpa_flags |= 1447 VFPF_TPA_TUNN_IPV4_EN_FLAG; 1448 if (sge_tpa_params->tpa_ipv6_tunn_en_flg) 1449 p_sge_tpa_tlv->sge_tpa_flags |= 1450 VFPF_TPA_TUNN_IPV6_EN_FLAG; 1451 1452 p_sge_tpa_tlv->tpa_max_aggs_num = 1453 sge_tpa_params->tpa_max_aggs_num; 1454 p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size; 1455 p_sge_tpa_tlv->tpa_min_size_to_start = 1456 sge_tpa_params->tpa_min_size_to_start; 1457 p_sge_tpa_tlv->tpa_min_size_to_cont = 
1458 sge_tpa_params->tpa_min_size_to_cont; 1459 1460 p_sge_tpa_tlv->max_buffers_per_cqe = 1461 sge_tpa_params->max_buffers_per_cqe; 1462 } 1463 1464 /* add list termination tlv */ 1465 ecore_add_tlv(&p_iov->offset, 1466 CHANNEL_TLV_LIST_END, 1467 sizeof(struct channel_list_end_tlv)); 1468 1469 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size); 1470 if (rc) 1471 goto exit; 1472 1473 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1474 rc = ECORE_INVAL; 1475 goto exit; 1476 } 1477 1478 ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params); 1479 1480 exit: 1481 ecore_vf_pf_req_end(p_hwfn, rc); 1482 1483 return rc; 1484 } 1485 1486 enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn) 1487 { 1488 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1489 struct pfvf_def_resp_tlv *resp; 1490 struct vfpf_first_tlv *req; 1491 enum _ecore_status_t rc; 1492 1493 /* clear mailbox and prep first tlv */ 1494 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req)); 1495 1496 /* add list termination tlv */ 1497 ecore_add_tlv(&p_iov->offset, 1498 CHANNEL_TLV_LIST_END, 1499 sizeof(struct channel_list_end_tlv)); 1500 1501 resp = &p_iov->pf2vf_reply->default_resp; 1502 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1503 if (rc) 1504 goto exit; 1505 1506 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1507 rc = ECORE_AGAIN; 1508 goto exit; 1509 } 1510 1511 p_hwfn->b_int_enabled = 0; 1512 1513 exit: 1514 ecore_vf_pf_req_end(p_hwfn, rc); 1515 1516 return rc; 1517 } 1518 1519 void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn, 1520 struct ecore_filter_mcast *p_filter_cmd) 1521 { 1522 struct ecore_sp_vport_update_params sp_params; 1523 int i; 1524 1525 OSAL_MEMSET(&sp_params, 0, sizeof(sp_params)); 1526 sp_params.update_approx_mcast_flg = 1; 1527 1528 if (p_filter_cmd->opcode == ECORE_FILTER_ADD) { 1529 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { 1530 u32 bit; 1531 1532 bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1533 sp_params.bins[bit / 32] |= 1 << (bit % 32); 1534 } 1535 } 1536 1537 ecore_vf_pf_vport_update(p_hwfn, &sp_params); 1538 } 1539 1540 enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn, 1541 struct ecore_filter_ucast 1542 *p_ucast) 1543 { 1544 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1545 struct vfpf_ucast_filter_tlv *req; 1546 struct pfvf_def_resp_tlv *resp; 1547 enum _ecore_status_t rc; 1548 1549 /* Sanitize */ 1550 if (p_ucast->opcode == ECORE_FILTER_MOVE) { 1551 DP_NOTICE(p_hwfn, true, 1552 "VFs don't support Moving of filters\n"); 1553 return ECORE_INVAL; 1554 } 1555 1556 /* clear mailbox and prep first tlv */ 1557 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req)); 1558 req->opcode = (u8)p_ucast->opcode; 1559 req->type = (u8)p_ucast->type; 1560 OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN); 1561 req->vlan = p_ucast->vlan; 1562 1563 /* add list termination tlv */ 1564 ecore_add_tlv(&p_iov->offset, 1565 CHANNEL_TLV_LIST_END, 1566 sizeof(struct channel_list_end_tlv)); 1567 1568 resp = &p_iov->pf2vf_reply->default_resp; 1569 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1570 if (rc) 1571 goto exit; 1572 1573 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1574 rc = ECORE_AGAIN; 1575 goto exit; 1576 } 1577 1578 exit: 1579 ecore_vf_pf_req_end(p_hwfn, rc); 1580 1581 return rc; 1582 } 1583 1584 enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn) 1585 { 1586 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1587 struct pfvf_def_resp_tlv *resp = 
&p_iov->pf2vf_reply->default_resp; 1588 enum _ecore_status_t rc; 1589 1590 /* clear mailbox and prep first tlv */ 1591 ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP, 1592 sizeof(struct vfpf_first_tlv)); 1593 1594 /* add list termination tlv */ 1595 ecore_add_tlv(&p_iov->offset, 1596 CHANNEL_TLV_LIST_END, 1597 sizeof(struct channel_list_end_tlv)); 1598 1599 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1600 if (rc) 1601 goto exit; 1602 1603 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 1604 rc = ECORE_INVAL; 1605 goto exit; 1606 } 1607 1608 exit: 1609 ecore_vf_pf_req_end(p_hwfn, rc); 1610 1611 return rc; 1612 } 1613 1614 enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn, 1615 u16 *p_coal, 1616 struct ecore_queue_cid *p_cid) 1617 { 1618 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1619 struct pfvf_read_coal_resp_tlv *resp; 1620 struct vfpf_read_coal_req_tlv *req; 1621 enum _ecore_status_t rc; 1622 1623 /* clear mailbox and prep header tlv */ 1624 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, 1625 sizeof(*req)); 1626 req->qid = p_cid->rel.queue_id; 1627 req->is_rx = p_cid->b_is_rx ? 1 : 0; 1628 1629 ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END, 1630 sizeof(struct channel_list_end_tlv)); 1631 resp = &p_iov->pf2vf_reply->read_coal_resp; 1632 1633 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1634 if (rc != ECORE_SUCCESS) 1635 goto exit; 1636 1637 if (resp->hdr.status != PFVF_STATUS_SUCCESS) 1638 goto exit; 1639 1640 *p_coal = resp->coal; 1641 exit: 1642 ecore_vf_pf_req_end(p_hwfn, rc); 1643 1644 return rc; 1645 } 1646 1647 enum _ecore_status_t 1648 ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal, 1649 struct ecore_queue_cid *p_cid) 1650 { 1651 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1652 struct vfpf_update_coalesce *req; 1653 struct pfvf_def_resp_tlv *resp; 1654 enum _ecore_status_t rc; 1655 1656 /* clear mailbox and prep header tlv */ 1657 req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, 1658 sizeof(*req)); 1659 1660 req->rx_coal = rx_coal; 1661 req->tx_coal = tx_coal; 1662 req->qid = p_cid->rel.queue_id; 1663 1664 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1665 "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n", 1666 rx_coal, tx_coal, req->qid); 1667 1668 /* add list termination tlv */ 1669 ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END, 1670 sizeof(struct channel_list_end_tlv)); 1671 1672 resp = &p_iov->pf2vf_reply->default_resp; 1673 rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); 1674 1675 if (rc != ECORE_SUCCESS) 1676 goto exit; 1677 1678 if (resp->hdr.status != PFVF_STATUS_SUCCESS) 1679 goto exit; 1680 1681 p_hwfn->p_dev->rx_coalesce_usecs = rx_coal; 1682 p_hwfn->p_dev->tx_coalesce_usecs = tx_coal; 1683 1684 exit: 1685 ecore_vf_pf_req_end(p_hwfn, rc); 1686 return rc; 1687 } 1688 1689 enum _ecore_status_t 1690 ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu) 1691 { 1692 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1693 struct vfpf_update_mtu_tlv *p_req; 1694 struct pfvf_def_resp_tlv *p_resp; 1695 enum _ecore_status_t rc; 1696 1697 if (!mtu) 1698 return ECORE_INVAL; 1699 1700 /* clear mailbox and prep header tlv */ 1701 p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_MTU, 1702 sizeof(*p_req)); 1703 p_req->mtu = mtu; 1704 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1705 "Requesting MTU update to %d\n", mtu); 1706 1707 /* add list termination tlv */ 1708 ecore_add_tlv(&p_iov->offset, 1709 CHANNEL_TLV_LIST_END, 1710 
sizeof(struct channel_list_end_tlv)); 1711 1712 p_resp = &p_iov->pf2vf_reply->default_resp; 1713 rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); 1714 if (p_resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) 1715 rc = ECORE_INVAL; 1716 1717 ecore_vf_pf_req_end(p_hwfn, rc); 1718 1719 return rc; 1720 } 1721 1722 u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, 1723 u16 sb_id) 1724 { 1725 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1726 1727 if (!p_iov) { 1728 DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n"); 1729 return 0; 1730 } 1731 1732 return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; 1733 } 1734 1735 void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn, 1736 u16 sb_id, struct ecore_sb_info *p_sb) 1737 { 1738 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1739 1740 if (!p_iov) { 1741 DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n"); 1742 return; 1743 } 1744 1745 if (sb_id >= PFVF_MAX_SBS_PER_VF) { 1746 DP_NOTICE(p_hwfn, true, "Can't configure SB %04x\n", sb_id); 1747 return; 1748 } 1749 1750 p_iov->sbs_info[sb_id] = p_sb; 1751 } 1752 1753 enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn, 1754 u8 *p_change) 1755 { 1756 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; 1757 struct ecore_bulletin_content shadow; 1758 u32 crc, crc_size; 1759 1760 crc_size = sizeof(p_iov->bulletin.p_virt->crc); 1761 *p_change = 0; 1762 1763 /* Need to guarantee PF is not in the middle of writing it */ 1764 OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size); 1765 1766 /* If version did not update, no need to do anything */ 1767 if (shadow.version == p_iov->bulletin_shadow.version) 1768 return ECORE_SUCCESS; 1769 1770 /* Verify the bulletin we see is valid */ 1771 crc = OSAL_CRC32(0, (u8 *)&shadow + crc_size, 1772 p_iov->bulletin.size - crc_size); 1773 if (crc != shadow.crc) 1774 return ECORE_AGAIN; 1775 1776 /* Set the shadow bulletin and process it */ 1777 OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size); 1778 1779 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, 1780 "Read a bulletin update %08x\n", shadow.version); 1781 1782 *p_change = 1; 1783 1784 return ECORE_SUCCESS; 1785 } 1786 1787 void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params, 1788 struct ecore_bulletin_content *p_bulletin) 1789 { 1790 OSAL_MEMSET(p_params, 0, sizeof(*p_params)); 1791 1792 p_params->speed.autoneg = p_bulletin->req_autoneg; 1793 p_params->speed.advertised_speeds = p_bulletin->req_adv_speed; 1794 p_params->speed.forced_speed = p_bulletin->req_forced_speed; 1795 p_params->pause.autoneg = p_bulletin->req_autoneg_pause; 1796 p_params->pause.forced_rx = p_bulletin->req_forced_rx; 1797 p_params->pause.forced_tx = p_bulletin->req_forced_tx; 1798 p_params->loopback_mode = p_bulletin->req_loopback; 1799 } 1800 1801 void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, 1802 struct ecore_mcp_link_params *params) 1803 { 1804 __ecore_vf_get_link_params(params, 1805 &p_hwfn->vf_iov_info->bulletin_shadow); 1806 } 1807 1808 void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link, 1809 struct ecore_bulletin_content *p_bulletin) 1810 { 1811 OSAL_MEMSET(p_link, 0, sizeof(*p_link)); 1812 1813 p_link->link_up = p_bulletin->link_up; 1814 p_link->speed = p_bulletin->speed; 1815 p_link->full_duplex = p_bulletin->full_duplex; 1816 p_link->an = p_bulletin->autoneg; 1817 p_link->an_complete = p_bulletin->autoneg_complete; 1818 p_link->parallel_detection = p_bulletin->parallel_detection; 1819 p_link->pfc_enabled = 
p_bulletin->pfc_enabled; 1820 p_link->partner_adv_speed = p_bulletin->partner_adv_speed; 1821 p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en; 1822 p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en; 1823 p_link->partner_adv_pause = p_bulletin->partner_adv_pause; 1824 p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault; 1825 } 1826 1827 void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, 1828 struct ecore_mcp_link_state *link) 1829 { 1830 __ecore_vf_get_link_state(link, 1831 &p_hwfn->vf_iov_info->bulletin_shadow); 1832 } 1833 1834 void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps, 1835 struct ecore_bulletin_content *p_bulletin) 1836 { 1837 OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps)); 1838 p_link_caps->speed_capabilities = p_bulletin->capability_speed; 1839 } 1840 1841 void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, 1842 struct ecore_mcp_link_capabilities *p_link_caps) 1843 { 1844 __ecore_vf_get_link_caps(p_link_caps, 1845 &p_hwfn->vf_iov_info->bulletin_shadow); 1846 } 1847 1848 void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs) 1849 { 1850 *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs; 1851 } 1852 1853 void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn, 1854 u8 *num_txqs) 1855 { 1856 *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs; 1857 } 1858 1859 void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac) 1860 { 1861 OSAL_MEMCPY(port_mac, 1862 p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, 1863 ETH_ALEN); 1864 } 1865 1866 void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn, 1867 u8 *num_vlan_filters) 1868 { 1869 struct ecore_vf_iov *p_vf; 1870 1871 p_vf = p_hwfn->vf_iov_info; 1872 *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters; 1873 } 1874 1875 void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn, 1876 u32 *num_sbs) 1877 { 1878 struct ecore_vf_iov *p_vf; 1879 1880 p_vf = p_hwfn->vf_iov_info; 1881 *num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs; 1882 } 1883 1884 void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn, 1885 u32 *num_mac_filters) 1886 { 1887 struct ecore_vf_iov *p_vf = p_hwfn->vf_iov_info; 1888 1889 *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters; 1890 } 1891 1892 bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac) 1893 { 1894 struct ecore_bulletin_content *bulletin; 1895 1896 bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; 1897 if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED))) 1898 return true; 1899 1900 /* Forbid VF from changing a MAC enforced by PF */ 1901 if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN)) 1902 return false; 1903 1904 return false; 1905 } 1906 1907 bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac, 1908 u8 *p_is_forced) 1909 { 1910 struct ecore_bulletin_content *bulletin; 1911 1912 bulletin = &hwfn->vf_iov_info->bulletin_shadow; 1913 1914 if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) { 1915 if (p_is_forced) 1916 *p_is_forced = 1; 1917 } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) { 1918 if (p_is_forced) 1919 *p_is_forced = 0; 1920 } else { 1921 return false; 1922 } 1923 1924 OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN); 1925 1926 return true; 1927 } 1928 1929 void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn, 1930 u16 *p_vxlan_port, 1931 u16 *p_geneve_port) 1932 { 1933 struct ecore_bulletin_content *p_bulletin; 1934 1935 p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; 1936 1937 *p_vxlan_port = 
p_bulletin->vxlan_udp_port; 1938 *p_geneve_port = p_bulletin->geneve_udp_port; 1939 } 1940 1941 bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid) 1942 { 1943 struct ecore_bulletin_content *bulletin; 1944 1945 bulletin = &hwfn->vf_iov_info->bulletin_shadow; 1946 1947 if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED))) 1948 return false; 1949 1950 if (dst_pvid) 1951 *dst_pvid = bulletin->pvid; 1952 1953 return true; 1954 } 1955 1956 bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn) 1957 { 1958 return p_hwfn->vf_iov_info->b_pre_fp_hsi; 1959 } 1960 1961 void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn, 1962 u16 *fw_major, u16 *fw_minor, u16 *fw_rev, 1963 u16 *fw_eng) 1964 { 1965 struct pf_vf_pfdev_info *info; 1966 1967 info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info; 1968 1969 *fw_major = info->fw_major; 1970 *fw_minor = info->fw_minor; 1971 *fw_rev = info->fw_rev; 1972 *fw_eng = info->fw_eng; 1973 } 1974 1975 #ifdef CONFIG_ECORE_SW_CHANNEL 1976 void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw) 1977 { 1978 p_hwfn->vf_iov_info->b_hw_channel = b_is_hw; 1979 } 1980 #endif 1981
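
/* The PF-to-VF bulletin board is polled rather than interrupt driven.  A
 * minimal polling sketch (illustrative only; a real caller would run this
 * periodically):
 *
 *	struct ecore_mcp_link_state link;
 *	u8 change = 0;
 *
 *	if (ecore_vf_read_bulletin(p_hwfn, &change) == ECORE_SUCCESS &&
 *	    change) {
 *		ecore_vf_get_link_state(p_hwfn, &link);
 *		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "link %s\n",
 *			   link.link_up ? "up" : "down");
 *	}
 */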