/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <fcntl.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "roc_api.h"
#include "roc_priv.h"

/* PCI Extended capability ID */
#define ROC_PCI_EXT_CAP_ID_SRIOV 0x10 /* SRIOV cap */

/* Single Root I/O Virtualization */
#define ROC_PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */

static void *
mbox_mem_map(off_t off, size_t size)
{
	void *va = MAP_FAILED;
	int mem_fd;

	if (size <= 0 || !off) {
		plt_err("Invalid mbox area off 0x%lx size %lu", off, size);
		goto error;
	}

	mem_fd = open("/dev/mem", O_RDWR);
	if (mem_fd < 0)
		goto error;

	va = plt_mmap(NULL, size, PLT_PROT_READ | PLT_PROT_WRITE,
		      PLT_MAP_SHARED, mem_fd, off);
	close(mem_fd);

	if (va == MAP_FAILED)
		plt_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd", size, mem_fd,
			(intmax_t)off);
error:
	return va;
}

static void
mbox_mem_unmap(void *va, size_t size)
{
	if (va)
		munmap(va, size);
}

static int
pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp)
{
	uint32_t timeout = 0, sleep = 1;
	struct mbox *mbox = dev->mbox;
	struct mbox_dev *mdev = &mbox->dev[0];

	volatile uint64_t int_status = 0;
	struct mbox_msghdr *msghdr;
	uint64_t off;
	int rc = 0;

	/* We need to disable PF interrupts. We are in timer interrupt */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* Send message */
	mbox_msg_send(mbox, 0);

	do {
		plt_delay_ms(sleep);
		timeout += sleep;
		if (timeout >= mbox->rsp_tmo) {
			plt_err("Message timeout: %dms", mbox->rsp_tmo);
			rc = -EIO;
			break;
		}
		int_status = plt_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	/* Clear */
	plt_write64(int_status, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	if (rc == 0) {
		/* Get message */
		off = mbox->rx_start +
		      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
		msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
		if (rsp)
			*rsp = msghdr;
		rc = msghdr->rc;
	}

	return rc;
}
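
/* Note: pf_af_sync_msg() is the synchronous AF mailbox variant used when
 * the AF<->PF mailbox interrupt cannot be relied upon (the caller is
 * already in an interrupt/alarm context), so it polls RVU_PF_INT directly.
 * Illustrative usage, assuming the request has already been queued on
 * dev->mbox (vf_flr_send_msg() further below is the real caller):
 *
 *	req = mbox_alloc_msg_vf_flr(dev->mbox);
 *	req->hdr.pcifunc = dev_pf_func(dev->pf, vf);
 *	rc = pf_af_sync_msg(dev, NULL);	 // NULL: response contents not needed
 */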

static int
af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
{
	uint32_t timeout = 0, sleep = 1;
	struct mbox *mbox = dev->mbox;
	struct mbox_dev *mdev = &mbox->dev[0];
	volatile uint64_t int_status;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_msghdr *rsp;
	uint64_t offset;
	size_t size;
	int i;

	/* We need to disable PF interrupts. We are in timer interrupt */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* Send message */
	mbox_msg_send(mbox, 0);

	do {
		plt_delay_ms(sleep);
		timeout++;
		if (timeout >= mbox->rsp_tmo) {
			plt_err("Routed messages %d timeout: %dms", num_msg,
				mbox->rsp_tmo);
			break;
		}
		int_status = plt_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	/* Clear */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	plt_spinlock_lock(&mdev->mbox_lock);

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs != num_msg)
		plt_err("Routed messages: %d received: %d", num_msg,
			req_hdr->num_msgs);

	/* Get messages from mbox */
	offset = mbox->rx_start +
		 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* Reserve PF/VF mbox message */
		size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
		rsp = mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
		if (!rsp) {
			plt_err("Failed to reserve VF%d message", vf);
			continue;
		}

		mbox_rsp_init(msg->id, rsp);

		/* Copy message from AF<->PF mbox to PF<->VF mbox */
		mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
			    (uint8_t *)msg + sizeof(struct mbox_msghdr),
			    size - sizeof(struct mbox_msghdr));

		/* Set status and sender pf_func data */
		rsp->rc = msg->rc;
		rsp->pcifunc = msg->pcifunc;

		/* Whenever a PF comes up, the AF sends it the link status, but
		 * no such event is sent to a VF when the VF comes up. Use the
		 * MBOX_MSG_NIX_LF_START_RX response from the AF for this
		 * purpose and forward the PF's link status to the VF.
		 */
		if (msg->id == MBOX_MSG_NIX_LF_START_RX) {
			/* Send link status to VF */
			struct cgx_link_user_info linfo;
			struct mbox_msghdr *vf_msg;
			size_t sz;

			/* Get the link status */
			memset(&linfo, 0, sizeof(struct cgx_link_user_info));
			if (dev->ops && dev->ops->link_status_get)
				dev->ops->link_status_get(dev->roc_nix, &linfo);

			sz = PLT_ALIGN(mbox_id2size(MBOX_MSG_CGX_LINK_EVENT),
				       MBOX_MSG_ALIGN);
			/* Prepare the message to be sent */
			vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz);
			if (vf_msg) {
				mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg);
				memcpy((uint8_t *)vf_msg +
				       sizeof(struct mbox_msghdr), &linfo,
				       sizeof(struct cgx_link_user_info));

				vf_msg->rc = msg->rc;
				vf_msg->pcifunc = msg->pcifunc;
				/* Send to VF */
				mbox_msg_send(&dev->mbox_vfpf_up, vf);
			}
		}

		offset = mbox->rx_start + msg->next_msgoff;
	}
	plt_spinlock_unlock(&mdev->mbox_lock);

	return req_hdr->num_msgs;
}

static int
vf_pf_process_msgs(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = &dev->mbox_vfpf;
	struct mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int offset, routed = 0;
	size_t size;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (!req_hdr->num_msgs)
		return 0;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* RVU_PF_FUNC_S */
		msg->pcifunc = dev_pf_func(dev->pf, vf);

		if (msg->id == MBOX_MSG_READY) {
			struct ready_msg_rsp *rsp;
			uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;

			/* Handle READY message in PF */
			dev->active_vfs[vf / max_bits] |=
				BIT_ULL(vf % max_bits);
			rsp = (struct ready_msg_rsp *)mbox_alloc_msg(
				mbox, vf, sizeof(*rsp));
			if (!rsp) {
				plt_err("Failed to alloc VF%d READY message",
					vf);
				continue;
			}

			mbox_rsp_init(msg->id, rsp);

			/* PF/VF function ID */
			rsp->hdr.pcifunc = msg->pcifunc;
			rsp->hdr.rc = 0;
		} else {
			struct mbox_msghdr *af_req;
			/* Reserve AF/PF mbox message */
			size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
			af_req = mbox_alloc_msg(dev->mbox, 0, size);
			if (af_req == NULL)
				return -ENOSPC;
			mbox_req_init(msg->id, af_req);

			/* Copy message from VF<->PF mbox to PF<->AF mbox */
			mbox_memcpy((uint8_t *)af_req +
					    sizeof(struct mbox_msghdr),
				    (uint8_t *)msg + sizeof(struct mbox_msghdr),
				    size - sizeof(struct mbox_msghdr));
			af_req->pcifunc = msg->pcifunc;
			routed++;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	if (routed > 0) {
		plt_base_dbg("pf:%d routed %d messages from vf:%d to AF",
			     dev->pf, routed, vf);
		af_pf_wait_msg(dev, vf, routed);
		mbox_reset(dev->mbox, 0);
	}

	/* Send mbox responses to VF */
	if (mdev->num_msgs) {
		plt_base_dbg("pf:%d reply %d messages to vf:%d", dev->pf,
			     mdev->num_msgs, vf);
		mbox_msg_send(mbox, vf);
	}

	return i;
}

static int
vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = &dev->mbox_vfpf_up;
	struct mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return 0;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		/* RVU_PF_FUNC_S */
		msg->pcifunc = dev_pf_func(dev->pf, vf);

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				     msg->id, mbox_id2name(msg->id),
				     msg->pcifunc, dev_get_pf(msg->pcifunc),
				     dev_get_vf(msg->pcifunc));
			break;
		case MBOX_MSG_CGX_PTP_RX_INFO:
			plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				     msg->id, mbox_id2name(msg->id),
				     msg->pcifunc, dev_get_pf(msg->pcifunc),
				     dev_get_vf(msg->pcifunc));
			break;
		default:
			plt_err("Not handled UP msg 0x%x (%s) func:0x%x",
				msg->id, mbox_id2name(msg->id), msg->pcifunc);
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}
	mbox_reset(mbox, vf);
	mdev->msgs_acked = msgs_acked;
	plt_wmb();

	return i;
}

static void
roc_vf_pf_mbox_handle_msg(void *param)
{
	uint16_t vf, max_vf, max_bits;
	struct dev *dev = param;

	max_bits = sizeof(dev->intr.bits[0]) * sizeof(uint64_t);
	max_vf = max_bits * MAX_VFPF_DWORD_BITS;

	for (vf = 0; vf < max_vf; vf++) {
		if (dev->intr.bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
			plt_base_dbg("Process vf:%d request (pf:%d, vf:%d)", vf,
				     dev->pf, dev->vf);
			vf_pf_process_msgs(dev, vf);
			/* UP messages */
			vf_pf_process_up_msgs(dev, vf);
			dev->intr.bits[vf / max_bits] &=
				~(BIT_ULL(vf % max_bits));
		}
	}
	dev->timer_set = 0;
}

static void
roc_vf_pf_mbox_irq(void *param)
{
	struct dev *dev = param;
	bool alarm_set = false;
	uint64_t intr;
	int vfpf;

	for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
		intr = plt_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
		if (!intr)
			continue;

		plt_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
			     vfpf, intr, dev->pf, dev->vf);

		/* Save and clear intr bits */
		dev->intr.bits[vfpf] |= intr;
		plt_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
		alarm_set = true;
	}

	if (!dev->timer_set && alarm_set) {
		dev->timer_set = 1;
		/* Start timer to handle messages */
		plt_alarm_set(VF_PF_MBOX_TIMER_MS, roc_vf_pf_mbox_handle_msg,
			      dev);
	}
}

static void
process_msgs(struct dev *dev, struct mbox *mbox)
{
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));

		switch (msg->id) {
			/* Add message IDs that are handled here */
		case MBOX_MSG_READY:
			/* Get our identity */
			dev->pf_func = msg->pcifunc;
			break;

		default:
			if (msg->rc)
				plt_err("Message (%s) response has err=%d",
					mbox_id2name(msg->id), msg->rc);
			break;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	mbox_reset(mbox, 0);
	/* Update acked if someone is waiting for a message */
	mdev->msgs_acked = msgs_acked;
	plt_wmb();
}
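
/* The mdev->msgs_acked store plus plt_wmb() at the end of process_msgs()
 * is what releases a caller that is synchronously waiting for AF
 * responses: the sender side (the generic poller in roc_mbox) spins until
 * msgs_acked catches up with the number of requests it queued. A rough
 * sketch of that wait, using only fields already referenced in this file:
 *
 *	while (mdev->num_msgs != mdev->msgs_acked)
 *		plt_delay_ms(1);
 */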

/* Copies the message received from AF and sends it to VF */
static void
pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
{
	uint16_t max_bits = sizeof(dev->active_vfs[0]) * sizeof(uint64_t);
	struct mbox *vf_mbox = &dev->mbox_vfpf_up;
	struct msg_req *msg = rec_msg;
	struct mbox_msghdr *vf_msg;
	uint16_t vf;
	size_t size;

	size = PLT_ALIGN(mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
	/* Send UP message to all VFs */
	for (vf = 0; vf < vf_mbox->ndevs; vf++) {
		/* VF active */
		if (!(dev->active_vfs[vf / max_bits] & BIT_ULL(vf % max_bits)))
			continue;

		plt_base_dbg("(%s) size: %zx to VF: %d",
			     mbox_id2name(msg->hdr.id), size, vf);

		/* Reserve PF/VF mbox message */
		vf_msg = mbox_alloc_msg(vf_mbox, vf, size);
		if (!vf_msg) {
			plt_err("Failed to alloc VF%d UP message", vf);
			continue;
		}
		mbox_req_init(msg->hdr.id, vf_msg);

		/*
		 * Copy message from AF<->PF UP mbox
		 * to PF<->VF UP mbox
		 */
		mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr),
			    (uint8_t *)msg + sizeof(struct mbox_msghdr),
			    size - sizeof(struct mbox_msghdr));

		vf_msg->rc = msg->hdr.rc;
		/* Set PF to be the sender */
		vf_msg->pcifunc = dev->pf_func;

		/* Send to VF */
		mbox_msg_send(vf_mbox, vf);
	}
}

static int
mbox_up_handler_cgx_link_event(struct dev *dev, struct cgx_link_info_msg *msg,
			       struct msg_rsp *rsp)
{
	struct cgx_link_user_info *linfo = &msg->link_info;
	void *roc_nix = dev->roc_nix;

	plt_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
		     linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
		     mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
		     dev_get_vf(msg->hdr.pcifunc));

	/* PF gets link notification from AF */
	if (dev_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(roc_nix, linfo);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets link up notification */
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(roc_nix, linfo);
	}

	rsp->hdr.rc = 0;
	return 0;
}

static int
mbox_up_handler_cgx_ptp_rx_info(struct dev *dev,
				struct cgx_ptp_rx_info_msg *msg,
				struct msg_rsp *rsp)
{
	void *roc_nix = dev->roc_nix;

	plt_base_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
		     msg->ptp_en ? "ENABLED" : "DISABLED", msg->hdr.id,
		     mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
		     dev_get_vf(msg->hdr.pcifunc));

	/* PF gets PTP notification from AF */
	if (dev_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(roc_nix, msg->ptp_en);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets PTP notification */
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(roc_nix, msg->ptp_en);
	}

	rsp->hdr.rc = 0;
	return 0;
}

static int
mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
{
	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != MBOX_REQ_SIG)
		return -EIO;

	switch (req->id) {
	default:
		reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
		break;
#define M(_name, _id, _fn_name, _req_type, _rsp_type)                          \
	case _id: {                                                            \
		struct _rsp_type *rsp;                                         \
		int err;                                                       \
		rsp = (struct _rsp_type *)mbox_alloc_msg(                      \
			&dev->mbox_up, 0, sizeof(struct _rsp_type));           \
		if (!rsp)                                                      \
			return -ENOMEM;                                        \
		rsp->hdr.id = _id;                                             \
		rsp->hdr.sig = MBOX_RSP_SIG;                                   \
		rsp->hdr.pcifunc = dev->pf_func;                               \
		rsp->hdr.rc = 0;                                               \
		err = mbox_up_handler_##_fn_name(dev, (struct _req_type *)req, \
						 rsp);                         \
		return err;                                                    \
	}
		MBOX_UP_CGX_MESSAGES
#undef M
	}

	return -ENODEV;
}

static void
process_msgs_up(struct dev *dev, struct mbox *mbox)
{
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int i, err, offset;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));
		err = mbox_process_msgs_up(dev, msg);
		if (err)
			plt_err("Error %d handling 0x%x (%s)", err, msg->id,
				mbox_id2name(msg->id));
		offset = mbox->rx_start + msg->next_msgoff;
	}
	/* Send mbox responses */
	if (mdev->num_msgs) {
		plt_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
		mbox_msg_send(mbox, 0);
	}
}

static void
roc_pf_vf_mbox_irq(void *param)
{
	struct dev *dev = param;
	uint64_t intr;

	intr = plt_read64(dev->bar2 + RVU_VF_INT);
	if (intr == 0)
		plt_base_dbg("Proceeding to check mbox UP messages if any");

	plt_write64(intr, dev->bar2 + RVU_VF_INT);
	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	process_msgs_up(dev, &dev->mbox_up);
}

static void
roc_af_pf_mbox_irq(void *param)
{
	struct dev *dev = param;
	uint64_t intr;

	intr = plt_read64(dev->bar2 + RVU_PF_INT);
	if (intr == 0)
		plt_base_dbg("Proceeding to check mbox UP messages if any");

	plt_write64(intr, dev->bar2 + RVU_PF_INT);
	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	process_msgs_up(dev, &dev->mbox_up);
}
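
/* Interrupt wiring done by the helpers below: on a PF, the AF<->PF mailbox
 * vector (RVU_PF_INT_VEC_AFPF_MBOX) is bound to roc_af_pf_mbox_irq() and the
 * two VF<->PF mailbox vectors (RVU_PF_INT_VEC_VFPF_MBOX0/1) to
 * roc_vf_pf_mbox_irq(); on a VF, the single RVU_VF_INT_VEC_MBOX vector is
 * bound to roc_pf_vf_mbox_irq().
 */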

static int
mbox_register_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int i, rc;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		plt_write64(~0ull,
			    dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	dev->timer_set = 0;

	/* MBOX interrupt for VF(0...63) <-> PF */
	rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_VFPF_MBOX0);

	if (rc) {
		plt_err("Fail to register PF(VF0-63) mbox irq");
		return rc;
	}
	/* MBOX interrupt for VF(64...127) <-> PF */
	rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_VFPF_MBOX1);

	if (rc) {
		plt_err("Fail to register PF(VF64-127) mbox irq");
		return rc;
	}
	/* MBOX interrupt AF <-> PF */
	rc = dev_irq_register(intr_handle, roc_af_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_AFPF_MBOX);
	if (rc) {
		plt_err("Fail to register AF<->PF mbox irq");
		return rc;
	}

	/* HW enable intr */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		plt_write64(~0ull,
			    dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));

	plt_write64(~0ull, dev->bar2 + RVU_PF_INT);
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	return rc;
}

static int
mbox_register_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int rc;

	/* Clear irq */
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* MBOX interrupt PF <-> VF */
	rc = dev_irq_register(intr_handle, roc_pf_vf_mbox_irq, dev,
			      RVU_VF_INT_VEC_MBOX);
	if (rc) {
		plt_err("Fail to register PF<->VF mbox irq");
		return rc;
	}

	/* HW enable intr */
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT);
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);

	return rc;
}

static int
mbox_register_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	if (dev_is_vf(dev))
		return mbox_register_vf_irq(pci_dev, dev);
	else
		return mbox_register_pf_irq(pci_dev, dev);
}

static void
mbox_unregister_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int i;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		plt_write64(~0ull,
			    dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	dev->timer_set = 0;

	plt_alarm_cancel(roc_vf_pf_mbox_handle_msg, dev);

	/* Unregister the interrupt handler for each vector */
	/* MBOX interrupt for VF(0...63) <-> PF */
	dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_VFPF_MBOX0);

	/* MBOX interrupt for VF(64...127) <-> PF */
	dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_VFPF_MBOX1);

	/* MBOX interrupt AF <-> PF */
	dev_irq_unregister(intr_handle, roc_af_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_AFPF_MBOX);
}

static void
mbox_unregister_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;

	/* Clear irq */
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* Unregister the interrupt handler */
	dev_irq_unregister(intr_handle, roc_pf_vf_mbox_irq, dev,
			   RVU_VF_INT_VEC_MBOX);
}

static void
mbox_unregister_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	if (dev_is_vf(dev))
		mbox_unregister_vf_irq(pci_dev, dev);
	else
		mbox_unregister_pf_irq(pci_dev, dev);
}

static int
vf_flr_send_msg(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = dev->mbox;
	struct msg_req *req;
	int rc;

	req = mbox_alloc_msg_vf_flr(mbox);
	if (req == NULL)
		return -ENOSPC;
	/* Overwrite pcifunc to indicate VF */
	req->hdr.pcifunc = dev_pf_func(dev->pf, vf);

	/* Sync message in interrupt context */
	rc = pf_af_sync_msg(dev, NULL);
	if (rc)
		plt_err("Failed to send VF FLR mbox msg, rc=%d", rc);

	return rc;
}

static void
roc_pf_vf_flr_irq(void *param)
{
	struct dev *dev = (struct dev *)param;
	uint16_t max_vf = 64, vf;
	uintptr_t bar2;
	uint64_t intr;
	int i;

	max_vf = (dev->maxvf > 0) ? dev->maxvf : 64;
	bar2 = dev->bar2;

	plt_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);

	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		intr = plt_read64(bar2 + RVU_PF_VFFLR_INTX(i));
		if (!intr)
			continue;

		for (vf = 0; vf < max_vf; vf++) {
			if (!(intr & (1ULL << vf)))
				continue;

			plt_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d", i,
				     intr, (64 * i + vf));
			/* Clear interrupt */
			plt_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
			/* Disable the interrupt */
			plt_write64(BIT_ULL(vf),
				    bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
			/* Inform AF about VF reset */
			vf_flr_send_msg(dev, vf);

			/* Signal FLR finish */
			plt_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
			/* Enable interrupt */
			plt_write64(~0ull, bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
		}
	}
}

static int
vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int i;

	plt_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));

	dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
			   RVU_PF_INT_VEC_VFFLR0);

	dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
			   RVU_PF_INT_VEC_VFFLR1);

	return 0;
}

static int
vf_flr_register_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *handle = pci_dev->intr_handle;
	int i, rc;

	plt_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);

	rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
			      RVU_PF_INT_VEC_VFFLR0);
	if (rc)
		plt_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);

	rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
			      RVU_PF_INT_VEC_VFFLR1);
	if (rc)
		plt_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);

	/* Enable HW interrupt */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
	}
	return 0;
}
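
/* Summary of the VF FLR path set up above: when a VF undergoes a Function
 * Level Reset, roc_pf_vf_flr_irq() fires on RVU_PF_VFFLR_INTX, clears and
 * masks the per-VF bit, informs the AF through vf_flr_send_msg() (a
 * synchronous mailbox request), acknowledges the reset by writing
 * RVU_PF_VFTRPENDX and finally re-enables the FLR interrupt.
 */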

static void
clear_rvum_interrupts(struct dev *dev)
{
	uint64_t intr;
	int i;

	if (dev_is_vf(dev)) {
		/* Clear VF mbox interrupt */
		intr = plt_read64(dev->bar2 + RVU_VF_INT);
		if (intr)
			plt_write64(intr, dev->bar2 + RVU_VF_INT);
	} else {
		/* Clear AF PF interrupt line */
		intr = plt_read64(dev->bar2 + RVU_PF_INT);
		if (intr)
			plt_write64(intr, dev->bar2 + RVU_PF_INT);
		for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
			/* Clear MBOX interrupts */
			intr = plt_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(i));
			if (intr)
				plt_write64(intr,
					    dev->bar2 +
						    RVU_PF_VFPF_MBOX_INTX(i));
			/* Clear VF FLR interrupts */
			intr = plt_read64(dev->bar2 + RVU_PF_VFFLR_INTX(i));
			if (intr)
				plt_write64(intr,
					    dev->bar2 + RVU_PF_VFFLR_INTX(i));
		}
	}
}

int
dev_active_vfs(struct dev *dev)
{
	int i, count = 0;

	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		count += __builtin_popcountll(dev->active_vfs[i]);

	return count;
}

static void
dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev)
{
	switch (pci_dev->id.device_id) {
	case PCI_DEVID_CNXK_RVU_PF:
		break;
	case PCI_DEVID_CNXK_RVU_SSO_TIM_VF:
	case PCI_DEVID_CNXK_RVU_NPA_VF:
	case PCI_DEVID_CN10K_RVU_CPT_VF:
	case PCI_DEVID_CN9K_RVU_CPT_VF:
	case PCI_DEVID_CNXK_RVU_AF_VF:
	case PCI_DEVID_CNXK_RVU_VF:
	case PCI_DEVID_CNXK_RVU_SDP_VF:
		dev->hwcap |= DEV_HWCAP_F_VF;
		break;
	}
}

static uintptr_t
dev_vf_mbase_get(struct plt_pci_device *pci_dev, struct dev *dev)
{
	void *vf_mbase = NULL;
	uintptr_t pa;

	if (dev_is_vf(dev))
		return 0;

	/* For CN10K onwards, it is just after PF MBOX */
	if (!roc_model_is_cn9k())
		return dev->bar4 + MBOX_SIZE;

	pa = plt_read64(dev->bar2 + RVU_PF_VF_BAR4_ADDR);
	if (!pa) {
		plt_err("Invalid VF mbox base pa");
		return pa;
	}

	vf_mbase = mbox_mem_map(pa, MBOX_SIZE * pci_dev->max_vfs);
	if (vf_mbase == MAP_FAILED) {
		plt_err("Failed to mmap vf mbase at pa 0x%lx, rc=%d", pa,
			errno);
		return 0;
	}
	return (uintptr_t)vf_mbase;
}

static void
dev_vf_mbase_put(struct plt_pci_device *pci_dev, uintptr_t vf_mbase)
{
	if (!vf_mbase || !pci_dev->max_vfs || !roc_model_is_cn9k())
		return;

	mbox_mem_unmap((void *)vf_mbase, MBOX_SIZE * pci_dev->max_vfs);
}

static int
dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova)
{
	struct lmtst_tbl_setup_req *req;

	req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
	if (!req)
		return -ENOSPC;

	/* The pcifunc field identifies the primary pcifunc whose LMT address
	 * is to be shared. When the call carries a valid IOVA, this field is
	 * unused.
	 */
	req->pcifunc = valid_iova ? 0 : idev_lmt_pffunc_get();
	req->use_local_lmt_region = valid_iova;
	req->lmt_iova = iova;

	return mbox_process(mbox);
}
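
/* dev_setup_shared_lmt_region() is used in two modes by dev_lmt_setup()
 * below: with valid_iova == true to register this process's own LMT memzone
 * with the AF, and with valid_iova == false to request that this pf_func be
 * aliased onto the primary pf_func's LMT table (shared LMT mode).
 */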

/* Total no of lines * size of each lmtline */
#define LMT_REGION_SIZE (ROC_NUM_LMT_LINES * ROC_LMT_LINE_SZ)
static int
dev_lmt_setup(struct dev *dev)
{
	char name[PLT_MEMZONE_NAMESIZE];
	const struct plt_memzone *mz;
	struct idev_cfg *idev;
	int rc;

	if (roc_model_is_cn9k()) {
		dev->lmt_base = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
		return 0;
	}

	/* [CN10K, .) */

	/* Set common lmt region from the second pf_func onwards. */
	if (!dev->disable_shared_lmt && idev_lmt_pffunc_get() &&
	    dev->pf_func != idev_lmt_pffunc_get()) {
		rc = dev_setup_shared_lmt_region(dev->mbox, false, 0);
		if (!rc) {
			/* On success, update the lmt base of secondary
			 * pf_funcs with the primary pf_func's lmt base.
			 */
			dev->lmt_base = roc_idev_lmt_base_addr_get();
			return rc;
		}
		plt_err("Failed to setup shared lmt region, pf_func %d err %d "
			"Using respective LMT region per pf func",
			dev->pf_func, rc);
	}

	/* Allocate memory for the LMT region */
	sprintf(name, "LMT_MAP%x", dev->pf_func);

	/* Set the alignment to ensure correct masking when resetting to the
	 * lmt base of a core after all lmt lines under that core are used.
	 * An alignment of LMT_REGION_SIZE handles the case where all lines
	 * are used by one core.
	 */
	mz = plt_lmt_region_reserve_aligned(name, LMT_REGION_SIZE,
					    LMT_REGION_SIZE);
	if (!mz) {
		plt_err("Memory alloc failed: %s", strerror(errno));
		goto fail;
	}

	/* Share the IOVA address with the kernel */
	rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova);
	if (rc) {
		errno = rc;
		goto free;
	}

	dev->lmt_base = mz->iova;
	dev->lmt_mz = mz;
	/* The base LMT address should be chosen only from those pci funcs
	 * which participate in LMT shared mode.
	 */
	if (!dev->disable_shared_lmt) {
		idev = idev_get_cfg();
		if (!idev) {
			errno = EFAULT;
			goto free;
		}

		if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) {
			idev->lmt_base_addr = dev->lmt_base;
			idev->lmt_pf_func = dev->pf_func;
			idev->num_lmtlines = RVU_LMT_LINE_MAX;
		}
	}

	return 0;
free:
	plt_memzone_free(mz);
fail:
	return -errno;
}

static bool
dev_cache_line_size_valid(void)
{
	if (roc_model_is_cn9k()) {
		if (PLT_CACHE_LINE_SIZE != 128) {
			plt_err("Cache line size of %d is wrong for CN9K",
				PLT_CACHE_LINE_SIZE);
			return false;
		}
	} else if (roc_model_is_cn10k()) {
		if (PLT_CACHE_LINE_SIZE == 128) {
			plt_warn("Cache line size of %d might affect performance",
				 PLT_CACHE_LINE_SIZE);
		} else if (PLT_CACHE_LINE_SIZE != 64) {
			plt_err("Cache line size of %d is wrong for CN10K",
				PLT_CACHE_LINE_SIZE);
			return false;
		}
	}

	return true;
}
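
/* dev_init() below performs the common RVU bring-up shared by the ROC block
 * drivers: map BAR2/BAR4, initialize the AF<->PF (or PF<->VF) mailboxes,
 * register mailbox IRQs, exchange the READY message to learn pf_func,
 * optionally set up the PF<->VF mailbox regions and VF FLR handlers, attach
 * the NPA LF and configure the LMT base. A hypothetical caller (the real
 * callers are the per-block init paths) would look roughly like:
 *
 *	rc = dev_init(&dev, pci_dev);
 *	if (rc)
 *		return rc;
 *	... block-specific setup ...
 *	rc = dev_fini(&dev, pci_dev);	 // on teardown
 */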

int
dev_init(struct dev *dev, struct plt_pci_device *pci_dev)
{
	int direction, up_direction, rc;
	uintptr_t bar2, bar4, mbox;
	uintptr_t vf_mbase = 0;
	uint64_t intr_offset;

	if (!dev_cache_line_size_valid())
		return -EFAULT;

	bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
	bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
	if (bar2 == 0 || bar4 == 0) {
		plt_err("Failed to get PCI bars");
		rc = -ENODEV;
		goto error;
	}

	/* Trigger fault on bar2 and bar4 regions
	 * to avoid BUG_ON in remap_pfn_range()
	 * in latest kernel.
	 */
	*(volatile uint64_t *)bar2;
	*(volatile uint64_t *)bar4;

	/* Check ROC model supported */
	if (roc_model->flag == 0) {
		rc = UTIL_ERR_INVALID_MODEL;
		goto error;
	}

	dev->maxvf = pci_dev->max_vfs;
	dev->bar2 = bar2;
	dev->bar4 = bar4;
	dev_vf_hwcap_update(pci_dev, dev);

	if (dev_is_vf(dev)) {
		mbox = (roc_model_is_cn9k() ?
			bar4 : (bar2 + RVU_VF_MBOX_REGION));
		direction = MBOX_DIR_VFPF;
		up_direction = MBOX_DIR_VFPF_UP;
		intr_offset = RVU_VF_INT;
	} else {
		mbox = bar4;
		direction = MBOX_DIR_PFAF;
		up_direction = MBOX_DIR_PFAF_UP;
		intr_offset = RVU_PF_INT;
	}

	/* Clear all RVUM interrupts */
	clear_rvum_interrupts(dev);

	/* Initialize the local mbox */
	rc = mbox_init(&dev->mbox_local, mbox, bar2, direction, 1, intr_offset);
	if (rc)
		goto error;
	dev->mbox = &dev->mbox_local;

	rc = mbox_init(&dev->mbox_up, mbox, bar2, up_direction, 1, intr_offset);
	if (rc)
		goto mbox_fini;

	/* Register mbox interrupts */
	rc = mbox_register_irq(pci_dev, dev);
	if (rc)
		goto mbox_fini;

	/* Check the readiness of PF/VF */
	rc = send_ready_msg(dev->mbox, &dev->pf_func);
	if (rc)
		goto mbox_unregister;

	dev->pf = dev_get_pf(dev->pf_func);
	dev->vf = dev_get_vf(dev->pf_func);
	memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));

	/* Allocate memory for device ops */
	dev->ops = plt_zmalloc(sizeof(struct dev_ops), 0);
	if (dev->ops == NULL) {
		rc = -ENOMEM;
		goto mbox_unregister;
	}

	/* Found VF devices in a PF device */
	if (pci_dev->max_vfs > 0) {
		/* Remap mbox area for all vf's */
		vf_mbase = dev_vf_mbase_get(pci_dev, dev);
		if (!vf_mbase) {
			rc = -ENODEV;
			goto mbox_unregister;
		}
		/* Init mbox object */
		rc = mbox_init(&dev->mbox_vfpf, vf_mbase, bar2, MBOX_DIR_PFVF,
			       pci_dev->max_vfs, intr_offset);
		if (rc)
			goto iounmap;

		/* PF -> VF UP messages */
		rc = mbox_init(&dev->mbox_vfpf_up, vf_mbase, bar2,
			       MBOX_DIR_PFVF_UP, pci_dev->max_vfs, intr_offset);
		if (rc)
			goto iounmap;
	}

	/* Register VF-FLR irq handlers */
	if (!dev_is_vf(dev)) {
		rc = vf_flr_register_irqs(pci_dev, dev);
		if (rc)
			goto iounmap;
	}
	dev->mbox_active = 1;

	rc = npa_lf_init(dev, pci_dev);
	if (rc)
		goto iounmap;

	/* Setup LMT line base */
	rc = dev_lmt_setup(dev);
	if (rc)
		goto iounmap;

	return rc;
iounmap:
	dev_vf_mbase_put(pci_dev, vf_mbase);
mbox_unregister:
	mbox_unregister_irq(pci_dev, dev);
	if (dev->ops)
		plt_free(dev->ops);
mbox_fini:
	mbox_fini(dev->mbox);
	mbox_fini(&dev->mbox_up);
error:
	return rc;
}

int
dev_fini(struct dev *dev, struct plt_pci_device *pci_dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	struct mbox *mbox;

	/* Check if this dev hosts npalf and has 1+ refs */
	if (idev_npa_lf_active(dev) > 1)
		return -EAGAIN;

	/* Clear references to this pci dev */
	npa_lf_fini();

	/* Releasing memory allocated for lmt region */
	if (dev->lmt_mz)
		plt_memzone_free(dev->lmt_mz);

	mbox_unregister_irq(pci_dev, dev);

	if (!dev_is_vf(dev))
		vf_flr_unregister_irqs(pci_dev, dev);
	/* Release PF - VF */
	mbox = &dev->mbox_vfpf;
	if (mbox->hwbase && mbox->dev)
		dev_vf_mbase_put(pci_dev, mbox->hwbase);

	if (dev->ops)
		plt_free(dev->ops);

	mbox_fini(mbox);
	mbox = &dev->mbox_vfpf_up;
	mbox_fini(mbox);

	/* Release PF - AF */
	mbox = dev->mbox;
	mbox_fini(mbox);
	mbox = &dev->mbox_up;
	mbox_fini(mbox);
	dev->mbox_active = 0;

	/* Disable MSIX vectors */
	dev_irqs_disable(intr_handle);
	return 0;
}