1665ff1ccSJerin Jacob /* SPDX-License-Identifier: BSD-3-Clause 2665ff1ccSJerin Jacob * Copyright(C) 2021 Marvell. 3665ff1ccSJerin Jacob */ 4665ff1ccSJerin Jacob 5665ff1ccSJerin Jacob #include <fcntl.h> 6665ff1ccSJerin Jacob #include <inttypes.h> 7665ff1ccSJerin Jacob #include <string.h> 8665ff1ccSJerin Jacob #include <sys/mman.h> 9665ff1ccSJerin Jacob #include <unistd.h> 10665ff1ccSJerin Jacob 11665ff1ccSJerin Jacob #include "roc_api.h" 12665ff1ccSJerin Jacob #include "roc_priv.h" 13665ff1ccSJerin Jacob 14665ff1ccSJerin Jacob /* PCI Extended capability ID */ 15665ff1ccSJerin Jacob #define ROC_PCI_EXT_CAP_ID_SRIOV 0x10 /* SRIOV cap */ 16665ff1ccSJerin Jacob 17665ff1ccSJerin Jacob /* Single Root I/O Virtualization */ 18665ff1ccSJerin Jacob #define ROC_PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */ 19665ff1ccSJerin Jacob 201c7a4d37SHarman Kalra /* VF Mbox handler thread name */ 21a7ba40b2SThomas Monjalon #define MBOX_HANDLER_NAME_MAX_LEN RTE_THREAD_INTERNAL_NAME_SIZE 221c7a4d37SHarman Kalra 231c7a4d37SHarman Kalra /* VF interrupt message pending bits - mbox or flr */ 241c7a4d37SHarman Kalra #define ROC_DEV_MBOX_PEND BIT_ULL(0) 251c7a4d37SHarman Kalra #define ROC_DEV_FLR_PEND BIT_ULL(1) 267816df79SHarman Kalra 279bd368caSHarman Kalra /* RVU PF interrupt status as received from AF*/ 289bd368caSHarman Kalra #define RVU_PF_INTR_STATUS 0x3 299bd368caSHarman Kalra 30585bb3e5SJerin Jacob static void * 31585bb3e5SJerin Jacob mbox_mem_map(off_t off, size_t size) 32585bb3e5SJerin Jacob { 33585bb3e5SJerin Jacob void *va = MAP_FAILED; 34585bb3e5SJerin Jacob int mem_fd; 35585bb3e5SJerin Jacob 36585bb3e5SJerin Jacob if (size <= 0 || !off) { 37585bb3e5SJerin Jacob plt_err("Invalid mbox area off 0x%lx size %lu", off, size); 38585bb3e5SJerin Jacob goto error; 39585bb3e5SJerin Jacob } 40585bb3e5SJerin Jacob 41585bb3e5SJerin Jacob mem_fd = open("/dev/mem", O_RDWR); 42585bb3e5SJerin Jacob if (mem_fd < 0) 43585bb3e5SJerin Jacob goto error; 44585bb3e5SJerin Jacob 45585bb3e5SJerin Jacob va = 
plt_mmap(NULL, size, PLT_PROT_READ | PLT_PROT_WRITE, 46585bb3e5SJerin Jacob PLT_MAP_SHARED, mem_fd, off); 47585bb3e5SJerin Jacob close(mem_fd); 48585bb3e5SJerin Jacob 49585bb3e5SJerin Jacob if (va == MAP_FAILED) 50585bb3e5SJerin Jacob plt_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd", size, mem_fd, 51585bb3e5SJerin Jacob (intmax_t)off); 52585bb3e5SJerin Jacob error: 53585bb3e5SJerin Jacob return va; 54585bb3e5SJerin Jacob } 55585bb3e5SJerin Jacob 56585bb3e5SJerin Jacob static void 57585bb3e5SJerin Jacob mbox_mem_unmap(void *va, size_t size) 58585bb3e5SJerin Jacob { 59585bb3e5SJerin Jacob if (va) 60585bb3e5SJerin Jacob munmap(va, size); 61585bb3e5SJerin Jacob } 62585bb3e5SJerin Jacob 63585bb3e5SJerin Jacob static int 64585bb3e5SJerin Jacob pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp) 65585bb3e5SJerin Jacob { 66585bb3e5SJerin Jacob uint32_t timeout = 0, sleep = 1; 67585bb3e5SJerin Jacob struct mbox *mbox = dev->mbox; 68585bb3e5SJerin Jacob struct mbox_dev *mdev = &mbox->dev[0]; 69585bb3e5SJerin Jacob 70a3b48642SNithin Dabilpuram volatile uint64_t int_status = 0; 71585bb3e5SJerin Jacob struct mbox_msghdr *msghdr; 72585bb3e5SJerin Jacob uint64_t off; 73585bb3e5SJerin Jacob int rc = 0; 74585bb3e5SJerin Jacob 75585bb3e5SJerin Jacob /* We need to disable PF interrupts. 
We are in timer interrupt */ 7661deac72SHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1C); 77585bb3e5SJerin Jacob 78585bb3e5SJerin Jacob /* Send message */ 79585bb3e5SJerin Jacob mbox_msg_send(mbox, 0); 80585bb3e5SJerin Jacob 81585bb3e5SJerin Jacob do { 82585bb3e5SJerin Jacob plt_delay_ms(sleep); 83585bb3e5SJerin Jacob timeout += sleep; 84585bb3e5SJerin Jacob if (timeout >= mbox->rsp_tmo) { 85585bb3e5SJerin Jacob plt_err("Message timeout: %dms", mbox->rsp_tmo); 86585bb3e5SJerin Jacob rc = -EIO; 87585bb3e5SJerin Jacob break; 88585bb3e5SJerin Jacob } 8961deac72SHarman Kalra int_status = plt_read64(dev->mbox_reg_base + RVU_PF_INT); 909bd368caSHarman Kalra } while (!(int_status & RVU_PF_INTR_STATUS)); 91585bb3e5SJerin Jacob 92585bb3e5SJerin Jacob /* Clear */ 9361deac72SHarman Kalra plt_write64(int_status, dev->mbox_reg_base + RVU_PF_INT); 94585bb3e5SJerin Jacob 95585bb3e5SJerin Jacob /* Enable interrupts */ 9661deac72SHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1S); 97585bb3e5SJerin Jacob 98585bb3e5SJerin Jacob if (rc == 0) { 99585bb3e5SJerin Jacob /* Get message */ 100585bb3e5SJerin Jacob off = mbox->rx_start + 101585bb3e5SJerin Jacob PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); 102585bb3e5SJerin Jacob msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off); 103585bb3e5SJerin Jacob if (rsp) 104585bb3e5SJerin Jacob *rsp = msghdr; 105585bb3e5SJerin Jacob rc = msghdr->rc; 106585bb3e5SJerin Jacob } 107585bb3e5SJerin Jacob 108585bb3e5SJerin Jacob return rc; 109585bb3e5SJerin Jacob } 110585bb3e5SJerin Jacob 111dfb5a7a7SHarman Kalra /* PF will send the messages to AF and wait for responses and forward the 112dfb5a7a7SHarman Kalra * responses to VF. 
 */
static int
af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
{
	uint32_t timeout = 0, sleep = 1;
	struct mbox *mbox = dev->mbox;
	struct mbox_dev *mdev = &mbox->dev[0];
	volatile uint64_t int_status;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_msghdr *rsp;
	uint64_t offset;
	size_t size;
	int i;

	/* We need to disable PF interrupts. We are in timer interrupt */
	plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1C);

	/* Send message to AF */
	mbox_msg_send(mbox, 0);

	/* Wait for AF response */
	do {
		plt_delay_ms(sleep);
		/* sleep is 1 ms, so counting iterations equals elapsed ms */
		timeout++;
		if (timeout >= mbox->rsp_tmo) {
			plt_err("Routed messages %d timeout: %dms", num_msg, mbox->rsp_tmo);
			break;
		}
		int_status = plt_read64(dev->mbox_reg_base + RVU_PF_INT);
	} while (!(int_status & RVU_PF_INTR_STATUS));

	/* Clear */
	plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT);

	/* Enable interrupts */
	plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1S);

	/* On timeout we still fall through and process whatever responses
	 * did arrive; a count mismatch is only logged below.
	 */
	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs != num_msg)
		plt_err("Routed messages: %d received: %d", num_msg,
			req_hdr->num_msgs);

	/* Get messages from mbox */
	offset = mbox->rx_start +
		 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* Reserve PF/VF mbox message */
		size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
		rsp = mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
		if (!rsp) {
			plt_err("Failed to reserve VF%d message", vf);
			continue;
		}

		mbox_rsp_init(msg->id, rsp);

		/* Copy message from AF<->PF mbox to PF<->VF mbox */
		mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
			    (uint8_t *)msg + sizeof(struct mbox_msghdr),
			    size - sizeof(struct mbox_msghdr));

		/* Set status and sender pf_func data */
		rsp->rc = msg->rc;
		rsp->pcifunc = msg->pcifunc;

		/* Whenever a PF comes up, AF sends the link status to it but
		 * when VF comes up no such event is sent to respective VF.
		 * Using MBOX_MSG_NIX_LF_START_RX response from AF for the
		 * purpose and send the link status of PF to VF.
		 */
		if (msg->id == MBOX_MSG_NIX_LF_START_RX) {
			/* Send link status to VF */
			struct cgx_link_user_info linfo;
			struct mbox_msghdr *vf_msg;
			size_t sz;

			/* Get the link status */
			memset(&linfo, 0, sizeof(struct cgx_link_user_info));
			if (dev->ops && dev->ops->link_status_get)
				dev->ops->link_status_get(dev->roc_nix, &linfo);

			sz = PLT_ALIGN(mbox_id2size(MBOX_MSG_CGX_LINK_EVENT),
				       MBOX_MSG_ALIGN);
			/* Prepare the message to be sent */
			vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz);
			if (vf_msg) {
				mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg);
				mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr), &linfo,
					    sizeof(struct cgx_link_user_info));

				vf_msg->rc = msg->rc;
				vf_msg->pcifunc = msg->pcifunc;
				/* Send to VF and wait for the VF to ack */
				mbox_msg_send_up(&dev->mbox_vfpf_up, vf);
				mbox_wait_for_zero(&dev->mbox_vfpf_up, vf);
			}
		}

		offset = mbox->rx_start + msg->next_msgoff;
	}

	return req_hdr->num_msgs;
}

/* Handle a single RVU LF mailbox message from VF @vf inside the PF:
 * copy the request payload out of the mailbox, dispatch it to the
 * registered msg_process_cb, and queue the callback's response on the
 * PF<->VF mailbox.
 *
 * @param dev  Device owning the PF<->VF mailbox.
 * @param vf   Source VF index.
 * @param msg  Request header inside the VF mailbox.
 * @param size Total message size including the mbox_msghdr.
 * @return 0 on success, -1 on oversized request, callback failure or
 *         response-allocation failure.
 */
static int
process_rvu_lf_msgs(struct dev *dev, uint16_t vf, struct mbox_msghdr *msg, size_t size)
{
	uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
	uint8_t req[MBOX_MSG_REQ_SIZE_MAX];
	struct msg_rsp *rsp;
	uint16_t rsp_len;
	void *resp;
	int rc = 0;

	/* Handle BPHY mailbox message in PF */
	dev->active_vfs[vf / max_bits] |= BIT_ULL(vf % max_bits);

	if ((size - sizeof(struct mbox_msghdr)) > MBOX_MSG_REQ_SIZE_MAX) {
		plt_err("MBOX request size greater than %d", MBOX_MSG_REQ_SIZE_MAX);
		return -1;
	}
	/* Stage the payload in a local buffer before invoking the callback */
	mbox_memcpy(req, (uint8_t *)msg + sizeof(struct mbox_msghdr),
		    size - sizeof(struct mbox_msghdr));

	rc = dev->ops->msg_process_cb(dev_get_vf(msg->pcifunc), msg->id, req,
				      size - sizeof(struct mbox_msghdr), &resp, &rsp_len);
	if (rc < 0) {
		plt_err("Failed to process VF%d message", vf);
		return -1;
	}

	rsp = (struct msg_rsp *)mbox_alloc_msg(&dev->mbox_vfpf, vf,
					       rsp_len + sizeof(struct mbox_msghdr));
	if (!rsp) {
		/* NOTE(review): `resp` allocated by the callback is not freed
		 * on this path - confirm ownership / possible leak.
		 */
		plt_err("Failed to alloc VF%d response message", vf);
		return -1;
	}

	mbox_rsp_init(msg->id, rsp);

	/* Copy the callback's response payload; callback allocated `resp`
	 * and ownership transfers to us here.
	 */
	mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr), resp, rsp_len);
	free(resp);
	/* PF/VF function ID */
	rsp->hdr.pcifunc = msg->pcifunc;
	rsp->hdr.rc = 0;

	return 0;
}

/* PF receives mbox DOWN messages from VF and forwards to AF */
static int
vf_pf_process_msgs(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = &dev->mbox_vfpf;
	struct mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int offset, routed = 0;
	size_t size;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (!req_hdr->num_msgs)
		return 0;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	/* Hold the AF mbox across the whole forwarding pass */
	mbox_get(dev->mbox);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* RVU_PF_FUNC_S: stamp the real sender before forwarding */
		msg->pcifunc = dev_pf_func(dev->pf, vf);

		if (msg->id == MBOX_MSG_READY) {
			struct ready_msg_rsp *rsp;
			uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;

			/* Handle READY message in PF */
			dev->active_vfs[vf / max_bits] |=
				BIT_ULL(vf % max_bits);
			rsp = (struct ready_msg_rsp *)mbox_alloc_msg(
				mbox, vf, sizeof(*rsp));
			if (!rsp) {
				plt_err("Failed to alloc VF%d READY message",
					vf);
				continue;
			}

			mbox_rsp_init(msg->id, rsp);

			/* PF/VF function ID */
			rsp->hdr.pcifunc = msg->pcifunc;
			rsp->hdr.rc = 0;
		} else if (roc_rvu_lf_msg_id_range_check(dev->roc_rvu_lf, msg->id)) {
			/* RVU LF messages are handled in the PF itself */
			if (process_rvu_lf_msgs(dev, vf, msg, size) < 0)
				continue;
		} else {
			struct mbox_msghdr *af_req;
			/* Reserve AF/PF mbox message */
			size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
			af_req = mbox_alloc_msg(dev->mbox, 0, size);
			/* NOTE(review): returning here skips the mbox_put()
			 * below - confirm whether the AF mbox reference is
			 * leaked on this error path.
			 */
			if (af_req == NULL)
				return -ENOSPC;
			mbox_req_init(msg->id, af_req);

			/* Copy message from VF<->PF mbox to PF<->AF mbox */
			mbox_memcpy((uint8_t *)af_req +
					    sizeof(struct mbox_msghdr),
				    (uint8_t *)msg + sizeof(struct mbox_msghdr),
				    size - sizeof(struct mbox_msghdr));
			af_req->pcifunc = msg->pcifunc;
			routed++;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	if (routed > 0) {
		plt_base_dbg("pf:%d routed %d messages from vf:%d to AF",
			     dev->pf, routed, vf);
		/* PF will send the messages to AF and wait for responses */
		af_pf_wait_msg(dev, vf, routed);
		mbox_reset(dev->mbox, 0);
	}

	mbox_put(dev->mbox);

	/* Send mbox responses to VF */
	if (mdev->num_msgs) {
		plt_base_dbg("pf:%d reply %d messages to vf:%d", dev->pf,
			     mdev->num_msgs, vf);
		mbox_msg_send(mbox, vf);
	}

	return i;
}

/* VF sends Ack to PF's UP messages */
static int
vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = &dev->mbox_vfpf_up;
	struct mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return 0;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		/* RVU_PF_FUNC_S */
		msg->pcifunc = dev_pf_func(dev->pf, vf);

		/* Acks only need to be logged; no further action taken */
		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				     msg->id, mbox_id2name(msg->id),
				     msg->pcifunc, dev_get_pf(msg->pcifunc),
				     dev_get_vf(msg->pcifunc));
			break;
		case MBOX_MSG_CGX_PTP_RX_INFO:
			plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				     msg->id, mbox_id2name(msg->id),
				     msg->pcifunc, dev_get_pf(msg->pcifunc),
				     dev_get_vf(msg->pcifunc));
			break;
		default:
			if (roc_rvu_lf_msg_id_range_check(dev->roc_rvu_lf, msg->id))
				plt_base_dbg("PF: Msg 0x%x fn:0x%x (pf:%d,vf:%d)",
					     msg->id, msg->pcifunc, dev_get_pf(msg->pcifunc),
					     dev_get_vf(msg->pcifunc));
			else
				plt_err("Not handled UP msg 0x%x (%s) func:0x%x",
					msg->id, mbox_id2name(msg->id), msg->pcifunc);
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}
	mbox_reset(mbox, vf);
	/* Publish ack count for any waiter; barrier orders the update */
	mdev->msgs_acked = msgs_acked;
	plt_wmb();

	return i;
}

/* PF handling messages from VF */
static void
roc_vf_pf_mbox_handle_msg(void *param, dev_intr_t *intr)
{
	uint16_t vf, max_vf, max_bits;
	struct dev *dev = param;

	/* NOTE(review): conventional bits-per-word is sizeof(x) * 8; this
	 * multiplies by sizeof(uint64_t), which happens to equal 8 - confirm
	 * intent.
	 */
	max_bits = sizeof(dev->intr.bits[0]) * sizeof(uint64_t);
	max_vf = max_bits * MAX_VFPF_DWORD_BITS;

	for (vf = 0; vf < max_vf; vf++) {
		if (intr->bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
			plt_base_dbg("Process vf:%d request (pf:%d, vf:%d)", vf,
				     dev->pf, dev->vf);
			/* VF initiated down messages */
			vf_pf_process_msgs(dev, vf);
			/* VF replies to PF's UP messages */
			vf_pf_process_up_msgs(dev, vf);
			intr->bits[vf / max_bits] &= ~(BIT_ULL(vf % max_bits));
		}
	}
}

/* IRQ to PF from VF - PF context (interrupt thread) */
static void
roc_vf_pf_mbox_irq(void *param)
{
	bool signal_thread = false;
	struct dev *dev = param;
	dev_intr_t intrb;
	uint64_t intr;
	int vfpf, sz;

	sz = sizeof(intrb.bits[0]) * MAX_VFPF_DWORD_BITS;
	memset(intrb.bits, 0, sz);
	for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
		intr = plt_read64(dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_intx[vfpf]);
		if (!intr)
			continue;

		plt_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)", vfpf, intr, dev->pf,
			     dev->vf);

		/* Save and clear intr bits */
		intrb.bits[vfpf] |= intr;
		plt_write64(intr, dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_intx[vfpf]);
		signal_thread = true;
	}

	if (signal_thread) {
		pthread_mutex_lock(&dev->sync.mutex);
		/* Interrupt state was saved in local variable first, as dev->intr.bits
		 * is a shared resources between VF msg and interrupt thread.
		 */
		memcpy(dev->intr.bits, intrb.bits, sz);
		/* MBOX message received from VF */
		dev->sync.msg_avail |= ROC_DEV_MBOX_PEND;
		/* Signal vf message handler thread */
		pthread_cond_signal(&dev->sync.pfvf_msg_cond);
		pthread_mutex_unlock(&dev->sync.mutex);
	}
}

/* Received response from AF (PF context) / PF (VF context) */
static void
process_msgs(struct dev *dev, struct mbox *mbox)
{
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));

		switch (msg->id) {
			/* Add message id's that are handled here */
		case MBOX_MSG_READY:
			/* Get our identity */
			dev->pf_func = msg->pcifunc;
			break;
		case MBOX_MSG_CGX_PRIO_FLOW_CTRL_CFG:
		case MBOX_MSG_CGX_CFG_PAUSE_FRM:
			/* Handling the case where one VF tries to disable PFC
			 * while PFC already configured on other VFs. This is
			 * not an error but a warning which can be ignored.
			 */
			if (msg->rc) {
				if (msg->rc == LMAC_AF_ERR_PERM_DENIED) {
					plt_mbox_dbg(
						"Receive Flow control disable not permitted "
						"as its used by other PFVFs");
					msg->rc = 0;
				} else {
					plt_err("Message (%s) response has err=%d",
						mbox_id2name(msg->id), msg->rc);
				}
			}
			break;
		case MBOX_MSG_CGX_PROMISC_DISABLE:
		case MBOX_MSG_CGX_PROMISC_ENABLE:
			/* Re-requesting the current promisc state is benign */
			if (msg->rc) {
				if (msg->rc == LMAC_AF_ERR_INVALID_PARAM) {
					plt_mbox_dbg("Already in same promisc state");
					msg->rc = 0;
				} else {
					plt_err("Message (%s) response has err=%d",
						mbox_id2name(msg->id), msg->rc);
				}
			}
			break;

		default:
			if (msg->rc)
				plt_err("Message (%s) response has err=%d (%s)",
					mbox_id2name(msg->id), msg->rc, roc_error_msg_get(msg->rc));
			break;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	mbox_reset(mbox, 0);
	/* Update acked if someone is waiting a message - mbox_wait is waiting */
	mdev->msgs_acked = msgs_acked;
	plt_wmb();
}

/* Copies the message received from AF and sends it to VF */
static void
pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
{
	/* NOTE(review): bits-per-word here is sizeof * sizeof(uint64_t)
	 * (= 64 only because sizeof(uint64_t) == 8); elsewhere the file uses
	 * sizeof * 8 - confirm intent.
	 */
	uint16_t max_bits = sizeof(dev->active_vfs[0]) * sizeof(uint64_t);
	struct mbox *vf_mbox = &dev->mbox_vfpf_up;
	struct msg_req *msg = rec_msg;
	struct mbox_msghdr *vf_msg;
	uint16_t vf;
	size_t size;

	size = PLT_ALIGN(mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
	if (size < sizeof(struct mbox_msghdr))
		return;
	/* Send UP message to all VF's */
	for (vf = 0; vf < vf_mbox->ndevs; vf++) {
		/* VF active */
		/* NOTE(review): other call sites mask with
		 * BIT_ULL(vf % max_bits); BIT_ULL(vf) is undefined for
		 * vf >= 64 - confirm against upstream.
		 */
		if (!(dev->active_vfs[vf / max_bits] & (BIT_ULL(vf))))
			continue;

		plt_base_dbg("(%s) size: %zx to VF: %d",
			     mbox_id2name(msg->hdr.id), size, vf);

		/* Reserve PF/VF mbox message */
		vf_msg = mbox_alloc_msg(vf_mbox, vf, size);
		if (!vf_msg) {
			plt_err("Failed to alloc VF%d UP message", vf);
			continue;
		}
		mbox_req_init(msg->hdr.id, vf_msg);

		/*
		 * Copy message from AF<->PF UP mbox
		 * to PF<->VF UP mbox
		 */
		mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr),
			    (uint8_t *)msg + sizeof(struct mbox_msghdr),
			    size - sizeof(struct mbox_msghdr));

		vf_msg->rc = msg->hdr.rc;
		/* Set PF to be a sender */
		vf_msg->pcifunc = dev->pf_func;

		/* Send to VF and wait for the VF side to consume it */
		mbox_msg_send(vf_mbox, vf);
		mbox_wait_for_zero(&dev->mbox_vfpf_up, vf);
	}
}

/* UP-message handler: translate an AF representee event (port state,
 * PF/VF state, MTU change) into a roc_eswitch notification and deliver it
 * via the registered repte_notify callback.
 *
 * @param dev Device context.
 * @param req Event request from AF.
 * @param rsp Response; rc is filled in before returning.
 * @return 0 on success, -ENOMEM / -EINVAL / callback error otherwise.
 */
static int
mbox_up_handler_rep_event_up_notify(struct dev *dev, struct rep_event *req, struct msg_rsp *rsp)
{
	struct roc_eswitch_repte_notify_msg *notify_msg;
	int rc = 0;

	plt_base_dbg("mbox_up_handler_rep_event_up_notify");
	plt_base_dbg("pf:%d/vf:%d msg id 0x%x (%s) from: pf:%d/vf:%d", dev_get_pf(dev->pf_func),
		     dev_get_vf(dev->pf_func), req->hdr.id, mbox_id2name(req->hdr.id),
		     dev_get_pf(req->hdr.pcifunc), dev_get_vf(req->hdr.pcifunc));

	if (dev->ops && dev->ops->repte_notify) {
		notify_msg = plt_zmalloc(sizeof(struct roc_eswitch_repte_notify_msg), 0);
		if (!notify_msg) {
			plt_err("Failed to allocate memory");
			rc = -ENOMEM;
			goto fail;
		}

		/* Map the RVU event onto the eswitch notification type */
		switch (req->event) {
		case RVU_EVENT_PORT_STATE:
			plt_base_dbg("pcifunc %x, port_state %d", req->pcifunc,
				     req->evt_data.port_state);
			notify_msg->type = ROC_ESWITCH_LINK_STATE;
			notify_msg->link.hw_func = req->pcifunc;
			notify_msg->link.enable = req->evt_data.port_state;
			break;
		case RVU_EVENT_PFVF_STATE:
			plt_base_dbg("pcifunc %x, repte_state %d", req->pcifunc,
				     req->evt_data.vf_state);
			notify_msg->type = ROC_ESWITCH_REPTE_STATE;
			notify_msg->state.hw_func = req->pcifunc;
			notify_msg->state.enable = req->evt_data.vf_state;
			break;
		case RVU_EVENT_MTU_CHANGE:
			plt_base_dbg("pcifunc %x, mtu val %d", req->pcifunc, req->evt_data.mtu);
			notify_msg->type = ROC_ESWITCH_REPTE_MTU;
			notify_msg->mtu.hw_func = req->pcifunc;
			notify_msg->mtu.mtu = req->evt_data.mtu;
			break;
		default:
			plt_err("Unknown event type %u", req->event);
			plt_free(notify_msg);
			rc = -EINVAL;
			goto fail;
		}

		rc = dev->ops->repte_notify(dev->roc_nix, (void *)notify_msg);
		if (rc < 0)
			plt_err("Failed to send notification type %x for representee %x",
				notify_msg->type, notify_msg->state.hw_func);

		plt_free(notify_msg);
	}
fail:
rsp->hdr.rc = rc; 654d85c80b4SHarman Kalra return rc; 655d85c80b4SHarman Kalra } 656d85c80b4SHarman Kalra 657d85c80b4SHarman Kalra static int 658c26d94f2SAkhil Goyal mbox_up_handler_mcs_intr_notify(struct dev *dev, struct mcs_intr_info *info, struct msg_rsp *rsp) 659c26d94f2SAkhil Goyal { 660c26d94f2SAkhil Goyal struct roc_mcs_event_desc desc = {0}; 661c26d94f2SAkhil Goyal struct roc_mcs *mcs; 662c26d94f2SAkhil Goyal 663c26d94f2SAkhil Goyal plt_base_dbg("pf:%d/vf:%d msg id 0x%x (%s) from: pf:%d/vf:%d", dev_get_pf(dev->pf_func), 664c26d94f2SAkhil Goyal dev_get_vf(dev->pf_func), info->hdr.id, mbox_id2name(info->hdr.id), 665c26d94f2SAkhil Goyal dev_get_pf(info->hdr.pcifunc), dev_get_vf(info->hdr.pcifunc)); 666c26d94f2SAkhil Goyal 667c26d94f2SAkhil Goyal mcs = roc_idev_mcs_get(info->mcs_id); 668c26d94f2SAkhil Goyal if (!mcs) 669c26d94f2SAkhil Goyal goto exit; 670c26d94f2SAkhil Goyal 671c26d94f2SAkhil Goyal if (info->intr_mask) { 672c26d94f2SAkhil Goyal switch (info->intr_mask) { 673c26d94f2SAkhil Goyal case MCS_CPM_RX_SECTAG_V_EQ1_INT: 674c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR; 675c26d94f2SAkhil Goyal desc.subtype = ROC_MCS_EVENT_RX_SECTAG_V_EQ1; 676c26d94f2SAkhil Goyal break; 677c26d94f2SAkhil Goyal case MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT: 678c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR; 679c26d94f2SAkhil Goyal desc.subtype = ROC_MCS_EVENT_RX_SECTAG_E_EQ0_C_EQ1; 680c26d94f2SAkhil Goyal break; 681c26d94f2SAkhil Goyal case MCS_CPM_RX_SECTAG_SL_GTE48_INT: 682c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR; 683c26d94f2SAkhil Goyal desc.subtype = ROC_MCS_EVENT_RX_SECTAG_SL_GTE48; 684c26d94f2SAkhil Goyal break; 685c26d94f2SAkhil Goyal case MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT: 686c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR; 687c26d94f2SAkhil Goyal desc.subtype = ROC_MCS_EVENT_RX_SECTAG_ES_EQ1_SC_EQ1; 688c26d94f2SAkhil Goyal break; 689c26d94f2SAkhil Goyal case MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT: 
690c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR; 691c26d94f2SAkhil Goyal desc.subtype = ROC_MCS_EVENT_RX_SECTAG_SC_EQ1_SCB_EQ1; 692c26d94f2SAkhil Goyal break; 693c26d94f2SAkhil Goyal case MCS_CPM_RX_PACKET_XPN_EQ0_INT: 694c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_RX_SA_PN_HARD_EXP; 695c26d94f2SAkhil Goyal desc.metadata.sa_idx = info->sa_id; 696c26d94f2SAkhil Goyal break; 697c26d94f2SAkhil Goyal case MCS_CPM_RX_PN_THRESH_REACHED_INT: 698c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_RX_SA_PN_SOFT_EXP; 699c26d94f2SAkhil Goyal desc.metadata.sa_idx = info->sa_id; 700c26d94f2SAkhil Goyal break; 701c26d94f2SAkhil Goyal case MCS_CPM_TX_PACKET_XPN_EQ0_INT: 702c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_TX_SA_PN_HARD_EXP; 703c26d94f2SAkhil Goyal desc.metadata.sa_idx = info->sa_id; 704c26d94f2SAkhil Goyal break; 705c26d94f2SAkhil Goyal case MCS_CPM_TX_PN_THRESH_REACHED_INT: 706c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_TX_SA_PN_SOFT_EXP; 707c26d94f2SAkhil Goyal desc.metadata.sa_idx = info->sa_id; 708c26d94f2SAkhil Goyal break; 709c26d94f2SAkhil Goyal case MCS_CPM_TX_SA_NOT_VALID_INT: 710c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_SA_NOT_VALID; 711c26d94f2SAkhil Goyal break; 712c26d94f2SAkhil Goyal case MCS_BBE_RX_DFIFO_OVERFLOW_INT: 713c26d94f2SAkhil Goyal case MCS_BBE_TX_DFIFO_OVERFLOW_INT: 714c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_FIFO_OVERFLOW; 715c26d94f2SAkhil Goyal desc.subtype = ROC_MCS_EVENT_DATA_FIFO_OVERFLOW; 716c26d94f2SAkhil Goyal desc.metadata.lmac_id = info->lmac_id; 717c26d94f2SAkhil Goyal break; 718c26d94f2SAkhil Goyal case MCS_BBE_RX_PLFIFO_OVERFLOW_INT: 719c26d94f2SAkhil Goyal case MCS_BBE_TX_PLFIFO_OVERFLOW_INT: 720c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_FIFO_OVERFLOW; 721c26d94f2SAkhil Goyal desc.subtype = ROC_MCS_EVENT_POLICY_FIFO_OVERFLOW; 722c26d94f2SAkhil Goyal desc.metadata.lmac_id = info->lmac_id; 723c26d94f2SAkhil Goyal break; 724c26d94f2SAkhil Goyal case MCS_PAB_RX_CHAN_OVERFLOW_INT: 725c26d94f2SAkhil 
Goyal case MCS_PAB_TX_CHAN_OVERFLOW_INT: 726c26d94f2SAkhil Goyal desc.type = ROC_MCS_EVENT_FIFO_OVERFLOW; 727c26d94f2SAkhil Goyal desc.subtype = ROC_MCS_EVENT_PKT_ASSM_FIFO_OVERFLOW; 728c26d94f2SAkhil Goyal desc.metadata.lmac_id = info->lmac_id; 729c26d94f2SAkhil Goyal break; 730c26d94f2SAkhil Goyal default: 731c26d94f2SAkhil Goyal goto exit; 732c26d94f2SAkhil Goyal } 733c26d94f2SAkhil Goyal 734c26d94f2SAkhil Goyal mcs_event_cb_process(mcs, &desc); 735c26d94f2SAkhil Goyal } 736c26d94f2SAkhil Goyal 737c26d94f2SAkhil Goyal exit: 738c26d94f2SAkhil Goyal rsp->hdr.rc = 0; 739c26d94f2SAkhil Goyal return 0; 740c26d94f2SAkhil Goyal } 741c26d94f2SAkhil Goyal 742c26d94f2SAkhil Goyal static int 743585bb3e5SJerin Jacob mbox_up_handler_cgx_link_event(struct dev *dev, struct cgx_link_info_msg *msg, 744585bb3e5SJerin Jacob struct msg_rsp *rsp) 745585bb3e5SJerin Jacob { 746585bb3e5SJerin Jacob struct cgx_link_user_info *linfo = &msg->link_info; 747585bb3e5SJerin Jacob void *roc_nix = dev->roc_nix; 748585bb3e5SJerin Jacob 749585bb3e5SJerin Jacob plt_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d", 750585bb3e5SJerin Jacob dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func), 751585bb3e5SJerin Jacob linfo->link_up ? 
"UP" : "DOWN", msg->hdr.id, 752585bb3e5SJerin Jacob mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc), 753585bb3e5SJerin Jacob dev_get_vf(msg->hdr.pcifunc)); 754585bb3e5SJerin Jacob 755585bb3e5SJerin Jacob /* PF gets link notification from AF */ 756585bb3e5SJerin Jacob if (dev_get_pf(msg->hdr.pcifunc) == 0) { 757585bb3e5SJerin Jacob if (dev->ops && dev->ops->link_status_update) 758585bb3e5SJerin Jacob dev->ops->link_status_update(roc_nix, linfo); 759585bb3e5SJerin Jacob 760585bb3e5SJerin Jacob /* Forward the same message as received from AF to VF */ 761585bb3e5SJerin Jacob pf_vf_mbox_send_up_msg(dev, msg); 762585bb3e5SJerin Jacob } else { 763585bb3e5SJerin Jacob /* VF gets link up notification */ 764585bb3e5SJerin Jacob if (dev->ops && dev->ops->link_status_update) 765585bb3e5SJerin Jacob dev->ops->link_status_update(roc_nix, linfo); 766585bb3e5SJerin Jacob } 767585bb3e5SJerin Jacob 768585bb3e5SJerin Jacob rsp->hdr.rc = 0; 769585bb3e5SJerin Jacob return 0; 770585bb3e5SJerin Jacob } 771585bb3e5SJerin Jacob 772585bb3e5SJerin Jacob static int 773585bb3e5SJerin Jacob mbox_up_handler_cgx_ptp_rx_info(struct dev *dev, 774585bb3e5SJerin Jacob struct cgx_ptp_rx_info_msg *msg, 775585bb3e5SJerin Jacob struct msg_rsp *rsp) 776585bb3e5SJerin Jacob { 777585bb3e5SJerin Jacob void *roc_nix = dev->roc_nix; 778585bb3e5SJerin Jacob 779585bb3e5SJerin Jacob plt_base_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d", 780585bb3e5SJerin Jacob dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func), 781585bb3e5SJerin Jacob msg->ptp_en ? 
"ENABLED" : "DISABLED", msg->hdr.id, 782585bb3e5SJerin Jacob mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc), 783585bb3e5SJerin Jacob dev_get_vf(msg->hdr.pcifunc)); 784585bb3e5SJerin Jacob 785585bb3e5SJerin Jacob /* PF gets PTP notification from AF */ 786585bb3e5SJerin Jacob if (dev_get_pf(msg->hdr.pcifunc) == 0) { 787585bb3e5SJerin Jacob if (dev->ops && dev->ops->ptp_info_update) 788585bb3e5SJerin Jacob dev->ops->ptp_info_update(roc_nix, msg->ptp_en); 789585bb3e5SJerin Jacob 790585bb3e5SJerin Jacob /* Forward the same message as received from AF to VF */ 791585bb3e5SJerin Jacob pf_vf_mbox_send_up_msg(dev, msg); 792585bb3e5SJerin Jacob } else { 793585bb3e5SJerin Jacob /* VF gets PTP notification */ 794585bb3e5SJerin Jacob if (dev->ops && dev->ops->ptp_info_update) 795585bb3e5SJerin Jacob dev->ops->ptp_info_update(roc_nix, msg->ptp_en); 796585bb3e5SJerin Jacob } 797585bb3e5SJerin Jacob 798585bb3e5SJerin Jacob rsp->hdr.rc = 0; 799585bb3e5SJerin Jacob return 0; 800585bb3e5SJerin Jacob } 801585bb3e5SJerin Jacob 802665ff1ccSJerin Jacob static int 803665ff1ccSJerin Jacob mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req) 804665ff1ccSJerin Jacob { 805665ff1ccSJerin Jacob /* Check if valid, if not reply with a invalid msg */ 806665ff1ccSJerin Jacob if (req->sig != MBOX_REQ_SIG) 807665ff1ccSJerin Jacob return -EIO; 808665ff1ccSJerin Jacob 809665ff1ccSJerin Jacob switch (req->id) { 810665ff1ccSJerin Jacob default: 811665ff1ccSJerin Jacob reply_invalid_msg(&dev->mbox_up, 0, 0, req->id); 812665ff1ccSJerin Jacob break; 813585bb3e5SJerin Jacob #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ 814585bb3e5SJerin Jacob case _id: { \ 815585bb3e5SJerin Jacob struct _rsp_type *rsp; \ 816585bb3e5SJerin Jacob int err; \ 817585bb3e5SJerin Jacob rsp = (struct _rsp_type *)mbox_alloc_msg( \ 818585bb3e5SJerin Jacob &dev->mbox_up, 0, sizeof(struct _rsp_type)); \ 819585bb3e5SJerin Jacob if (!rsp) \ 820585bb3e5SJerin Jacob return -ENOMEM; \ 821585bb3e5SJerin Jacob 
rsp->hdr.id = _id; \ 822585bb3e5SJerin Jacob rsp->hdr.sig = MBOX_RSP_SIG; \ 823585bb3e5SJerin Jacob rsp->hdr.pcifunc = dev->pf_func; \ 824585bb3e5SJerin Jacob rsp->hdr.rc = 0; \ 825585bb3e5SJerin Jacob err = mbox_up_handler_##_fn_name(dev, (struct _req_type *)req, \ 826585bb3e5SJerin Jacob rsp); \ 827585bb3e5SJerin Jacob return err; \ 828585bb3e5SJerin Jacob } 829585bb3e5SJerin Jacob MBOX_UP_CGX_MESSAGES 830c26d94f2SAkhil Goyal MBOX_UP_MCS_MESSAGES 831d85c80b4SHarman Kalra MBOX_UP_REP_MESSAGES 832585bb3e5SJerin Jacob #undef M 833665ff1ccSJerin Jacob } 834665ff1ccSJerin Jacob 835665ff1ccSJerin Jacob return -ENODEV; 836665ff1ccSJerin Jacob } 837665ff1ccSJerin Jacob 838*384903edSAkhil Goyal static int 839*384903edSAkhil Goyal process_rvu_lf_msgs_up(struct dev *dev, struct mbox_msghdr *msg, size_t size) 840*384903edSAkhil Goyal { 841*384903edSAkhil Goyal uint8_t req[MBOX_MSG_REQ_SIZE_MAX]; 842*384903edSAkhil Goyal struct msg_rsp *rsp; 843*384903edSAkhil Goyal uint16_t rsp_len; 844*384903edSAkhil Goyal void *resp; 845*384903edSAkhil Goyal int rc = 0; 846*384903edSAkhil Goyal 847*384903edSAkhil Goyal /* Check if valid, if not reply with an invalid msg */ 848*384903edSAkhil Goyal if (msg->sig != MBOX_REQ_SIG) 849*384903edSAkhil Goyal return -EIO; 850*384903edSAkhil Goyal 851*384903edSAkhil Goyal if ((size - sizeof(struct mbox_msghdr)) > MBOX_MSG_REQ_SIZE_MAX) { 852*384903edSAkhil Goyal plt_err("MBOX request size greater than %d", MBOX_MSG_REQ_SIZE_MAX); 853*384903edSAkhil Goyal return -ENOMEM; 854*384903edSAkhil Goyal } 855*384903edSAkhil Goyal mbox_memcpy(req, (uint8_t *)msg + sizeof(struct mbox_msghdr), 856*384903edSAkhil Goyal size - sizeof(struct mbox_msghdr)); 857*384903edSAkhil Goyal rc = dev->ops->msg_process_cb(dev_get_vf(msg->pcifunc), msg->id, req, 858*384903edSAkhil Goyal size - sizeof(struct mbox_msghdr), &resp, &rsp_len); 859*384903edSAkhil Goyal if (rc < 0) { 860*384903edSAkhil Goyal plt_err("Failed to process VF%d message", dev->vf); 861*384903edSAkhil 
Goyal return rc; 862*384903edSAkhil Goyal } 863*384903edSAkhil Goyal 864*384903edSAkhil Goyal rsp = (struct msg_rsp *)mbox_alloc_msg(&dev->mbox_up, 0, 865*384903edSAkhil Goyal rsp_len + sizeof(struct mbox_msghdr)); 866*384903edSAkhil Goyal if (!rsp) { 867*384903edSAkhil Goyal plt_err("Failed to alloc VF%d response message", dev->vf); 868*384903edSAkhil Goyal return -ENOMEM; 869*384903edSAkhil Goyal } 870*384903edSAkhil Goyal 871*384903edSAkhil Goyal mbox_rsp_init(msg->id, rsp); 872*384903edSAkhil Goyal 873*384903edSAkhil Goyal mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr), resp, rsp_len); 874*384903edSAkhil Goyal free(resp); 875*384903edSAkhil Goyal /* PF/VF function ID */ 876*384903edSAkhil Goyal rsp->hdr.pcifunc = msg->pcifunc; 877*384903edSAkhil Goyal rsp->hdr.rc = 0; 878*384903edSAkhil Goyal 879*384903edSAkhil Goyal return rc; 880*384903edSAkhil Goyal } 881*384903edSAkhil Goyal 882dfb5a7a7SHarman Kalra /* Received up messages from AF (PF context) / PF (in context) */ 883665ff1ccSJerin Jacob static void 884665ff1ccSJerin Jacob process_msgs_up(struct dev *dev, struct mbox *mbox) 885665ff1ccSJerin Jacob { 886665ff1ccSJerin Jacob struct mbox_dev *mdev = &mbox->dev[0]; 887665ff1ccSJerin Jacob struct mbox_hdr *req_hdr; 888665ff1ccSJerin Jacob struct mbox_msghdr *msg; 889665ff1ccSJerin Jacob int i, err, offset; 890*384903edSAkhil Goyal size_t size; 891665ff1ccSJerin Jacob 892665ff1ccSJerin Jacob req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start); 893665ff1ccSJerin Jacob if (req_hdr->num_msgs == 0) 894665ff1ccSJerin Jacob return; 895665ff1ccSJerin Jacob 896665ff1ccSJerin Jacob offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); 897665ff1ccSJerin Jacob for (i = 0; i < req_hdr->num_msgs; i++) { 898665ff1ccSJerin Jacob msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset); 899665ff1ccSJerin Jacob 900665ff1ccSJerin Jacob plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id, 901665ff1ccSJerin Jacob 
mbox_id2name(msg->id), dev_get_pf(msg->pcifunc), 902665ff1ccSJerin Jacob dev_get_vf(msg->pcifunc)); 903*384903edSAkhil Goyal if (roc_rvu_lf_msg_id_range_check(dev->roc_rvu_lf, msg->id)) { 904*384903edSAkhil Goyal size = mbox->rx_start + msg->next_msgoff - offset; 905*384903edSAkhil Goyal err = process_rvu_lf_msgs_up(dev, msg, size); 906*384903edSAkhil Goyal if (err) 907*384903edSAkhil Goyal plt_err("Error %d handling 0x%x RVU_LF up msg", err, msg->id); 908*384903edSAkhil Goyal } else { 909665ff1ccSJerin Jacob err = mbox_process_msgs_up(dev, msg); 910665ff1ccSJerin Jacob if (err) 911665ff1ccSJerin Jacob plt_err("Error %d handling 0x%x (%s)", err, msg->id, 912665ff1ccSJerin Jacob mbox_id2name(msg->id)); 913*384903edSAkhil Goyal } 914665ff1ccSJerin Jacob offset = mbox->rx_start + msg->next_msgoff; 915665ff1ccSJerin Jacob } 916665ff1ccSJerin Jacob /* Send mbox responses */ 917665ff1ccSJerin Jacob if (mdev->num_msgs) { 918665ff1ccSJerin Jacob plt_base_dbg("Reply num_msgs:%d", mdev->num_msgs); 919665ff1ccSJerin Jacob mbox_msg_send(mbox, 0); 920665ff1ccSJerin Jacob } 921665ff1ccSJerin Jacob } 922665ff1ccSJerin Jacob 923dfb5a7a7SHarman Kalra /* IRQ to VF from PF - VF context (interrupt thread) */ 924665ff1ccSJerin Jacob static void 925585bb3e5SJerin Jacob roc_pf_vf_mbox_irq(void *param) 926585bb3e5SJerin Jacob { 927585bb3e5SJerin Jacob struct dev *dev = param; 928fa4ee2d4SHarman Kalra uint64_t mbox_data; 929585bb3e5SJerin Jacob uint64_t intr; 930585bb3e5SJerin Jacob 93161deac72SHarman Kalra intr = plt_read64(dev->mbox_reg_base + RVU_VF_INT); 932585bb3e5SJerin Jacob if (intr == 0) 933585bb3e5SJerin Jacob plt_base_dbg("Proceeding to check mbox UP messages if any"); 934585bb3e5SJerin Jacob 93561deac72SHarman Kalra plt_write64(intr, dev->mbox_reg_base + RVU_VF_INT); 936585bb3e5SJerin Jacob plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf); 937585bb3e5SJerin Jacob 938fa4ee2d4SHarman Kalra /* Reading for UP/DOWN message, next message sending will be delayed 
939fa4ee2d4SHarman Kalra * by 1ms until this region is zeroed mbox_wait_for_zero() 940fa4ee2d4SHarman Kalra */ 94161deac72SHarman Kalra mbox_data = plt_read64(dev->mbox_reg_base + RVU_VF_VFPF_MBOX0); 94288d7fa4aSHarman Kalra /* If interrupt occurred for down message */ 9439bd368caSHarman Kalra if (mbox_data & MBOX_DOWN_MSG || intr & BIT_ULL(1)) { 94488d7fa4aSHarman Kalra mbox_data &= ~MBOX_DOWN_MSG; 94561deac72SHarman Kalra plt_write64(mbox_data, dev->mbox_reg_base + RVU_VF_VFPF_MBOX0); 946fa4ee2d4SHarman Kalra 947585bb3e5SJerin Jacob /* First process all configuration messages */ 948585bb3e5SJerin Jacob process_msgs(dev, dev->mbox); 94988d7fa4aSHarman Kalra } 95088d7fa4aSHarman Kalra /* If interrupt occurred for UP message */ 9519bd368caSHarman Kalra if (mbox_data & MBOX_UP_MSG || intr & BIT_ULL(0)) { 95288d7fa4aSHarman Kalra mbox_data &= ~MBOX_UP_MSG; 95361deac72SHarman Kalra plt_write64(mbox_data, dev->mbox_reg_base + RVU_VF_VFPF_MBOX0); 954585bb3e5SJerin Jacob 955585bb3e5SJerin Jacob /* Process Uplink messages */ 956585bb3e5SJerin Jacob process_msgs_up(dev, &dev->mbox_up); 957585bb3e5SJerin Jacob } 95888d7fa4aSHarman Kalra } 959585bb3e5SJerin Jacob 960dfb5a7a7SHarman Kalra /* IRQ to PF from AF - PF context (interrupt thread) */ 961585bb3e5SJerin Jacob static void 962665ff1ccSJerin Jacob roc_af_pf_mbox_irq(void *param) 963665ff1ccSJerin Jacob { 964665ff1ccSJerin Jacob struct dev *dev = param; 965fa4ee2d4SHarman Kalra uint64_t mbox_data; 966665ff1ccSJerin Jacob uint64_t intr; 967665ff1ccSJerin Jacob 96861deac72SHarman Kalra intr = plt_read64(dev->mbox_reg_base + RVU_PF_INT); 969665ff1ccSJerin Jacob if (intr == 0) 970665ff1ccSJerin Jacob plt_base_dbg("Proceeding to check mbox UP messages if any"); 971665ff1ccSJerin Jacob 97261deac72SHarman Kalra plt_write64(intr, dev->mbox_reg_base + RVU_PF_INT); 973585bb3e5SJerin Jacob plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf); 974665ff1ccSJerin Jacob 975fa4ee2d4SHarman Kalra /* Reading for UP/DOWN 
message, next message sending will be delayed 976fa4ee2d4SHarman Kalra * by 1ms until this region is zeroed mbox_wait_for_zero() 977fa4ee2d4SHarman Kalra */ 97861deac72SHarman Kalra mbox_data = plt_read64(dev->mbox_reg_base + RVU_PF_PFAF_MBOX0); 97988d7fa4aSHarman Kalra /* If interrupt occurred for down message */ 98061deac72SHarman Kalra if (mbox_data & MBOX_DOWN_MSG || intr & BIT_ULL(1)) { 98188d7fa4aSHarman Kalra mbox_data &= ~MBOX_DOWN_MSG; 98261deac72SHarman Kalra plt_write64(mbox_data, dev->mbox_reg_base + RVU_PF_PFAF_MBOX0); 983fa4ee2d4SHarman Kalra 984665ff1ccSJerin Jacob /* First process all configuration messages */ 985665ff1ccSJerin Jacob process_msgs(dev, dev->mbox); 98688d7fa4aSHarman Kalra } 98788d7fa4aSHarman Kalra /* If interrupt occurred for up message */ 98861deac72SHarman Kalra if (mbox_data & MBOX_UP_MSG || intr & BIT_ULL(0)) { 98988d7fa4aSHarman Kalra mbox_data &= ~MBOX_UP_MSG; 99061deac72SHarman Kalra plt_write64(mbox_data, dev->mbox_reg_base + RVU_PF_PFAF_MBOX0); 991665ff1ccSJerin Jacob 992665ff1ccSJerin Jacob /* Process Uplink messages */ 993665ff1ccSJerin Jacob process_msgs_up(dev, &dev->mbox_up); 994665ff1ccSJerin Jacob } 99588d7fa4aSHarman Kalra } 996665ff1ccSJerin Jacob 997665ff1ccSJerin Jacob static int 998665ff1ccSJerin Jacob mbox_register_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev) 999665ff1ccSJerin Jacob { 1000d61138d4SHarman Kalra struct plt_intr_handle *intr_handle = pci_dev->intr_handle; 1001585bb3e5SJerin Jacob int i, rc; 1002585bb3e5SJerin Jacob 1003585bb3e5SJerin Jacob /* HW clear irq */ 10049bd368caSHarman Kalra for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) { 10059bd368caSHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_int_ena_w1c[i]); 10069bd368caSHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + dev->mbox_plat->pfvf1_mbox_int_ena_w1c[i]); 10079bd368caSHarman Kalra } 1008665ff1ccSJerin Jacob 100961deac72SHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1C); 
1010665ff1ccSJerin Jacob 1011585bb3e5SJerin Jacob /* MBOX interrupt for VF(0...63) <-> PF */ 10129bd368caSHarman Kalra rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev, dev->mbox_plat->pfvf_mbox0_vec); 1013585bb3e5SJerin Jacob 1014585bb3e5SJerin Jacob if (rc) { 1015585bb3e5SJerin Jacob plt_err("Fail to register PF(VF0-63) mbox irq"); 1016585bb3e5SJerin Jacob return rc; 1017585bb3e5SJerin Jacob } 1018585bb3e5SJerin Jacob /* MBOX interrupt for VF(64...128) <-> PF */ 10199bd368caSHarman Kalra rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev, dev->mbox_plat->pfvf_mbox1_vec); 1020585bb3e5SJerin Jacob 1021585bb3e5SJerin Jacob if (rc) { 1022585bb3e5SJerin Jacob plt_err("Fail to register PF(VF64-128) mbox irq"); 1023585bb3e5SJerin Jacob return rc; 1024585bb3e5SJerin Jacob } 10259bd368caSHarman Kalra 10269bd368caSHarman Kalra /* Additional interrupt vector which can be used by VF -> PF using when 10279bd368caSHarman Kalra * RVU_VF_VFPF_TRIG(1) trigger register. 10289bd368caSHarman Kalra */ 10299bd368caSHarman Kalra if (roc_model_is_cn20k()) { 10309bd368caSHarman Kalra /* MBOX1 interrupt for VF(0...63) <-> PF */ 10319bd368caSHarman Kalra rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev, 10329bd368caSHarman Kalra dev->mbox_plat->pfvf1_mbox0_vec); 10339bd368caSHarman Kalra 10349bd368caSHarman Kalra if (rc) { 10359bd368caSHarman Kalra plt_err("Fail to register PF1(VF0-63) mbox irq"); 10369bd368caSHarman Kalra return rc; 10379bd368caSHarman Kalra } 10389bd368caSHarman Kalra /* MBOX1 interrupt for VF(64...128) <-> PF */ 10399bd368caSHarman Kalra rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev, 10409bd368caSHarman Kalra dev->mbox_plat->pfvf1_mbox1_vec); 10419bd368caSHarman Kalra 10429bd368caSHarman Kalra if (rc) { 10439bd368caSHarman Kalra plt_err("Fail to register PF1(VF64-128) mbox irq"); 10449bd368caSHarman Kalra return rc; 10459bd368caSHarman Kalra } 10469bd368caSHarman Kalra } 1047665ff1ccSJerin Jacob /* MBOX interrupt AF <-> PF */ 
10489bd368caSHarman Kalra rc = dev_irq_register(intr_handle, roc_af_pf_mbox_irq, dev, dev->mbox_plat->pfaf_vec); 1049665ff1ccSJerin Jacob if (rc) { 1050665ff1ccSJerin Jacob plt_err("Fail to register AF<->PF mbox irq"); 1051665ff1ccSJerin Jacob return rc; 1052665ff1ccSJerin Jacob } 1053665ff1ccSJerin Jacob 1054585bb3e5SJerin Jacob /* HW enable intr */ 10559bd368caSHarman Kalra for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) { 10569bd368caSHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_int_ena_w1s[i]); 10579bd368caSHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + dev->mbox_plat->pfvf1_mbox_int_ena_w1s[i]); 10589bd368caSHarman Kalra } 1059585bb3e5SJerin Jacob 106061deac72SHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT); 106161deac72SHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1S); 1062665ff1ccSJerin Jacob 1063665ff1ccSJerin Jacob return rc; 1064665ff1ccSJerin Jacob } 1065665ff1ccSJerin Jacob 1066665ff1ccSJerin Jacob static int 1067585bb3e5SJerin Jacob mbox_register_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev) 1068585bb3e5SJerin Jacob { 1069d61138d4SHarman Kalra struct plt_intr_handle *intr_handle = pci_dev->intr_handle; 1070585bb3e5SJerin Jacob int rc; 1071585bb3e5SJerin Jacob 1072585bb3e5SJerin Jacob /* Clear irq */ 107361deac72SHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + RVU_VF_INT_ENA_W1C); 1074585bb3e5SJerin Jacob 1075585bb3e5SJerin Jacob /* MBOX interrupt PF <-> VF */ 107661deac72SHarman Kalra rc = dev_irq_register(intr_handle, roc_pf_vf_mbox_irq, dev, RVU_VF_INT_VEC_MBOX); 1077585bb3e5SJerin Jacob if (rc) { 1078585bb3e5SJerin Jacob plt_err("Fail to register PF<->VF mbox irq"); 1079585bb3e5SJerin Jacob return rc; 1080585bb3e5SJerin Jacob } 1081585bb3e5SJerin Jacob 1082585bb3e5SJerin Jacob /* HW enable intr */ 108361deac72SHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + RVU_VF_INT); 108461deac72SHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + RVU_VF_INT_ENA_W1S); 
1085585bb3e5SJerin Jacob 1086585bb3e5SJerin Jacob return rc; 1087585bb3e5SJerin Jacob } 1088585bb3e5SJerin Jacob 1089993107f0SShijith Thotton int 1090993107f0SShijith Thotton dev_mbox_register_irq(struct plt_pci_device *pci_dev, struct dev *dev) 1091665ff1ccSJerin Jacob { 1092585bb3e5SJerin Jacob if (dev_is_vf(dev)) 1093585bb3e5SJerin Jacob return mbox_register_vf_irq(pci_dev, dev); 1094585bb3e5SJerin Jacob else 1095665ff1ccSJerin Jacob return mbox_register_pf_irq(pci_dev, dev); 1096665ff1ccSJerin Jacob } 1097665ff1ccSJerin Jacob 1098665ff1ccSJerin Jacob static void 1099665ff1ccSJerin Jacob mbox_unregister_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev) 1100665ff1ccSJerin Jacob { 1101d61138d4SHarman Kalra struct plt_intr_handle *intr_handle = pci_dev->intr_handle; 1102585bb3e5SJerin Jacob int i; 1103585bb3e5SJerin Jacob 1104585bb3e5SJerin Jacob /* HW clear irq */ 11059bd368caSHarman Kalra for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) { 11069bd368caSHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_int_ena_w1c[i]); 11079bd368caSHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + dev->mbox_plat->pfvf1_mbox_int_ena_w1c[i]); 11089bd368caSHarman Kalra } 1109665ff1ccSJerin Jacob 111061deac72SHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1C); 1111665ff1ccSJerin Jacob 1112585bb3e5SJerin Jacob /* Unregister the interrupt handler for each vectors */ 1113585bb3e5SJerin Jacob /* MBOX interrupt for VF(0...63) <-> PF */ 11149bd368caSHarman Kalra dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev, dev->mbox_plat->pfvf_mbox0_vec); 1115585bb3e5SJerin Jacob 1116585bb3e5SJerin Jacob /* MBOX interrupt for VF(64...128) <-> PF */ 11179bd368caSHarman Kalra dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev, dev->mbox_plat->pfvf_mbox1_vec); 11189bd368caSHarman Kalra 11199bd368caSHarman Kalra if (roc_model_is_cn20k()) { 11209bd368caSHarman Kalra /* MBOX1 interrupt for VF(0...63) <-> PF */ 11219bd368caSHarman Kalra 
dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev, 11229bd368caSHarman Kalra dev->mbox_plat->pfvf1_mbox0_vec); 11239bd368caSHarman Kalra 11249bd368caSHarman Kalra /* MBOX1 interrupt for VF(64...128) <-> PF */ 11259bd368caSHarman Kalra dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev, 11269bd368caSHarman Kalra dev->mbox_plat->pfvf1_mbox1_vec); 11279bd368caSHarman Kalra } 1128585bb3e5SJerin Jacob 1129665ff1ccSJerin Jacob /* MBOX interrupt AF <-> PF */ 11309bd368caSHarman Kalra dev_irq_unregister(intr_handle, roc_af_pf_mbox_irq, dev, dev->mbox_plat->pfaf_vec); 1131665ff1ccSJerin Jacob } 1132665ff1ccSJerin Jacob 1133665ff1ccSJerin Jacob static void 1134585bb3e5SJerin Jacob mbox_unregister_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev) 1135585bb3e5SJerin Jacob { 1136d61138d4SHarman Kalra struct plt_intr_handle *intr_handle = pci_dev->intr_handle; 1137585bb3e5SJerin Jacob 1138585bb3e5SJerin Jacob /* Clear irq */ 113961deac72SHarman Kalra plt_write64(~0ull, dev->mbox_reg_base + RVU_VF_INT_ENA_W1C); 1140585bb3e5SJerin Jacob 1141585bb3e5SJerin Jacob /* Unregister the interrupt handler */ 114261deac72SHarman Kalra dev_irq_unregister(intr_handle, roc_pf_vf_mbox_irq, dev, RVU_VF_INT_VEC_MBOX); 1143585bb3e5SJerin Jacob } 1144585bb3e5SJerin Jacob 1145758b58f0SPavan Nikhilesh void 1146758b58f0SPavan Nikhilesh dev_mbox_unregister_irq(struct plt_pci_device *pci_dev, struct dev *dev) 1147665ff1ccSJerin Jacob { 1148585bb3e5SJerin Jacob if (dev_is_vf(dev)) 1149585bb3e5SJerin Jacob mbox_unregister_vf_irq(pci_dev, dev); 1150585bb3e5SJerin Jacob else 1151665ff1ccSJerin Jacob mbox_unregister_pf_irq(pci_dev, dev); 1152665ff1ccSJerin Jacob } 1153665ff1ccSJerin Jacob 1154585bb3e5SJerin Jacob static int 1155585bb3e5SJerin Jacob vf_flr_send_msg(struct dev *dev, uint16_t vf) 1156585bb3e5SJerin Jacob { 1157585bb3e5SJerin Jacob struct mbox *mbox = dev->mbox; 1158585bb3e5SJerin Jacob struct msg_req *req; 1159585bb3e5SJerin Jacob int rc; 1160585bb3e5SJerin Jacob 
11611c7a4d37SHarman Kalra req = mbox_alloc_msg_vf_flr(mbox_get(mbox)); 1162585bb3e5SJerin Jacob if (req == NULL) 1163585bb3e5SJerin Jacob return -ENOSPC; 1164585bb3e5SJerin Jacob /* Overwrite pcifunc to indicate VF */ 1165585bb3e5SJerin Jacob req->hdr.pcifunc = dev_pf_func(dev->pf, vf); 1166585bb3e5SJerin Jacob 1167585bb3e5SJerin Jacob /* Sync message in interrupt context */ 1168585bb3e5SJerin Jacob rc = pf_af_sync_msg(dev, NULL); 1169585bb3e5SJerin Jacob if (rc) 1170585bb3e5SJerin Jacob plt_err("Failed to send VF FLR mbox msg, rc=%d", rc); 1171585bb3e5SJerin Jacob 11721c7a4d37SHarman Kalra mbox_put(mbox); 11731c7a4d37SHarman Kalra 1174585bb3e5SJerin Jacob return rc; 1175585bb3e5SJerin Jacob } 1176585bb3e5SJerin Jacob 1177585bb3e5SJerin Jacob static void 1178585bb3e5SJerin Jacob roc_pf_vf_flr_irq(void *param) 1179585bb3e5SJerin Jacob { 1180585bb3e5SJerin Jacob struct dev *dev = (struct dev *)param; 11811c7a4d37SHarman Kalra bool signal_thread = false; 11821c7a4d37SHarman Kalra dev_intr_t flr; 1183585bb3e5SJerin Jacob uintptr_t bar2; 1184585bb3e5SJerin Jacob uint64_t intr; 11851c7a4d37SHarman Kalra int i, sz; 1186585bb3e5SJerin Jacob 1187585bb3e5SJerin Jacob bar2 = dev->bar2; 1188585bb3e5SJerin Jacob 11891c7a4d37SHarman Kalra sz = sizeof(flr.bits[0]) * MAX_VFPF_DWORD_BITS; 11901c7a4d37SHarman Kalra memset(flr.bits, 0, sz); 1191585bb3e5SJerin Jacob for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) { 1192585bb3e5SJerin Jacob intr = plt_read64(bar2 + RVU_PF_VFFLR_INTX(i)); 1193585bb3e5SJerin Jacob if (!intr) 1194585bb3e5SJerin Jacob continue; 1195585bb3e5SJerin Jacob 1196585bb3e5SJerin Jacob /* Clear interrupt */ 11971c7a4d37SHarman Kalra plt_write64(intr, bar2 + RVU_PF_VFFLR_INTX(i)); 1198585bb3e5SJerin Jacob /* Disable the interrupt */ 11991c7a4d37SHarman Kalra plt_write64(intr, 1200585bb3e5SJerin Jacob bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i)); 1201585bb3e5SJerin Jacob 12021c7a4d37SHarman Kalra /* Save FLR interrupts per VF as bits */ 12031c7a4d37SHarman Kalra flr.bits[i] |= 
intr; 1204585bb3e5SJerin Jacob /* Enable interrupt */ 12051c7a4d37SHarman Kalra plt_write64(~0ull, 12061c7a4d37SHarman Kalra bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i)); 12071c7a4d37SHarman Kalra signal_thread = true; 1208585bb3e5SJerin Jacob } 12091c7a4d37SHarman Kalra 12101c7a4d37SHarman Kalra if (signal_thread) { 12111c7a4d37SHarman Kalra pthread_mutex_lock(&dev->sync.mutex); 12121c7a4d37SHarman Kalra /* Interrupt state was saved in local variable first, as dev->flr.bits 12131c7a4d37SHarman Kalra * is a shared resources between VF msg and interrupt thread. 12141c7a4d37SHarman Kalra */ 12151c7a4d37SHarman Kalra memcpy(dev->flr.bits, flr.bits, sz); 12161c7a4d37SHarman Kalra /* FLR message received from VF */ 12171c7a4d37SHarman Kalra dev->sync.msg_avail |= ROC_DEV_FLR_PEND; 12181c7a4d37SHarman Kalra /* Signal vf message handler thread */ 12191c7a4d37SHarman Kalra pthread_cond_signal(&dev->sync.pfvf_msg_cond); 12201c7a4d37SHarman Kalra pthread_mutex_unlock(&dev->sync.mutex); 1221585bb3e5SJerin Jacob } 1222585bb3e5SJerin Jacob } 1223585bb3e5SJerin Jacob 1224758b58f0SPavan Nikhilesh void 1225758b58f0SPavan Nikhilesh dev_vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev) 1226585bb3e5SJerin Jacob { 1227d61138d4SHarman Kalra struct plt_intr_handle *intr_handle = pci_dev->intr_handle; 1228585bb3e5SJerin Jacob int i; 1229585bb3e5SJerin Jacob 1230585bb3e5SJerin Jacob plt_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name); 1231585bb3e5SJerin Jacob 1232585bb3e5SJerin Jacob /* HW clear irq */ 1233585bb3e5SJerin Jacob for (i = 0; i < MAX_VFPF_DWORD_BITS; i++) 1234585bb3e5SJerin Jacob plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i)); 1235585bb3e5SJerin Jacob 1236585bb3e5SJerin Jacob dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev, 1237585bb3e5SJerin Jacob RVU_PF_INT_VEC_VFFLR0); 1238585bb3e5SJerin Jacob 1239585bb3e5SJerin Jacob dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev, 1240585bb3e5SJerin Jacob RVU_PF_INT_VEC_VFFLR1); 
1241585bb3e5SJerin Jacob } 1242585bb3e5SJerin Jacob 1243993107f0SShijith Thotton int 1244993107f0SShijith Thotton dev_vf_flr_register_irqs(struct plt_pci_device *pci_dev, struct dev *dev) 1245585bb3e5SJerin Jacob { 1246d61138d4SHarman Kalra struct plt_intr_handle *handle = pci_dev->intr_handle; 1247585bb3e5SJerin Jacob int i, rc; 1248585bb3e5SJerin Jacob 1249585bb3e5SJerin Jacob plt_base_dbg("Register VF FLR interrupts for %s", pci_dev->name); 1250585bb3e5SJerin Jacob 1251585bb3e5SJerin Jacob rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev, 1252585bb3e5SJerin Jacob RVU_PF_INT_VEC_VFFLR0); 1253585bb3e5SJerin Jacob if (rc) 1254585bb3e5SJerin Jacob plt_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc); 1255585bb3e5SJerin Jacob 1256585bb3e5SJerin Jacob rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev, 1257585bb3e5SJerin Jacob RVU_PF_INT_VEC_VFFLR1); 1258585bb3e5SJerin Jacob if (rc) 1259585bb3e5SJerin Jacob plt_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc); 1260585bb3e5SJerin Jacob 1261585bb3e5SJerin Jacob /* Enable HW interrupt */ 1262585bb3e5SJerin Jacob for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) { 1263585bb3e5SJerin Jacob plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i)); 1264585bb3e5SJerin Jacob plt_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i)); 1265585bb3e5SJerin Jacob plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i)); 1266585bb3e5SJerin Jacob } 1267585bb3e5SJerin Jacob return 0; 1268585bb3e5SJerin Jacob } 1269585bb3e5SJerin Jacob 1270f1187392SHarman Kalra static void 12711c7a4d37SHarman Kalra vf_flr_handle_msg(void *param, dev_intr_t *flr) 12721c7a4d37SHarman Kalra { 12731c7a4d37SHarman Kalra uint16_t vf, max_vf, max_bits; 12741c7a4d37SHarman Kalra struct dev *dev = param; 12751c7a4d37SHarman Kalra 12761c7a4d37SHarman Kalra max_bits = sizeof(flr->bits[0]) * sizeof(uint64_t); 12771c7a4d37SHarman Kalra max_vf = max_bits * MAX_VFPF_DWORD_BITS; 12781c7a4d37SHarman Kalra 12791c7a4d37SHarman Kalra for (vf = 0; vf < max_vf; vf++) { 
12801c7a4d37SHarman Kalra if (flr->bits[vf / max_bits] & BIT_ULL(vf % max_bits)) { 12811c7a4d37SHarman Kalra plt_base_dbg("Process FLR vf:%d request (pf:%d, vf:%d)", 12821c7a4d37SHarman Kalra vf, dev->pf, dev->vf); 12831c7a4d37SHarman Kalra /* Inform AF about VF reset */ 12841c7a4d37SHarman Kalra vf_flr_send_msg(dev, vf); 12851c7a4d37SHarman Kalra flr->bits[vf / max_bits] &= ~(BIT_ULL(vf % max_bits)); 12861c7a4d37SHarman Kalra 12871c7a4d37SHarman Kalra /* Signal FLR finish */ 12881c7a4d37SHarman Kalra plt_write64(BIT_ULL(vf % max_bits), 12891c7a4d37SHarman Kalra dev->bar2 + RVU_PF_VFTRPENDX(vf / max_bits)); 12901c7a4d37SHarman Kalra } 12911c7a4d37SHarman Kalra } 12921c7a4d37SHarman Kalra } 12931c7a4d37SHarman Kalra 1294a7ba40b2SThomas Monjalon static uint32_t 12951c7a4d37SHarman Kalra pf_vf_mbox_thread_main(void *arg) 12961c7a4d37SHarman Kalra { 12971c7a4d37SHarman Kalra struct dev *dev = arg; 12981c7a4d37SHarman Kalra bool is_flr, is_mbox; 12991c7a4d37SHarman Kalra dev_intr_t flr, intr; 13001c7a4d37SHarman Kalra int sz, rc; 13011c7a4d37SHarman Kalra 13021c7a4d37SHarman Kalra sz = sizeof(intr.bits[0]) * MAX_VFPF_DWORD_BITS; 13031c7a4d37SHarman Kalra pthread_mutex_lock(&dev->sync.mutex); 13041c7a4d37SHarman Kalra while (dev->sync.start_thread) { 13051c7a4d37SHarman Kalra do { 13061c7a4d37SHarman Kalra rc = pthread_cond_wait(&dev->sync.pfvf_msg_cond, &dev->sync.mutex); 13071c7a4d37SHarman Kalra } while (rc != 0); 13081c7a4d37SHarman Kalra 13091c7a4d37SHarman Kalra if (!dev->sync.msg_avail) { 13101c7a4d37SHarman Kalra continue; 13111c7a4d37SHarman Kalra } else { 13121c7a4d37SHarman Kalra while (dev->sync.msg_avail) { 13131c7a4d37SHarman Kalra /* Check which VF msg received */ 13141c7a4d37SHarman Kalra is_mbox = dev->sync.msg_avail & ROC_DEV_MBOX_PEND; 13151c7a4d37SHarman Kalra is_flr = dev->sync.msg_avail & ROC_DEV_FLR_PEND; 13161c7a4d37SHarman Kalra memcpy(intr.bits, dev->intr.bits, sz); 13171c7a4d37SHarman Kalra memcpy(flr.bits, dev->flr.bits, sz); 
13181c7a4d37SHarman Kalra memset(dev->flr.bits, 0, sz); 13191c7a4d37SHarman Kalra memset(dev->intr.bits, 0, sz); 13201c7a4d37SHarman Kalra dev->sync.msg_avail = 0; 13211c7a4d37SHarman Kalra /* Unlocking for interrupt thread to grab lock 13221c7a4d37SHarman Kalra * and update msg_avail field. 13231c7a4d37SHarman Kalra */ 13241c7a4d37SHarman Kalra pthread_mutex_unlock(&dev->sync.mutex); 13251c7a4d37SHarman Kalra /* Calling respective message handlers */ 13261c7a4d37SHarman Kalra if (is_mbox) 13271c7a4d37SHarman Kalra roc_vf_pf_mbox_handle_msg(dev, &intr); 13281c7a4d37SHarman Kalra if (is_flr) 13291c7a4d37SHarman Kalra vf_flr_handle_msg(dev, &flr); 13301c7a4d37SHarman Kalra /* Locking as cond wait will unlock before wait */ 13311c7a4d37SHarman Kalra pthread_mutex_lock(&dev->sync.mutex); 13321c7a4d37SHarman Kalra } 13331c7a4d37SHarman Kalra } 13341c7a4d37SHarman Kalra } 13351c7a4d37SHarman Kalra 13361c7a4d37SHarman Kalra pthread_mutex_unlock(&dev->sync.mutex); 13371c7a4d37SHarman Kalra 1338a7ba40b2SThomas Monjalon return 0; 13391c7a4d37SHarman Kalra } 13401c7a4d37SHarman Kalra 13411c7a4d37SHarman Kalra static void 1342f1187392SHarman Kalra clear_rvum_interrupts(struct dev *dev) 1343f1187392SHarman Kalra { 1344f1187392SHarman Kalra uint64_t intr; 1345f1187392SHarman Kalra int i; 1346f1187392SHarman Kalra 1347f1187392SHarman Kalra if (dev_is_vf(dev)) { 1348f1187392SHarman Kalra /* Clear VF mbox interrupt */ 134961deac72SHarman Kalra intr = plt_read64(dev->mbox_reg_base + RVU_VF_INT); 1350f1187392SHarman Kalra if (intr) 135161deac72SHarman Kalra plt_write64(intr, dev->mbox_reg_base + RVU_VF_INT); 1352f1187392SHarman Kalra } else { 1353f1187392SHarman Kalra /* Clear AF PF interrupt line */ 135461deac72SHarman Kalra intr = plt_read64(dev->mbox_reg_base + RVU_PF_INT); 1355f1187392SHarman Kalra if (intr) 135661deac72SHarman Kalra plt_write64(intr, dev->mbox_reg_base + RVU_PF_INT); 1357f1187392SHarman Kalra for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) { 1358f1187392SHarman Kalra 
/* Clear MBOX interrupts */ 13599bd368caSHarman Kalra intr = plt_read64(dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_intx[i]); 13609bd368caSHarman Kalra if (intr) { 13619bd368caSHarman Kalra plt_write64(intr, 13629bd368caSHarman Kalra dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_intx[i]); 13639bd368caSHarman Kalra if (roc_model_is_cn20k()) 13649bd368caSHarman Kalra plt_write64(intr, 13659bd368caSHarman Kalra dev->mbox_reg_base + 13669bd368caSHarman Kalra dev->mbox_plat->pfvf1_mbox_intx[i]); 13679bd368caSHarman Kalra } 1368f1187392SHarman Kalra /* Clear VF FLR interrupts */ 1369f1187392SHarman Kalra intr = plt_read64(dev->bar2 + RVU_PF_VFFLR_INTX(i)); 1370f1187392SHarman Kalra if (intr) 1371f1187392SHarman Kalra plt_write64(intr, 1372f1187392SHarman Kalra dev->bar2 + RVU_PF_VFFLR_INTX(i)); 1373f1187392SHarman Kalra } 1374f1187392SHarman Kalra } 1375f1187392SHarman Kalra } 1376f1187392SHarman Kalra 1377585bb3e5SJerin Jacob int 1378585bb3e5SJerin Jacob dev_active_vfs(struct dev *dev) 1379585bb3e5SJerin Jacob { 1380585bb3e5SJerin Jacob int i, count = 0; 1381585bb3e5SJerin Jacob 1382585bb3e5SJerin Jacob for (i = 0; i < MAX_VFPF_DWORD_BITS; i++) 1383354bf671SJerin Jacob count += plt_popcount32(dev->active_vfs[i]); 1384585bb3e5SJerin Jacob 1385585bb3e5SJerin Jacob return count; 1386585bb3e5SJerin Jacob } 1387585bb3e5SJerin Jacob 1388585bb3e5SJerin Jacob static void 1389585bb3e5SJerin Jacob dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev) 1390585bb3e5SJerin Jacob { 1391585bb3e5SJerin Jacob switch (pci_dev->id.device_id) { 1392585bb3e5SJerin Jacob case PCI_DEVID_CNXK_RVU_PF: 1393585bb3e5SJerin Jacob break; 1394585bb3e5SJerin Jacob case PCI_DEVID_CNXK_RVU_SSO_TIM_VF: 1395585bb3e5SJerin Jacob case PCI_DEVID_CNXK_RVU_NPA_VF: 1396c045d2e5SAnoob Joseph case PCI_DEVID_CN10K_RVU_CPT_VF: 1397c045d2e5SAnoob Joseph case PCI_DEVID_CN9K_RVU_CPT_VF: 1398585bb3e5SJerin Jacob case PCI_DEVID_CNXK_RVU_AF_VF: 1399585bb3e5SJerin Jacob case PCI_DEVID_CNXK_RVU_VF: 
1400585bb3e5SJerin Jacob case PCI_DEVID_CNXK_RVU_SDP_VF: 1401d71d40c1SNithin Dabilpuram case PCI_DEVID_CNXK_RVU_NIX_INL_VF: 1402*384903edSAkhil Goyal case PCI_DEVID_CNXK_RVU_BPHY_VF: 140354ec7c87SHarman Kalra case PCI_DEVID_CNXK_RVU_ESWITCH_VF: 1404585bb3e5SJerin Jacob dev->hwcap |= DEV_HWCAP_F_VF; 1405585bb3e5SJerin Jacob break; 1406585bb3e5SJerin Jacob } 1407585bb3e5SJerin Jacob } 1408585bb3e5SJerin Jacob 1409585bb3e5SJerin Jacob static uintptr_t 14109bd368caSHarman Kalra cn20k_pfvf_mbox_alloc(struct dev *dev, uint16_t max_vfs) 14119bd368caSHarman Kalra { 14129bd368caSHarman Kalra char name[PLT_MEMZONE_NAMESIZE]; 14139bd368caSHarman Kalra const struct plt_memzone *mz; 14149bd368caSHarman Kalra uint32_t vf_mbox_region; 14159bd368caSHarman Kalra 14169bd368caSHarman Kalra vf_mbox_region = plt_align64pow2(MBOX_SIZE * max_vfs); 14179bd368caSHarman Kalra /* Allocating memory for LMT region */ 14189bd368caSHarman Kalra sprintf(name, "PFVF_MBOX_REGION%x", dev->pf_func); 14199bd368caSHarman Kalra 14209bd368caSHarman Kalra mz = plt_memzone_reserve_aligned(name, vf_mbox_region, 0, MBOX_SIZE); 14219bd368caSHarman Kalra if (!mz) { 14229bd368caSHarman Kalra plt_err("Memory alloc failed: %s", strerror(errno)); 14239bd368caSHarman Kalra goto fail; 14249bd368caSHarman Kalra } 14259bd368caSHarman Kalra 14269bd368caSHarman Kalra dev->vf_mbox_base = mz->iova; 14279bd368caSHarman Kalra dev->vf_mbox_mz = mz; 14289bd368caSHarman Kalra plt_write64(dev->vf_mbox_base, dev->mbox_reg_base + RVU_PF_VF_MBOX_ADDR); 14299bd368caSHarman Kalra 14309bd368caSHarman Kalra return dev->vf_mbox_base; 14319bd368caSHarman Kalra fail: 14329bd368caSHarman Kalra return (uintptr_t)NULL; 14339bd368caSHarman Kalra } 14349bd368caSHarman Kalra 14359bd368caSHarman Kalra static uintptr_t 1436585bb3e5SJerin Jacob dev_vf_mbase_get(struct plt_pci_device *pci_dev, struct dev *dev) 1437585bb3e5SJerin Jacob { 1438585bb3e5SJerin Jacob void *vf_mbase = NULL; 1439585bb3e5SJerin Jacob uintptr_t pa; 1440585bb3e5SJerin Jacob 
1441585bb3e5SJerin Jacob if (dev_is_vf(dev)) 1442585bb3e5SJerin Jacob return 0; 1443585bb3e5SJerin Jacob 14449bd368caSHarman Kalra if (roc_model_is_cn20k()) 14459bd368caSHarman Kalra return cn20k_pfvf_mbox_alloc(dev, pci_dev->max_vfs); 14469bd368caSHarman Kalra 14479bd368caSHarman Kalra /* For CN10K, it is just after PF MBOX */ 14489bd368caSHarman Kalra if (roc_model_is_cn10k()) 1449585bb3e5SJerin Jacob return dev->bar4 + MBOX_SIZE; 1450585bb3e5SJerin Jacob 1451585bb3e5SJerin Jacob pa = plt_read64(dev->bar2 + RVU_PF_VF_BAR4_ADDR); 1452585bb3e5SJerin Jacob if (!pa) { 1453585bb3e5SJerin Jacob plt_err("Invalid VF mbox base pa"); 1454585bb3e5SJerin Jacob return pa; 1455585bb3e5SJerin Jacob } 1456585bb3e5SJerin Jacob 1457585bb3e5SJerin Jacob vf_mbase = mbox_mem_map(pa, MBOX_SIZE * pci_dev->max_vfs); 1458585bb3e5SJerin Jacob if (vf_mbase == MAP_FAILED) { 1459585bb3e5SJerin Jacob plt_err("Failed to mmap vf mbase at pa 0x%lx, rc=%d", pa, 1460585bb3e5SJerin Jacob errno); 1461585bb3e5SJerin Jacob return 0; 1462585bb3e5SJerin Jacob } 1463585bb3e5SJerin Jacob return (uintptr_t)vf_mbase; 1464585bb3e5SJerin Jacob } 1465585bb3e5SJerin Jacob 1466585bb3e5SJerin Jacob static void 1467585bb3e5SJerin Jacob dev_vf_mbase_put(struct plt_pci_device *pci_dev, uintptr_t vf_mbase) 1468585bb3e5SJerin Jacob { 1469585bb3e5SJerin Jacob if (!vf_mbase || !pci_dev->max_vfs || !roc_model_is_cn9k()) 1470585bb3e5SJerin Jacob return; 1471585bb3e5SJerin Jacob 1472585bb3e5SJerin Jacob mbox_mem_unmap((void *)vf_mbase, MBOX_SIZE * pci_dev->max_vfs); 1473585bb3e5SJerin Jacob } 1474585bb3e5SJerin Jacob 1475665ff1ccSJerin Jacob static int 14769854e5dbSHarman Kalra dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova) 1477665ff1ccSJerin Jacob { 1478665ff1ccSJerin Jacob struct lmtst_tbl_setup_req *req; 147944a9307cSRakesh Kudurumalla int rc; 1480665ff1ccSJerin Jacob 148144a9307cSRakesh Kudurumalla req = mbox_alloc_msg_lmtst_tbl_setup(mbox_get(mbox)); 148244a9307cSRakesh Kudurumalla if 
(!req) { 148344a9307cSRakesh Kudurumalla rc = -ENOSPC; 148444a9307cSRakesh Kudurumalla goto exit; 148544a9307cSRakesh Kudurumalla } 1486da718c19SNithin Dabilpuram 14879854e5dbSHarman Kalra /* This pcifunc is defined with primary pcifunc whose LMT address 14889854e5dbSHarman Kalra * will be shared. If call contains valid IOVA, following pcifunc 14899854e5dbSHarman Kalra * field is of no use. 14909854e5dbSHarman Kalra */ 14919854e5dbSHarman Kalra req->pcifunc = valid_iova ? 0 : idev_lmt_pffunc_get(); 14929854e5dbSHarman Kalra req->use_local_lmt_region = valid_iova; 14939854e5dbSHarman Kalra req->lmt_iova = iova; 1494665ff1ccSJerin Jacob 149544a9307cSRakesh Kudurumalla rc = mbox_process(mbox); 149644a9307cSRakesh Kudurumalla exit: 149744a9307cSRakesh Kudurumalla mbox_put(mbox); 149844a9307cSRakesh Kudurumalla return rc; 1499665ff1ccSJerin Jacob } 1500665ff1ccSJerin Jacob 15019854e5dbSHarman Kalra /* Total no of lines * size of each lmtline */ 15029854e5dbSHarman Kalra #define LMT_REGION_SIZE (ROC_NUM_LMT_LINES * ROC_LMT_LINE_SZ) 1503665ff1ccSJerin Jacob static int 15049854e5dbSHarman Kalra dev_lmt_setup(struct dev *dev) 1505665ff1ccSJerin Jacob { 15069854e5dbSHarman Kalra char name[PLT_MEMZONE_NAMESIZE]; 15079854e5dbSHarman Kalra const struct plt_memzone *mz; 1508665ff1ccSJerin Jacob struct idev_cfg *idev; 1509665ff1ccSJerin Jacob int rc; 1510665ff1ccSJerin Jacob 1511665ff1ccSJerin Jacob if (roc_model_is_cn9k()) { 151261deac72SHarman Kalra dev->lmt_base = dev->mbox_reg_base + (RVU_BLOCK_ADDR_LMT << 20); 1513665ff1ccSJerin Jacob return 0; 1514665ff1ccSJerin Jacob } 1515665ff1ccSJerin Jacob 1516665ff1ccSJerin Jacob /* [CN10K, .) */ 1517665ff1ccSJerin Jacob 1518665ff1ccSJerin Jacob /* Set common lmt region from second pf_func onwards. 
*/ 1519665ff1ccSJerin Jacob if (!dev->disable_shared_lmt && idev_lmt_pffunc_get() && 1520665ff1ccSJerin Jacob dev->pf_func != idev_lmt_pffunc_get()) { 15219854e5dbSHarman Kalra rc = dev_setup_shared_lmt_region(dev->mbox, false, 0); 1522665ff1ccSJerin Jacob if (!rc) { 15239854e5dbSHarman Kalra /* On success, updating lmt base of secondary pf_funcs 15249854e5dbSHarman Kalra * with primary pf_func's lmt base. 15259854e5dbSHarman Kalra */ 1526665ff1ccSJerin Jacob dev->lmt_base = roc_idev_lmt_base_addr_get(); 1527665ff1ccSJerin Jacob return rc; 1528665ff1ccSJerin Jacob } 1529665ff1ccSJerin Jacob plt_err("Failed to setup shared lmt region, pf_func %d err %d " 1530665ff1ccSJerin Jacob "Using respective LMT region per pf func", 1531665ff1ccSJerin Jacob dev->pf_func, rc); 1532665ff1ccSJerin Jacob } 1533665ff1ccSJerin Jacob 15349854e5dbSHarman Kalra /* Allocating memory for LMT region */ 15359854e5dbSHarman Kalra sprintf(name, "LMT_MAP%x", dev->pf_func); 15369854e5dbSHarman Kalra 15379854e5dbSHarman Kalra /* Setting alignment to ensure correct masking for resetting to lmt base 15389854e5dbSHarman Kalra * of a core after all lmt lines under that core are used. 15399854e5dbSHarman Kalra * Alignment value LMT_REGION_SIZE to handle the case where all lines 15409854e5dbSHarman Kalra * are used by 1 core. 
1541585bb3e5SJerin Jacob */ 154261deac72SHarman Kalra mz = plt_lmt_region_reserve_aligned(name, LMT_REGION_SIZE, LMT_REGION_SIZE); 15439854e5dbSHarman Kalra if (!mz) { 15449854e5dbSHarman Kalra plt_err("Memory alloc failed: %s", strerror(errno)); 15459854e5dbSHarman Kalra goto fail; 1546585bb3e5SJerin Jacob } 1547585bb3e5SJerin Jacob 15489854e5dbSHarman Kalra /* Share the IOVA address with Kernel */ 15499854e5dbSHarman Kalra rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova); 15509854e5dbSHarman Kalra if (rc) { 15519854e5dbSHarman Kalra errno = rc; 15529854e5dbSHarman Kalra goto free; 1553665ff1ccSJerin Jacob } 1554665ff1ccSJerin Jacob 15559854e5dbSHarman Kalra dev->lmt_base = mz->iova; 15569854e5dbSHarman Kalra dev->lmt_mz = mz; 1557665ff1ccSJerin Jacob /* Base LMT address should be chosen from only those pci funcs which 1558665ff1ccSJerin Jacob * participate in LMT shared mode. 1559665ff1ccSJerin Jacob */ 1560665ff1ccSJerin Jacob if (!dev->disable_shared_lmt) { 1561665ff1ccSJerin Jacob idev = idev_get_cfg(); 1562da718c19SNithin Dabilpuram if (!idev) { 1563da718c19SNithin Dabilpuram errno = EFAULT; 1564da718c19SNithin Dabilpuram goto free; 1565da718c19SNithin Dabilpuram } 1566da718c19SNithin Dabilpuram 1567665ff1ccSJerin Jacob if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) { 1568665ff1ccSJerin Jacob idev->lmt_base_addr = dev->lmt_base; 1569665ff1ccSJerin Jacob idev->lmt_pf_func = dev->pf_func; 1570665ff1ccSJerin Jacob idev->num_lmtlines = RVU_LMT_LINE_MAX; 1571665ff1ccSJerin Jacob } 1572665ff1ccSJerin Jacob } 1573665ff1ccSJerin Jacob 1574665ff1ccSJerin Jacob return 0; 15759854e5dbSHarman Kalra free: 15769854e5dbSHarman Kalra plt_memzone_free(mz); 15779854e5dbSHarman Kalra fail: 15789854e5dbSHarman Kalra return -errno; 1579665ff1ccSJerin Jacob } 1580665ff1ccSJerin Jacob 1581b38db1bfSTomasz Duszynski static bool 1582b38db1bfSTomasz Duszynski dev_cache_line_size_valid(void) 1583b38db1bfSTomasz Duszynski { 1584b38db1bfSTomasz Duszynski if 
(roc_model_is_cn9k()) { 1585b38db1bfSTomasz Duszynski if (PLT_CACHE_LINE_SIZE != 128) { 158661deac72SHarman Kalra plt_err("Cache line size of %d is wrong for CN9K", PLT_CACHE_LINE_SIZE); 1587b38db1bfSTomasz Duszynski return false; 1588b38db1bfSTomasz Duszynski } 1589b38db1bfSTomasz Duszynski } else if (roc_model_is_cn10k()) { 1590b38db1bfSTomasz Duszynski if (PLT_CACHE_LINE_SIZE == 128) { 1591b38db1bfSTomasz Duszynski plt_warn("Cache line size of %d might affect performance", 1592b38db1bfSTomasz Duszynski PLT_CACHE_LINE_SIZE); 1593b38db1bfSTomasz Duszynski } else if (PLT_CACHE_LINE_SIZE != 64) { 159461deac72SHarman Kalra plt_err("Cache line size of %d is wrong for CN10K", PLT_CACHE_LINE_SIZE); 1595b38db1bfSTomasz Duszynski return false; 1596b38db1bfSTomasz Duszynski } 1597b38db1bfSTomasz Duszynski } 1598b38db1bfSTomasz Duszynski 1599b38db1bfSTomasz Duszynski return true; 1600b38db1bfSTomasz Duszynski } 1601b38db1bfSTomasz Duszynski 160261deac72SHarman Kalra static void 16039bd368caSHarman Kalra mbox_platform_changes(struct mbox_platform *mbox_plat, uintptr_t bar2, uintptr_t bar4, bool is_vf) 160461deac72SHarman Kalra { 16059bd368caSHarman Kalra int i; 16069bd368caSHarman Kalra 160761deac72SHarman Kalra if (roc_model_is_cn20k()) { 160861deac72SHarman Kalra /* For CN20K, AF allocates mbox memory in DRAM and writes PF 160961deac72SHarman Kalra * regions/offsets in RVU_MBOX_AF_PFX_ADDR, the RVU_PFX_FUNC_PFAF_MBOX 161061deac72SHarman Kalra * gives the aliased address to access AF/PF mailbox regions. 
161161deac72SHarman Kalra */ 161261deac72SHarman Kalra mbox_plat->mbox_reg_base = bar2; 161361deac72SHarman Kalra mbox_plat->mbox_region_base = 161461deac72SHarman Kalra bar2 + (RVU_PFX_FUNC_PFAF_MBOX + 161561deac72SHarman Kalra ((uint64_t)RVU_BLOCK_ADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT)); 161661deac72SHarman Kalra /* Interrupt vectors */ 161761deac72SHarman Kalra mbox_plat->pfaf_vec = RVU_MBOX_PF_INT_VEC_AFPF_MBOX; 16189bd368caSHarman Kalra mbox_plat->pfvf_mbox0_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; 16199bd368caSHarman Kalra mbox_plat->pfvf_mbox1_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX1; 16209bd368caSHarman Kalra mbox_plat->pfvf1_mbox0_vec = RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0; 16219bd368caSHarman Kalra mbox_plat->pfvf1_mbox1_vec = RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; 16229bd368caSHarman Kalra for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) { 16239bd368caSHarman Kalra mbox_plat->pfvf_mbox_int_ena_w1s[i] = RVU_MBOX_PF_VFPF_INT_ENA_W1SX(i); 16249bd368caSHarman Kalra mbox_plat->pfvf_mbox_int_ena_w1c[i] = RVU_MBOX_PF_VFPF_INT_ENA_W1CX(i); 16259bd368caSHarman Kalra mbox_plat->pfvf_mbox_intx[i] = RVU_MBOX_PF_VFPF_INTX(i); 16269bd368caSHarman Kalra mbox_plat->pfvf1_mbox_int_ena_w1s[i] = RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(i); 16279bd368caSHarman Kalra mbox_plat->pfvf1_mbox_int_ena_w1c[i] = RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(i); 16289bd368caSHarman Kalra mbox_plat->pfvf1_mbox_intx[i] = RVU_MBOX_PF_VFPF1_INTX(i); 16299bd368caSHarman Kalra } 163061deac72SHarman Kalra } else { 163161deac72SHarman Kalra mbox_plat->mbox_reg_base = bar2; 163261deac72SHarman Kalra mbox_plat->mbox_region_base = bar4; 163361deac72SHarman Kalra mbox_plat->pfaf_vec = RVU_PF_INT_VEC_AFPF_MBOX; 16349bd368caSHarman Kalra mbox_plat->pfvf_mbox0_vec = RVU_PF_INT_VEC_VFPF_MBOX0; 16359bd368caSHarman Kalra mbox_plat->pfvf_mbox1_vec = RVU_PF_INT_VEC_VFPF_MBOX1; 16369bd368caSHarman Kalra for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) { 16379bd368caSHarman Kalra mbox_plat->pfvf_mbox_int_ena_w1s[i] = RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i); 
16389bd368caSHarman Kalra mbox_plat->pfvf_mbox_int_ena_w1c[i] = RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i); 16399bd368caSHarman Kalra mbox_plat->pfvf_mbox_intx[i] = RVU_PF_VFPF_MBOX_INTX(i); 16409bd368caSHarman Kalra } 16419bd368caSHarman Kalra } 16429bd368caSHarman Kalra if (is_vf) { 16439bd368caSHarman Kalra if (roc_model_is_cn20k()) 16449bd368caSHarman Kalra mbox_plat->mbox_region_base = 16459bd368caSHarman Kalra bar2 + (RVU_VF_MBOX_REGION + 16469bd368caSHarman Kalra ((uint64_t)RVU_BLOCK_ADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT)); 16479bd368caSHarman Kalra if (roc_model_is_cn10k()) 16489bd368caSHarman Kalra mbox_plat->mbox_region_base = bar2 + RVU_VF_MBOX_REGION; 164961deac72SHarman Kalra } 165061deac72SHarman Kalra } 165161deac72SHarman Kalra 1652665ff1ccSJerin Jacob int 1653665ff1ccSJerin Jacob dev_init(struct dev *dev, struct plt_pci_device *pci_dev) 1654665ff1ccSJerin Jacob { 165561deac72SHarman Kalra uintptr_t mbox_reg_base, mbox_region_base, bar2, bar4; 16561c7a4d37SHarman Kalra char name[MBOX_HANDLER_NAME_MAX_LEN]; 1657665ff1ccSJerin Jacob int direction, up_direction, rc; 1658585bb3e5SJerin Jacob uintptr_t vf_mbase = 0; 1659665ff1ccSJerin Jacob uint64_t intr_offset; 16609bd368caSHarman Kalra bool is_vf; 1661665ff1ccSJerin Jacob 1662b38db1bfSTomasz Duszynski if (!dev_cache_line_size_valid()) 1663b38db1bfSTomasz Duszynski return -EFAULT; 1664b38db1bfSTomasz Duszynski 166536808adbSRahul Bhansali if (!roc_plt_lmt_validate()) { 166636808adbSRahul Bhansali plt_err("Failed to validate LMT line"); 166736808adbSRahul Bhansali return -EFAULT; 166836808adbSRahul Bhansali } 166936808adbSRahul Bhansali 16709bd368caSHarman Kalra dev_vf_hwcap_update(pci_dev, dev); 16719bd368caSHarman Kalra is_vf = dev_is_vf(dev); 16729bd368caSHarman Kalra 1673665ff1ccSJerin Jacob bar2 = (uintptr_t)pci_dev->mem_resource[2].addr; 1674665ff1ccSJerin Jacob bar4 = (uintptr_t)pci_dev->mem_resource[4].addr; 16759bd368caSHarman Kalra dev->mbox_plat = plt_zmalloc(sizeof(struct mbox_platform), 0); 
16769bd368caSHarman Kalra if (!dev->mbox_plat) { 16779bd368caSHarman Kalra plt_err("Failed to allocate mem for mbox_plat"); 16789bd368caSHarman Kalra rc = -ENOMEM; 16799bd368caSHarman Kalra goto fail; 16809bd368caSHarman Kalra } 16819bd368caSHarman Kalra mbox_platform_changes(dev->mbox_plat, bar2, bar4, is_vf); 168261deac72SHarman Kalra 16839bd368caSHarman Kalra mbox_reg_base = dev->mbox_plat->mbox_reg_base; 16849bd368caSHarman Kalra mbox_region_base = dev->mbox_plat->mbox_region_base; 168561deac72SHarman Kalra if (mbox_reg_base == 0 || mbox_region_base == 0) { 1686665ff1ccSJerin Jacob plt_err("Failed to get PCI bars"); 1687665ff1ccSJerin Jacob rc = -ENODEV; 1688665ff1ccSJerin Jacob goto error; 1689665ff1ccSJerin Jacob } 169061deac72SHarman Kalra /* Trigger fault on mbox_reg_base and mbox_region_base 1691665ff1ccSJerin Jacob * to avoid BUG_ON in remap_pfn_range() 1692665ff1ccSJerin Jacob * in latest kernel. 1693665ff1ccSJerin Jacob */ 169461deac72SHarman Kalra *(volatile uint64_t *)mbox_reg_base; 169561deac72SHarman Kalra *(volatile uint64_t *)mbox_region_base; 1696665ff1ccSJerin Jacob 1697665ff1ccSJerin Jacob /* Check ROC model supported */ 1698665ff1ccSJerin Jacob if (roc_model->flag == 0) { 1699665ff1ccSJerin Jacob rc = UTIL_ERR_INVALID_MODEL; 170061deac72SHarman Kalra plt_err("Unsupported roc model"); 1701665ff1ccSJerin Jacob goto error; 1702665ff1ccSJerin Jacob } 1703665ff1ccSJerin Jacob 1704585bb3e5SJerin Jacob dev->maxvf = pci_dev->max_vfs; 1705665ff1ccSJerin Jacob dev->bar2 = bar2; 1706665ff1ccSJerin Jacob dev->bar4 = bar4; 17079bd368caSHarman Kalra dev->mbox_reg_base = dev->mbox_plat->mbox_reg_base; 1708665ff1ccSJerin Jacob 17099bd368caSHarman Kalra if (is_vf) { 1710585bb3e5SJerin Jacob direction = MBOX_DIR_VFPF; 1711585bb3e5SJerin Jacob up_direction = MBOX_DIR_VFPF_UP; 1712585bb3e5SJerin Jacob intr_offset = RVU_VF_INT; 1713585bb3e5SJerin Jacob } else { 1714665ff1ccSJerin Jacob direction = MBOX_DIR_PFAF; 1715665ff1ccSJerin Jacob up_direction = 
MBOX_DIR_PFAF_UP; 1716665ff1ccSJerin Jacob intr_offset = RVU_PF_INT; 1717585bb3e5SJerin Jacob } 1718665ff1ccSJerin Jacob 1719f1187392SHarman Kalra /* Clear all RVUM interrupts */ 1720f1187392SHarman Kalra clear_rvum_interrupts(dev); 1721f1187392SHarman Kalra 1722665ff1ccSJerin Jacob /* Initialize the local mbox */ 172361deac72SHarman Kalra rc = mbox_init(&dev->mbox_local, mbox_region_base, mbox_reg_base, direction, 1, 172461deac72SHarman Kalra intr_offset); 1725665ff1ccSJerin Jacob if (rc) 1726665ff1ccSJerin Jacob goto error; 1727665ff1ccSJerin Jacob dev->mbox = &dev->mbox_local; 1728665ff1ccSJerin Jacob 172961deac72SHarman Kalra rc = mbox_init(&dev->mbox_up, mbox_region_base, mbox_reg_base, up_direction, 1, 173061deac72SHarman Kalra intr_offset); 1731665ff1ccSJerin Jacob if (rc) 1732665ff1ccSJerin Jacob goto mbox_fini; 1733665ff1ccSJerin Jacob 1734665ff1ccSJerin Jacob /* Register mbox interrupts */ 1735993107f0SShijith Thotton rc = dev_mbox_register_irq(pci_dev, dev); 1736665ff1ccSJerin Jacob if (rc) 1737665ff1ccSJerin Jacob goto mbox_fini; 1738665ff1ccSJerin Jacob 1739665ff1ccSJerin Jacob /* Check the readiness of PF/VF */ 1740665ff1ccSJerin Jacob rc = send_ready_msg(dev->mbox, &dev->pf_func); 1741665ff1ccSJerin Jacob if (rc) 1742665ff1ccSJerin Jacob goto mbox_unregister; 1743665ff1ccSJerin Jacob 1744665ff1ccSJerin Jacob dev->pf = dev_get_pf(dev->pf_func); 1745585bb3e5SJerin Jacob dev->vf = dev_get_vf(dev->pf_func); 1746585bb3e5SJerin Jacob memset(&dev->active_vfs, 0, sizeof(dev->active_vfs)); 1747665ff1ccSJerin Jacob 1748585bb3e5SJerin Jacob /* Allocate memory for device ops */ 1749585bb3e5SJerin Jacob dev->ops = plt_zmalloc(sizeof(struct dev_ops), 0); 1750585bb3e5SJerin Jacob if (dev->ops == NULL) { 1751585bb3e5SJerin Jacob rc = -ENOMEM; 1752585bb3e5SJerin Jacob goto mbox_unregister; 1753585bb3e5SJerin Jacob } 1754585bb3e5SJerin Jacob 1755585bb3e5SJerin Jacob /* Found VF devices in a PF device */ 1756585bb3e5SJerin Jacob if (pci_dev->max_vfs > 0) { 
1757585bb3e5SJerin Jacob /* Remap mbox area for all vf's */ 1758585bb3e5SJerin Jacob vf_mbase = dev_vf_mbase_get(pci_dev, dev); 1759585bb3e5SJerin Jacob if (!vf_mbase) { 1760585bb3e5SJerin Jacob rc = -ENODEV; 1761585bb3e5SJerin Jacob goto mbox_unregister; 1762585bb3e5SJerin Jacob } 1763585bb3e5SJerin Jacob /* Init mbox object */ 176461deac72SHarman Kalra rc = mbox_init(&dev->mbox_vfpf, vf_mbase, mbox_reg_base, MBOX_DIR_PFVF, 1765585bb3e5SJerin Jacob pci_dev->max_vfs, intr_offset); 1766585bb3e5SJerin Jacob if (rc) 1767585bb3e5SJerin Jacob goto iounmap; 1768585bb3e5SJerin Jacob 1769585bb3e5SJerin Jacob /* PF -> VF UP messages */ 177061deac72SHarman Kalra rc = mbox_init(&dev->mbox_vfpf_up, vf_mbase, mbox_reg_base, MBOX_DIR_PFVF_UP, 177161deac72SHarman Kalra pci_dev->max_vfs, intr_offset); 1772585bb3e5SJerin Jacob if (rc) 1773585bb3e5SJerin Jacob goto iounmap; 17741c7a4d37SHarman Kalra 17751c7a4d37SHarman Kalra /* Create a thread for handling msgs from VFs */ 17761c7a4d37SHarman Kalra pthread_cond_init(&dev->sync.pfvf_msg_cond, NULL); 17771c7a4d37SHarman Kalra pthread_mutex_init(&dev->sync.mutex, NULL); 17781c7a4d37SHarman Kalra 1779a7ba40b2SThomas Monjalon snprintf(name, MBOX_HANDLER_NAME_MAX_LEN, "mbox_pf%d", dev->pf); 17801c7a4d37SHarman Kalra dev->sync.start_thread = true; 1781a7ba40b2SThomas Monjalon rc = plt_thread_create_control(&dev->sync.pfvf_msg_thread, name, 17821c7a4d37SHarman Kalra pf_vf_mbox_thread_main, dev); 17831c7a4d37SHarman Kalra if (rc != 0) { 1784f665790aSDavid Marchand plt_err("Failed to create thread for VF mbox handling"); 17854f6f36ceSAkhil Goyal goto thread_fail; 17861c7a4d37SHarman Kalra } 1787585bb3e5SJerin Jacob } 1788585bb3e5SJerin Jacob 1789585bb3e5SJerin Jacob /* Register VF-FLR irq handlers */ 17909bd368caSHarman Kalra if (!is_vf) { 1791993107f0SShijith Thotton rc = dev_vf_flr_register_irqs(pci_dev, dev); 1792585bb3e5SJerin Jacob if (rc) 17931c7a4d37SHarman Kalra goto stop_msg_thrd; 1794585bb3e5SJerin Jacob } 1795665ff1ccSJerin Jacob 
dev->mbox_active = 1; 1796665ff1ccSJerin Jacob 1797124ff1a4SAshwin Sekhar T K rc = npa_lf_init(dev, pci_dev); 1798124ff1a4SAshwin Sekhar T K if (rc) 17991c7a4d37SHarman Kalra goto stop_msg_thrd; 1800124ff1a4SAshwin Sekhar T K 1801665ff1ccSJerin Jacob /* Setup LMT line base */ 18029854e5dbSHarman Kalra rc = dev_lmt_setup(dev); 1803665ff1ccSJerin Jacob if (rc) 18041c7a4d37SHarman Kalra goto stop_msg_thrd; 1805665ff1ccSJerin Jacob 1806665ff1ccSJerin Jacob return rc; 18071c7a4d37SHarman Kalra stop_msg_thrd: 18081c7a4d37SHarman Kalra /* Exiting the mbox sync thread */ 18091c7a4d37SHarman Kalra if (dev->sync.start_thread) { 18101c7a4d37SHarman Kalra dev->sync.start_thread = false; 18111c7a4d37SHarman Kalra pthread_cond_signal(&dev->sync.pfvf_msg_cond); 1812a7ba40b2SThomas Monjalon plt_thread_join(dev->sync.pfvf_msg_thread, NULL); 18134f6f36ceSAkhil Goyal } 18144f6f36ceSAkhil Goyal thread_fail: 18151c7a4d37SHarman Kalra pthread_mutex_destroy(&dev->sync.mutex); 18161c7a4d37SHarman Kalra pthread_cond_destroy(&dev->sync.pfvf_msg_cond); 1817665ff1ccSJerin Jacob iounmap: 1818585bb3e5SJerin Jacob dev_vf_mbase_put(pci_dev, vf_mbase); 1819665ff1ccSJerin Jacob mbox_unregister: 1820758b58f0SPavan Nikhilesh dev_mbox_unregister_irq(pci_dev, dev); 1821585bb3e5SJerin Jacob if (dev->ops) 1822585bb3e5SJerin Jacob plt_free(dev->ops); 1823665ff1ccSJerin Jacob mbox_fini: 1824665ff1ccSJerin Jacob mbox_fini(dev->mbox); 1825665ff1ccSJerin Jacob mbox_fini(&dev->mbox_up); 1826665ff1ccSJerin Jacob error: 18279bd368caSHarman Kalra plt_free(dev->mbox_plat); 18289bd368caSHarman Kalra fail: 1829665ff1ccSJerin Jacob return rc; 1830665ff1ccSJerin Jacob } 1831665ff1ccSJerin Jacob 1832665ff1ccSJerin Jacob int 1833665ff1ccSJerin Jacob dev_fini(struct dev *dev, struct plt_pci_device *pci_dev) 1834665ff1ccSJerin Jacob { 1835d61138d4SHarman Kalra struct plt_intr_handle *intr_handle = pci_dev->intr_handle; 1836665ff1ccSJerin Jacob struct mbox *mbox; 1837665ff1ccSJerin Jacob 1838124ff1a4SAshwin Sekhar T K /* 
Check if this dev hosts npalf and has 1+ refs */ 1839124ff1a4SAshwin Sekhar T K if (idev_npa_lf_active(dev) > 1) 1840124ff1a4SAshwin Sekhar T K return -EAGAIN; 1841124ff1a4SAshwin Sekhar T K 18421c7a4d37SHarman Kalra /* Exiting the mbox sync thread */ 18431c7a4d37SHarman Kalra if (dev->sync.start_thread) { 18441c7a4d37SHarman Kalra dev->sync.start_thread = false; 18451c7a4d37SHarman Kalra pthread_cond_signal(&dev->sync.pfvf_msg_cond); 1846a7ba40b2SThomas Monjalon plt_thread_join(dev->sync.pfvf_msg_thread, NULL); 18471c7a4d37SHarman Kalra pthread_mutex_destroy(&dev->sync.mutex); 18481c7a4d37SHarman Kalra pthread_cond_destroy(&dev->sync.pfvf_msg_cond); 18491c7a4d37SHarman Kalra } 18501c7a4d37SHarman Kalra 1851124ff1a4SAshwin Sekhar T K /* Clear references to this pci dev */ 1852124ff1a4SAshwin Sekhar T K npa_lf_fini(); 1853124ff1a4SAshwin Sekhar T K 18549854e5dbSHarman Kalra /* Releasing memory allocated for lmt region */ 18559854e5dbSHarman Kalra if (dev->lmt_mz) 18569854e5dbSHarman Kalra plt_memzone_free(dev->lmt_mz); 18579854e5dbSHarman Kalra 1858758b58f0SPavan Nikhilesh dev_mbox_unregister_irq(pci_dev, dev); 1859665ff1ccSJerin Jacob 18609bd368caSHarman Kalra if (!dev_is_vf(dev)) { 1861758b58f0SPavan Nikhilesh dev_vf_flr_unregister_irqs(pci_dev, dev); 18629bd368caSHarman Kalra /* Releasing memory allocated for mbox region */ 18639bd368caSHarman Kalra if (dev->vf_mbox_mz) 18649bd368caSHarman Kalra plt_memzone_free(dev->vf_mbox_mz); 18659bd368caSHarman Kalra } 18669bd368caSHarman Kalra 1867585bb3e5SJerin Jacob /* Release PF - VF */ 1868585bb3e5SJerin Jacob mbox = &dev->mbox_vfpf; 1869585bb3e5SJerin Jacob if (mbox->hwbase && mbox->dev) 1870585bb3e5SJerin Jacob dev_vf_mbase_put(pci_dev, mbox->hwbase); 1871585bb3e5SJerin Jacob 1872585bb3e5SJerin Jacob if (dev->ops) 1873585bb3e5SJerin Jacob plt_free(dev->ops); 1874585bb3e5SJerin Jacob 1875585bb3e5SJerin Jacob mbox_fini(mbox); 1876585bb3e5SJerin Jacob mbox = &dev->mbox_vfpf_up; 1877585bb3e5SJerin Jacob mbox_fini(mbox); 
1878585bb3e5SJerin Jacob 1879665ff1ccSJerin Jacob /* Release PF - AF */ 1880665ff1ccSJerin Jacob mbox = dev->mbox; 1881665ff1ccSJerin Jacob mbox_fini(mbox); 1882665ff1ccSJerin Jacob mbox = &dev->mbox_up; 1883665ff1ccSJerin Jacob mbox_fini(mbox); 1884665ff1ccSJerin Jacob dev->mbox_active = 0; 1885665ff1ccSJerin Jacob 18869bd368caSHarman Kalra plt_free(dev->mbox_plat); 1887665ff1ccSJerin Jacob /* Disable MSIX vectors */ 1888665ff1ccSJerin Jacob dev_irqs_disable(intr_handle); 1889665ff1ccSJerin Jacob return 0; 1890665ff1ccSJerin Jacob } 1891