/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <fcntl.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "roc_api.h"
#include "roc_priv.h"

/* PCI Extended capability ID */
#define ROC_PCI_EXT_CAP_ID_SRIOV 0x10 /* SRIOV cap */

/* Single Root I/O Virtualization */
#define ROC_PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */

/* VF Mbox handler thread name */
#define MBOX_HANDLER_NAME_MAX_LEN RTE_THREAD_INTERNAL_NAME_SIZE

/* VF interrupt message pending bits - mbox or flr */
#define ROC_DEV_MBOX_PEND BIT_ULL(0)
#define ROC_DEV_FLR_PEND  BIT_ULL(1)

/* RVU PF interrupt status as received from AF */
#define RVU_PF_INTR_STATUS 0x3

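/* mbox_mem_map() maps a PF <-> VF mailbox area from /dev/mem. On CN9K the
 * kernel publishes the VF mbox base through RVU_PF_VF_BAR4_ADDR and the PF
 * maps it here (see dev_vf_mbase_get()), e.g.:
 *	vf_mbase = mbox_mem_map(pa, MBOX_SIZE * pci_dev->max_vfs);
 * Returns MAP_FAILED on any error.
 */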
static void *
mbox_mem_map(off_t off, size_t size)
{
	void *va = MAP_FAILED;
	int mem_fd;

	if (size == 0 || off == 0) {
		plt_err("Invalid mbox area off 0x%jx size %zu", (uintmax_t)off, size);
		goto error;
	}

	mem_fd = open("/dev/mem", O_RDWR);
	if (mem_fd < 0)
		goto error;

	va = plt_mmap(NULL, size, PLT_PROT_READ | PLT_PROT_WRITE,
		      PLT_MAP_SHARED, mem_fd, off);
	close(mem_fd);

	if (va == MAP_FAILED)
		plt_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd", size, mem_fd,
			(intmax_t)off);
error:
	return va;
}

static void
mbox_mem_unmap(void *va, size_t size)
{
	if (va)
		munmap(va, size);
}

static int
pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp)
{
	uint32_t timeout = 0, sleep = 1;
	struct mbox *mbox = dev->mbox;
	struct mbox_dev *mdev = &mbox->dev[0];

	volatile uint64_t int_status = 0;
	struct mbox_msghdr *msghdr;
	uint64_t off;
	int rc = 0;

	/* We need to disable PF interrupts; we are in interrupt context */
	plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1C);

	/* Send message */
	mbox_msg_send(mbox, 0);

	do {
		plt_delay_ms(sleep);
		timeout += sleep;
		if (timeout >= mbox->rsp_tmo) {
			plt_err("Message timeout: %dms", mbox->rsp_tmo);
			rc = -EIO;
			break;
		}
		int_status = plt_read64(dev->mbox_reg_base + RVU_PF_INT);
	} while (!(int_status & RVU_PF_INTR_STATUS));

	/* Clear */
	plt_write64(int_status, dev->mbox_reg_base + RVU_PF_INT);

	/* Enable interrupts */
	plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1S);

	if (rc == 0) {
		/* Get message */
		off = mbox->rx_start +
		      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
		msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
		if (rsp)
			*rsp = msghdr;
		rc = msghdr->rc;
	}

	return rc;
}

/* The PF sends the messages to the AF, waits for the responses, and forwards
 * the responses to the VF.
 */
static int
af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
{
	uint32_t timeout = 0, sleep = 1;
	struct mbox *mbox = dev->mbox;
	struct mbox_dev *mdev = &mbox->dev[0];
	volatile uint64_t int_status;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_msghdr *rsp;
	uint64_t offset;
	size_t size;
	int i;

	/* We need to disable PF interrupts; we are in interrupt context */
	plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1C);

	/* Send message to AF */
	mbox_msg_send(mbox, 0);

	/* Wait for AF response */
	do {
		plt_delay_ms(sleep);
		timeout += sleep;
		if (timeout >= mbox->rsp_tmo) {
			plt_err("Routed messages %d timeout: %dms", num_msg, mbox->rsp_tmo);
			break;
		}
		int_status = plt_read64(dev->mbox_reg_base + RVU_PF_INT);
	} while (!(int_status & RVU_PF_INTR_STATUS));

	/* Clear */
	plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT);

	/* Enable interrupts */
	plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1S);

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs != num_msg)
		plt_err("Routed messages: %d received: %d", num_msg,
			req_hdr->num_msgs);

	/* Get messages from mbox */
	offset = mbox->rx_start +
		 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* Reserve PF/VF mbox message */
		size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
		rsp = mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
		if (!rsp) {
			plt_err("Failed to reserve VF%d message", vf);
			continue;
		}

		mbox_rsp_init(msg->id, rsp);

		/* Copy message from AF<->PF mbox to PF<->VF mbox */
		mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
			    (uint8_t *)msg + sizeof(struct mbox_msghdr),
			    size - sizeof(struct mbox_msghdr));

		/* Set status and sender pf_func data */
		rsp->rc = msg->rc;
		rsp->pcifunc = msg->pcifunc;

		/* Whenever a PF comes up, the AF sends it the link status, but
		 * no such event is sent when a VF comes up. Use the
		 * MBOX_MSG_NIX_LF_START_RX response from the AF for this
		 * purpose and send the PF's link status to the VF.
		 */
		if (msg->id == MBOX_MSG_NIX_LF_START_RX) {
			/* Send link status to VF */
			struct cgx_link_user_info linfo;
			struct mbox_msghdr *vf_msg;
			size_t sz;

			/* Get the link status */
			memset(&linfo, 0, sizeof(struct cgx_link_user_info));
			if (dev->ops && dev->ops->link_status_get)
				dev->ops->link_status_get(dev->roc_nix, &linfo);

			sz = PLT_ALIGN(mbox_id2size(MBOX_MSG_CGX_LINK_EVENT),
				       MBOX_MSG_ALIGN);
			/* Prepare the message to be sent */
			vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz);
			if (vf_msg) {
				mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg);
				mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr), &linfo,
					    sizeof(struct cgx_link_user_info));

				vf_msg->rc = msg->rc;
				vf_msg->pcifunc = msg->pcifunc;
				/* Send to VF */
				mbox_msg_send_up(&dev->mbox_vfpf_up, vf);
				mbox_wait_for_zero(&dev->mbox_vfpf_up, vf);
			}
		}

		offset = mbox->rx_start + msg->next_msgoff;
	}

	return req_hdr->num_msgs;
}

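/* Handle an RVU LF (e.g. BPHY) mailbox request from a VF directly in the PF:
 * the request payload is handed to the application's msg_process_cb() and
 * the response it returns is queued on the PF <-> VF mailbox.
 */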
static int
process_rvu_lf_msgs(struct dev *dev, uint16_t vf, struct mbox_msghdr *msg, size_t size)
{
	uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
	uint8_t req[MBOX_MSG_REQ_SIZE_MAX];
	struct msg_rsp *rsp;
	uint16_t rsp_len;
	void *resp;
	int rc = 0;

	/* Mark the VF active while handling its mailbox message in the PF */
	dev->active_vfs[vf / max_bits] |= BIT_ULL(vf % max_bits);

	if ((size - sizeof(struct mbox_msghdr)) > MBOX_MSG_REQ_SIZE_MAX) {
		plt_err("MBOX request size greater than %d", MBOX_MSG_REQ_SIZE_MAX);
		return -1;
	}
	mbox_memcpy(req, (uint8_t *)msg + sizeof(struct mbox_msghdr),
		    size - sizeof(struct mbox_msghdr));

	rc = dev->ops->msg_process_cb(dev_get_vf(msg->pcifunc), msg->id, req,
				      size - sizeof(struct mbox_msghdr), &resp, &rsp_len);
	if (rc < 0) {
		plt_err("Failed to process VF%d message", vf);
		return -1;
	}

	rsp = (struct msg_rsp *)mbox_alloc_msg(&dev->mbox_vfpf, vf,
					       rsp_len + sizeof(struct mbox_msghdr));
	if (!rsp) {
		plt_err("Failed to alloc VF%d response message", vf);
		return -1;
	}

	mbox_rsp_init(msg->id, rsp);

	mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr), resp, rsp_len);
	free(resp);
	/* PF/VF function ID */
	rsp->hdr.pcifunc = msg->pcifunc;
	rsp->hdr.rc = 0;

	return 0;
}

/* PF receives mbox DOWN messages from VF and forwards them to AF */
static int
vf_pf_process_msgs(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = &dev->mbox_vfpf;
	struct mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int offset, routed = 0;
	size_t size;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (!req_hdr->num_msgs)
		return 0;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	mbox_get(dev->mbox);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* RVU_PF_FUNC_S */
		msg->pcifunc = dev_pf_func(dev->pf, vf);

		if (msg->id == MBOX_MSG_READY) {
			struct ready_msg_rsp *rsp;
			uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;

			/* Handle READY message in PF */
			dev->active_vfs[vf / max_bits] |=
				BIT_ULL(vf % max_bits);
			rsp = (struct ready_msg_rsp *)mbox_alloc_msg(
				mbox, vf, sizeof(*rsp));
			if (!rsp) {
				plt_err("Failed to alloc VF%d READY message",
					vf);
				continue;
			}

			mbox_rsp_init(msg->id, rsp);

			/* PF/VF function ID */
			rsp->hdr.pcifunc = msg->pcifunc;
			rsp->hdr.rc = 0;
		} else if (roc_rvu_lf_msg_id_range_check(dev->roc_rvu_lf, msg->id)) {
			if (process_rvu_lf_msgs(dev, vf, msg, size) < 0)
				continue;
		} else {
			struct mbox_msghdr *af_req;
			/* Reserve AF/PF mbox message */
			size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
			af_req = mbox_alloc_msg(dev->mbox, 0, size);
			if (af_req == NULL) {
				/* Release the AF mbox before bailing out */
				mbox_put(dev->mbox);
				return -ENOSPC;
			}
			mbox_req_init(msg->id, af_req);

			/* Copy message from VF<->PF mbox to PF<->AF mbox */
			mbox_memcpy((uint8_t *)af_req +
					    sizeof(struct mbox_msghdr),
				    (uint8_t *)msg + sizeof(struct mbox_msghdr),
				    size - sizeof(struct mbox_msghdr));
			af_req->pcifunc = msg->pcifunc;
			routed++;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	if (routed > 0) {
		plt_base_dbg("pf:%d routed %d messages from vf:%d to AF",
			     dev->pf, routed, vf);
		/* PF will send the messages to AF and wait for responses */
		af_pf_wait_msg(dev, vf, routed);
		mbox_reset(dev->mbox, 0);
	}
	mbox_put(dev->mbox);

	/* Send mbox responses to VF */
	if (mdev->num_msgs) {
		plt_base_dbg("pf:%d reply %d messages to vf:%d", dev->pf,
			     mdev->num_msgs, vf);
		mbox_msg_send(mbox, vf);
	}

	return i;
}

/* PF processes the Acks that VFs send to its UP messages */
static int
vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = &dev->mbox_vfpf_up;
	struct mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return 0;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		/* RVU_PF_FUNC_S */
		msg->pcifunc = dev_pf_func(dev->pf, vf);

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				     msg->id, mbox_id2name(msg->id),
				     msg->pcifunc, dev_get_pf(msg->pcifunc),
				     dev_get_vf(msg->pcifunc));
			break;
		case MBOX_MSG_CGX_PTP_RX_INFO:
			plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				     msg->id, mbox_id2name(msg->id),
				     msg->pcifunc, dev_get_pf(msg->pcifunc),
				     dev_get_vf(msg->pcifunc));
			break;
		default:
			if (roc_rvu_lf_msg_id_range_check(dev->roc_rvu_lf, msg->id))
				plt_base_dbg("PF: Msg 0x%x fn:0x%x (pf:%d,vf:%d)",
					     msg->id, msg->pcifunc, dev_get_pf(msg->pcifunc),
					     dev_get_vf(msg->pcifunc));
			else
				plt_err("Not handled UP msg 0x%x (%s) func:0x%x",
					msg->id, mbox_id2name(msg->id), msg->pcifunc);
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}
	mbox_reset(mbox, vf);
	mdev->msgs_acked = msgs_acked;
	plt_wmb();

	return i;
}

/* PF handling messages from VF */
static void
roc_vf_pf_mbox_handle_msg(void *param, dev_intr_t *intr)
{
	uint16_t vf, max_vf, max_bits;
	struct dev *dev = param;

	max_bits = sizeof(dev->intr.bits[0]) * 8;
	max_vf = max_bits * MAX_VFPF_DWORD_BITS;

	for (vf = 0; vf < max_vf; vf++) {
		if (intr->bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
			plt_base_dbg("Process vf:%d request (pf:%d, vf:%d)", vf,
				     dev->pf, dev->vf);
			/* VF initiated down messages */
			vf_pf_process_msgs(dev, vf);
			/* VF replies to PF's UP messages */
			vf_pf_process_up_msgs(dev, vf);
			intr->bits[vf / max_bits] &= ~(BIT_ULL(vf % max_bits));
		}
	}
}

/* IRQ to PF from VF - PF context (interrupt thread) */
static void
roc_vf_pf_mbox_irq(void *param)
{
	bool signal_thread = false;
	struct dev *dev = param;
	dev_intr_t intrb;
	uint64_t intr;
	int vfpf, sz;

	sz = sizeof(intrb.bits[0]) * MAX_VFPF_DWORD_BITS;
	memset(intrb.bits, 0, sz);
	for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
		intr = plt_read64(dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_intx[vfpf]);
		if (!intr)
			continue;

		plt_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)", vfpf, intr, dev->pf,
			     dev->vf);

		/* Save and clear intr bits */
		intrb.bits[vfpf] |= intr;
		plt_write64(intr, dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_intx[vfpf]);
		signal_thread = true;
	}

	if (signal_thread) {
		pthread_mutex_lock(&dev->sync.mutex);
		/* Interrupt state was saved in a local variable first, as
		 * dev->intr.bits is a resource shared between the VF msg and
		 * interrupt threads.
		 */
		memcpy(dev->intr.bits, intrb.bits, sz);
		/* MBOX message received from VF */
		dev->sync.msg_avail |= ROC_DEV_MBOX_PEND;
		/* Signal vf message handler thread */
		pthread_cond_signal(&dev->sync.pfvf_msg_cond);
		pthread_mutex_unlock(&dev->sync.mutex);
	}
}

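/* Note on the completion handshake below: once all responses in the rx
 * region are consumed, mdev->msgs_acked is updated and published with
 * plt_wmb() so that a sender blocked in mbox_wait() can observe the ack
 * count.
 */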
/* Received response from AF (PF context) / PF (VF context) */
static void
process_msgs(struct dev *dev, struct mbox *mbox)
{
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));

		switch (msg->id) {
			/* Add message IDs that are handled here */
		case MBOX_MSG_READY:
			/* Get our identity */
			dev->pf_func = msg->pcifunc;
			break;
		case MBOX_MSG_CGX_PRIO_FLOW_CTRL_CFG:
		case MBOX_MSG_CGX_CFG_PAUSE_FRM:
			/* Handle the case where one VF tries to disable PFC
			 * while PFC is already configured on other VFs. This is
			 * not an error but a warning which can be ignored.
			 */
			if (msg->rc) {
				if (msg->rc == LMAC_AF_ERR_PERM_DENIED) {
					plt_mbox_dbg(
						"Receive flow control disable not permitted "
						"as it's used by other PFVFs");
					msg->rc = 0;
				} else {
					plt_err("Message (%s) response has err=%d",
						mbox_id2name(msg->id), msg->rc);
				}
			}
			break;
		case MBOX_MSG_CGX_PROMISC_DISABLE:
		case MBOX_MSG_CGX_PROMISC_ENABLE:
			if (msg->rc) {
				if (msg->rc == LMAC_AF_ERR_INVALID_PARAM) {
					plt_mbox_dbg("Already in same promisc state");
					msg->rc = 0;
				} else {
					plt_err("Message (%s) response has err=%d",
						mbox_id2name(msg->id), msg->rc);
				}
			}
			break;

		default:
			if (msg->rc)
				plt_err("Message (%s) response has err=%d (%s)",
					mbox_id2name(msg->id), msg->rc, roc_error_msg_get(msg->rc));
			break;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	mbox_reset(mbox, 0);
	/* Update msgs_acked in case someone is waiting for a message - mbox_wait is waiting */
	mdev->msgs_acked = msgs_acked;
	plt_wmb();
}

/* Copies the message received from AF and sends it to the VFs */
static void
pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
{
	uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
	struct mbox *vf_mbox = &dev->mbox_vfpf_up;
	struct msg_req *msg = rec_msg;
	struct mbox_msghdr *vf_msg;
	uint16_t vf;
	size_t size;

	size = PLT_ALIGN(mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
	if (size < sizeof(struct mbox_msghdr))
		return;
	/* Send UP message to all VFs */
	for (vf = 0; vf < vf_mbox->ndevs; vf++) {
		/* VF active */
		if (!(dev->active_vfs[vf / max_bits] & BIT_ULL(vf % max_bits)))
			continue;

		plt_base_dbg("(%s) size: %zx to VF: %d",
			     mbox_id2name(msg->hdr.id), size, vf);

		/* Reserve PF/VF mbox message */
		vf_msg = mbox_alloc_msg(vf_mbox, vf, size);
		if (!vf_msg) {
			plt_err("Failed to alloc VF%d UP message", vf);
			continue;
		}
		mbox_req_init(msg->hdr.id, vf_msg);

		/*
		 * Copy message from AF<->PF UP mbox
		 * to PF<->VF UP mbox
		 */
		mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr),
			    (uint8_t *)msg + sizeof(struct mbox_msghdr),
			    size - sizeof(struct mbox_msghdr));

		vf_msg->rc = msg->hdr.rc;
		/* Set PF to be a sender */
		vf_msg->pcifunc = dev->pf_func;

		/* Send to VF */
		mbox_msg_send(vf_mbox, vf);
		mbox_wait_for_zero(&dev->mbox_vfpf_up, vf);
	}
}

static int
mbox_up_handler_rep_event_up_notify(struct dev *dev, struct rep_event *req, struct msg_rsp *rsp)
{
	struct roc_eswitch_repte_notify_msg *notify_msg;
	int rc = 0;

	plt_base_dbg("mbox_up_handler_rep_event_up_notify");
	plt_base_dbg("pf:%d/vf:%d msg id 0x%x (%s) from: pf:%d/vf:%d", dev_get_pf(dev->pf_func),
		     dev_get_vf(dev->pf_func), req->hdr.id, mbox_id2name(req->hdr.id),
		     dev_get_pf(req->hdr.pcifunc), dev_get_vf(req->hdr.pcifunc));

	if (dev->ops && dev->ops->repte_notify) {
		notify_msg = plt_zmalloc(sizeof(struct roc_eswitch_repte_notify_msg), 0);
		if (!notify_msg) {
			plt_err("Failed to allocate memory");
			rc = -ENOMEM;
			goto fail;
		}

		switch (req->event) {
		case RVU_EVENT_PORT_STATE:
			plt_base_dbg("pcifunc %x, port_state %d", req->pcifunc,
				     req->evt_data.port_state);
			notify_msg->type = ROC_ESWITCH_LINK_STATE;
			notify_msg->link.hw_func = req->pcifunc;
			notify_msg->link.enable = req->evt_data.port_state;
			break;
		case RVU_EVENT_PFVF_STATE:
			plt_base_dbg("pcifunc %x, repte_state %d", req->pcifunc,
				     req->evt_data.vf_state);
			notify_msg->type = ROC_ESWITCH_REPTE_STATE;
			notify_msg->state.hw_func = req->pcifunc;
			notify_msg->state.enable = req->evt_data.vf_state;
			break;
		case RVU_EVENT_MTU_CHANGE:
			plt_base_dbg("pcifunc %x, mtu val %d", req->pcifunc, req->evt_data.mtu);
			notify_msg->type = ROC_ESWITCH_REPTE_MTU;
			notify_msg->mtu.hw_func = req->pcifunc;
			notify_msg->mtu.mtu = req->evt_data.mtu;
			break;
		default:
			plt_err("Unknown event type %u", req->event);
			plt_free(notify_msg);
			rc = -EINVAL;
			goto fail;
		}

		rc = dev->ops->repte_notify(dev->roc_nix, (void *)notify_msg);
		if (rc < 0)
			plt_err("Failed to send notification type %x for representee %x",
				notify_msg->type, notify_msg->state.hw_func);

		plt_free(notify_msg);
	}
fail:
	rsp->hdr.rc = rc;
	return rc;
}

static int
mbox_up_handler_mcs_intr_notify(struct dev *dev, struct mcs_intr_info *info, struct msg_rsp *rsp)
{
	struct roc_mcs_event_desc desc = {0};
	struct roc_mcs *mcs;

	plt_base_dbg("pf:%d/vf:%d msg id 0x%x (%s) from: pf:%d/vf:%d", dev_get_pf(dev->pf_func),
		     dev_get_vf(dev->pf_func), info->hdr.id, mbox_id2name(info->hdr.id),
		     dev_get_pf(info->hdr.pcifunc), dev_get_vf(info->hdr.pcifunc));

	mcs = roc_idev_mcs_get(info->mcs_id);
	if (!mcs)
		goto exit;

	if (info->intr_mask) {
		switch (info->intr_mask) {
		case MCS_CPM_RX_SECTAG_V_EQ1_INT:
			desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
			desc.subtype = ROC_MCS_EVENT_RX_SECTAG_V_EQ1;
			break;
		case MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT:
			desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
			desc.subtype = ROC_MCS_EVENT_RX_SECTAG_E_EQ0_C_EQ1;
			break;
		case MCS_CPM_RX_SECTAG_SL_GTE48_INT:
			desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
			desc.subtype = ROC_MCS_EVENT_RX_SECTAG_SL_GTE48;
			break;
		case MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT:
			desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
			desc.subtype = ROC_MCS_EVENT_RX_SECTAG_ES_EQ1_SC_EQ1;
			break;
		case MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT:
			desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
			desc.subtype = ROC_MCS_EVENT_RX_SECTAG_SC_EQ1_SCB_EQ1;
			break;
		case MCS_CPM_RX_PACKET_XPN_EQ0_INT:
			desc.type = ROC_MCS_EVENT_RX_SA_PN_HARD_EXP;
			desc.metadata.sa_idx = info->sa_id;
			break;
		case MCS_CPM_RX_PN_THRESH_REACHED_INT:
			desc.type = ROC_MCS_EVENT_RX_SA_PN_SOFT_EXP;
			desc.metadata.sa_idx = info->sa_id;
			break;
		case MCS_CPM_TX_PACKET_XPN_EQ0_INT:
			desc.type = ROC_MCS_EVENT_TX_SA_PN_HARD_EXP;
			desc.metadata.sa_idx = info->sa_id;
			break;
		case MCS_CPM_TX_PN_THRESH_REACHED_INT:
			desc.type = ROC_MCS_EVENT_TX_SA_PN_SOFT_EXP;
			desc.metadata.sa_idx = info->sa_id;
			break;
		case MCS_CPM_TX_SA_NOT_VALID_INT:
			desc.type = ROC_MCS_EVENT_SA_NOT_VALID;
			break;
		case MCS_BBE_RX_DFIFO_OVERFLOW_INT:
		case MCS_BBE_TX_DFIFO_OVERFLOW_INT:
			desc.type = ROC_MCS_EVENT_FIFO_OVERFLOW;
			desc.subtype = ROC_MCS_EVENT_DATA_FIFO_OVERFLOW;
			desc.metadata.lmac_id = info->lmac_id;
			break;
		case MCS_BBE_RX_PLFIFO_OVERFLOW_INT:
		case MCS_BBE_TX_PLFIFO_OVERFLOW_INT:
			desc.type = ROC_MCS_EVENT_FIFO_OVERFLOW;
			desc.subtype = ROC_MCS_EVENT_POLICY_FIFO_OVERFLOW;
			desc.metadata.lmac_id = info->lmac_id;
			break;
		case MCS_PAB_RX_CHAN_OVERFLOW_INT:
		case MCS_PAB_TX_CHAN_OVERFLOW_INT:
			desc.type = ROC_MCS_EVENT_FIFO_OVERFLOW;
			desc.subtype = ROC_MCS_EVENT_PKT_ASSM_FIFO_OVERFLOW;
			desc.metadata.lmac_id = info->lmac_id;
			break;
		default:
			goto exit;
		}

		mcs_event_cb_process(mcs, &desc);
	}

exit:
	rsp->hdr.rc = 0;
	return 0;
}

static int
mbox_up_handler_cgx_link_event(struct dev *dev, struct cgx_link_info_msg *msg,
			       struct msg_rsp *rsp)
{
	struct cgx_link_user_info *linfo = &msg->link_info;
	void *roc_nix = dev->roc_nix;

	plt_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
		     linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
		     mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
		     dev_get_vf(msg->hdr.pcifunc));

	/* PF gets link notification from AF */
	if (dev_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(roc_nix, linfo);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets link up notification */
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(roc_nix, linfo);
	}

	rsp->hdr.rc = 0;
	return 0;
}

static int
mbox_up_handler_cgx_ptp_rx_info(struct dev *dev,
				struct cgx_ptp_rx_info_msg *msg,
				struct msg_rsp *rsp)
{
	void *roc_nix = dev->roc_nix;

	plt_base_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
		     msg->ptp_en ? "ENABLED" : "DISABLED", msg->hdr.id,
		     mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
		     dev_get_vf(msg->hdr.pcifunc));

	/* PF gets PTP notification from AF */
	if (dev_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(roc_nix, msg->ptp_en);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets PTP notification */
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(roc_nix, msg->ptp_en);
	}

	rsp->hdr.rc = 0;
	return 0;
}

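/* The UP message dispatch below relies on the mbox X-macro lists: expanding
 * M() over MBOX_UP_CGX_MESSAGES, MBOX_UP_MCS_MESSAGES and
 * MBOX_UP_REP_MESSAGES generates one case per UP message ID that allocates
 * the typed response, fills the common header and calls the matching
 * mbox_up_handler_*() defined above.
 */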
static int
mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
{
	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != MBOX_REQ_SIG)
		return -EIO;

	switch (req->id) {
	default:
		reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
		break;
#define M(_name, _id, _fn_name, _req_type, _rsp_type)                          \
	case _id: {                                                            \
		struct _rsp_type *rsp;                                         \
		int err;                                                       \
		rsp = (struct _rsp_type *)mbox_alloc_msg(                      \
			&dev->mbox_up, 0, sizeof(struct _rsp_type));           \
		if (!rsp)                                                      \
			return -ENOMEM;                                        \
		rsp->hdr.id = _id;                                             \
		rsp->hdr.sig = MBOX_RSP_SIG;                                   \
		rsp->hdr.pcifunc = dev->pf_func;                               \
		rsp->hdr.rc = 0;                                               \
		err = mbox_up_handler_##_fn_name(dev, (struct _req_type *)req, \
						 rsp);                         \
		return err;                                                    \
	}
		MBOX_UP_CGX_MESSAGES
		MBOX_UP_MCS_MESSAGES
		MBOX_UP_REP_MESSAGES
#undef M
	}

	return -ENODEV;
}

static int
process_rvu_lf_msgs_up(struct dev *dev, struct mbox_msghdr *msg, size_t size)
{
	uint8_t req[MBOX_MSG_REQ_SIZE_MAX];
	struct msg_rsp *rsp;
	uint16_t rsp_len;
	void *resp;
	int rc = 0;

	/* Check if valid, if not reply with an invalid msg */
	if (msg->sig != MBOX_REQ_SIG)
		return -EIO;

	if ((size - sizeof(struct mbox_msghdr)) > MBOX_MSG_REQ_SIZE_MAX) {
		plt_err("MBOX request size greater than %d", MBOX_MSG_REQ_SIZE_MAX);
		return -ENOMEM;
	}
	mbox_memcpy(req, (uint8_t *)msg + sizeof(struct mbox_msghdr),
		    size - sizeof(struct mbox_msghdr));
	rc = dev->ops->msg_process_cb(dev_get_vf(msg->pcifunc), msg->id, req,
				      size - sizeof(struct mbox_msghdr), &resp, &rsp_len);
	if (rc < 0) {
		plt_err("Failed to process VF%d message", dev->vf);
		return rc;
	}

	rsp = (struct msg_rsp *)mbox_alloc_msg(&dev->mbox_up, 0,
					       rsp_len + sizeof(struct mbox_msghdr));
	if (!rsp) {
		plt_err("Failed to alloc VF%d response message", dev->vf);
		return -ENOMEM;
	}

	mbox_rsp_init(msg->id, rsp);

	mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr), resp, rsp_len);
	free(resp);
	/* PF/VF function ID */
	rsp->hdr.pcifunc = msg->pcifunc;
	rsp->hdr.rc = 0;

	return rc;
}

/* Received up messages from AF (PF context) / PF (VF context) */
static void
process_msgs_up(struct dev *dev, struct mbox *mbox)
{
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int i, err, offset;
	size_t size;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));
		if (roc_rvu_lf_msg_id_range_check(dev->roc_rvu_lf, msg->id)) {
			size = mbox->rx_start + msg->next_msgoff - offset;
			err = process_rvu_lf_msgs_up(dev, msg, size);
			if (err)
				plt_err("Error %d handling 0x%x RVU_LF up msg", err, msg->id);
		} else {
			err = mbox_process_msgs_up(dev, msg);
			if (err)
				plt_err("Error %d handling 0x%x (%s)", err, msg->id,
					mbox_id2name(msg->id));
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}
	/* Send mbox responses */
	if (mdev->num_msgs) {
		plt_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
		mbox_msg_send(mbox, 0);
	}
}

/* IRQ to VF from PF - VF context (interrupt thread) */
static void
roc_pf_vf_mbox_irq(void *param)
{
	struct dev *dev = param;
	uint64_t mbox_data;
	uint64_t intr;

	intr = plt_read64(dev->mbox_reg_base + RVU_VF_INT);
	if (intr == 0)
		plt_base_dbg("Proceeding to check mbox UP messages if any");

	plt_write64(intr, dev->mbox_reg_base + RVU_VF_INT);
	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* Read the UP/DOWN message flags; the next message send is delayed
	 * by 1ms until this region is zeroed by mbox_wait_for_zero().
	 */
	mbox_data = plt_read64(dev->mbox_reg_base + RVU_VF_VFPF_MBOX0);
	/* If interrupt occurred for down message */
	if (mbox_data & MBOX_DOWN_MSG || intr & BIT_ULL(1)) {
		mbox_data &= ~MBOX_DOWN_MSG;
		plt_write64(mbox_data, dev->mbox_reg_base + RVU_VF_VFPF_MBOX0);

		/* First process all configuration messages */
		process_msgs(dev, dev->mbox);
	}
	/* If interrupt occurred for UP message */
	if (mbox_data & MBOX_UP_MSG || intr & BIT_ULL(0)) {
		mbox_data &= ~MBOX_UP_MSG;
		plt_write64(mbox_data, dev->mbox_reg_base + RVU_VF_VFPF_MBOX0);

		/* Process Uplink messages */
		process_msgs_up(dev, &dev->mbox_up);
	}
}

/* IRQ to PF from AF - PF context (interrupt thread) */
static void
roc_af_pf_mbox_irq(void *param)
{
	struct dev *dev = param;
	uint64_t mbox_data;
	uint64_t intr;

	intr = plt_read64(dev->mbox_reg_base + RVU_PF_INT);
	if (intr == 0)
		plt_base_dbg("Proceeding to check mbox UP messages if any");

	plt_write64(intr, dev->mbox_reg_base + RVU_PF_INT);
	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* Read the UP/DOWN message flags; the next message send is delayed
	 * by 1ms until this region is zeroed by mbox_wait_for_zero().
	 */
	mbox_data = plt_read64(dev->mbox_reg_base + RVU_PF_PFAF_MBOX0);
	/* If interrupt occurred for down message */
	if (mbox_data & MBOX_DOWN_MSG || intr & BIT_ULL(1)) {
		mbox_data &= ~MBOX_DOWN_MSG;
		plt_write64(mbox_data, dev->mbox_reg_base + RVU_PF_PFAF_MBOX0);

		/* First process all configuration messages */
		process_msgs(dev, dev->mbox);
	}
	/* If interrupt occurred for up message */
	if (mbox_data & MBOX_UP_MSG || intr & BIT_ULL(0)) {
		mbox_data &= ~MBOX_UP_MSG;
		plt_write64(mbox_data, dev->mbox_reg_base + RVU_PF_PFAF_MBOX0);

		/* Process Uplink messages */
		process_msgs_up(dev, &dev->mbox_up);
	}
}

static int
mbox_register_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int i, rc;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		plt_write64(~0ull, dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_int_ena_w1c[i]);
		plt_write64(~0ull, dev->mbox_reg_base + dev->mbox_plat->pfvf1_mbox_int_ena_w1c[i]);
	}

	plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1C);

	/* MBOX interrupt for VF(0...63) <-> PF */
	rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev, dev->mbox_plat->pfvf_mbox0_vec);

	if (rc) {
		plt_err("Failed to register PF(VF0-63) mbox irq");
		return rc;
	}
	/* MBOX interrupt for VF(64...127) <-> PF */
	rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev, dev->mbox_plat->pfvf_mbox1_vec);

	if (rc) {
		plt_err("Failed to register PF(VF64-127) mbox irq");
		return rc;
	}

	/* Additional interrupt vectors which the VF can use to signal the PF
	 * via the RVU_VF_VFPF_TRIG(1) trigger register.
	 */
	if (roc_model_is_cn20k()) {
		/* MBOX1 interrupt for VF(0...63) <-> PF */
		rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
				      dev->mbox_plat->pfvf1_mbox0_vec);

		if (rc) {
			plt_err("Failed to register PF1(VF0-63) mbox irq");
			return rc;
		}
		/* MBOX1 interrupt for VF(64...127) <-> PF */
		rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
				      dev->mbox_plat->pfvf1_mbox1_vec);

		if (rc) {
			plt_err("Failed to register PF1(VF64-127) mbox irq");
			return rc;
		}
	}
	/* MBOX interrupt AF <-> PF */
	rc = dev_irq_register(intr_handle, roc_af_pf_mbox_irq, dev, dev->mbox_plat->pfaf_vec);
	if (rc) {
		plt_err("Failed to register AF<->PF mbox irq");
		return rc;
	}

	/* HW enable intr */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		plt_write64(~0ull, dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_int_ena_w1s[i]);
		plt_write64(~0ull, dev->mbox_reg_base + dev->mbox_plat->pfvf1_mbox_int_ena_w1s[i]);
	}

	plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT);
	plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1S);

	return rc;
}

static int
mbox_register_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int rc;

	/* Clear irq */
	plt_write64(~0ull, dev->mbox_reg_base + RVU_VF_INT_ENA_W1C);

	/* MBOX interrupt PF <-> VF */
	rc = dev_irq_register(intr_handle, roc_pf_vf_mbox_irq, dev, RVU_VF_INT_VEC_MBOX);
	if (rc) {
		plt_err("Failed to register PF<->VF mbox irq");
		return rc;
	}

	/* HW enable intr */
	plt_write64(~0ull, dev->mbox_reg_base + RVU_VF_INT);
	plt_write64(~0ull, dev->mbox_reg_base + RVU_VF_INT_ENA_W1S);

	return rc;
}

int
dev_mbox_register_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	if (dev_is_vf(dev))
		return mbox_register_vf_irq(pci_dev, dev);
	else
		return mbox_register_pf_irq(pci_dev, dev);
}

static void
mbox_unregister_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int i;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		plt_write64(~0ull, dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_int_ena_w1c[i]);
		plt_write64(~0ull, dev->mbox_reg_base + dev->mbox_plat->pfvf1_mbox_int_ena_w1c[i]);
	}

	plt_write64(~0ull, dev->mbox_reg_base + RVU_PF_INT_ENA_W1C);

	/* Unregister the interrupt handler for each vector */
	/* MBOX interrupt for VF(0...63) <-> PF */
	dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev, dev->mbox_plat->pfvf_mbox0_vec);

	/* MBOX interrupt for VF(64...127) <-> PF */
	dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev, dev->mbox_plat->pfvf_mbox1_vec);

	if (roc_model_is_cn20k()) {
		/* MBOX1 interrupt for VF(0...63) <-> PF */
		dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
				   dev->mbox_plat->pfvf1_mbox0_vec);

		/* MBOX1 interrupt for VF(64...127) <-> PF */
		dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
				   dev->mbox_plat->pfvf1_mbox1_vec);
	}

	/* MBOX interrupt AF <-> PF */
	dev_irq_unregister(intr_handle, roc_af_pf_mbox_irq, dev, dev->mbox_plat->pfaf_vec);
}

static void
mbox_unregister_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;

	/* Clear irq */
	plt_write64(~0ull, dev->mbox_reg_base + RVU_VF_INT_ENA_W1C);

	/* Unregister the interrupt handler */
	dev_irq_unregister(intr_handle, roc_pf_vf_mbox_irq, dev, RVU_VF_INT_VEC_MBOX);
}

void
dev_mbox_unregister_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	if (dev_is_vf(dev))
		mbox_unregister_vf_irq(pci_dev, dev);
	else
		mbox_unregister_pf_irq(pci_dev, dev);
}

static int
vf_flr_send_msg(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = dev->mbox;
	struct msg_req *req;
	int rc;

	req = mbox_alloc_msg_vf_flr(mbox_get(mbox));
	if (req == NULL) {
		/* Release the mbox before bailing out */
		mbox_put(mbox);
		return -ENOSPC;
	}
	/* Overwrite pcifunc to indicate VF */
	req->hdr.pcifunc = dev_pf_func(dev->pf, vf);

	/* Sync message in interrupt context */
	rc = pf_af_sync_msg(dev, NULL);
	if (rc)
		plt_err("Failed to send VF FLR mbox msg, rc=%d", rc);

	mbox_put(mbox);

	return rc;
}

static void
roc_pf_vf_flr_irq(void *param)
{
	struct dev *dev = (struct dev *)param;
	bool signal_thread = false;
	dev_intr_t flr;
	uintptr_t bar2;
	uint64_t intr;
	int i, sz;

	bar2 = dev->bar2;

	sz = sizeof(flr.bits[0]) * MAX_VFPF_DWORD_BITS;
	memset(flr.bits, 0, sz);
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		intr = plt_read64(bar2 + RVU_PF_VFFLR_INTX(i));
		if (!intr)
			continue;

		/* Clear interrupt */
		plt_write64(intr, bar2 + RVU_PF_VFFLR_INTX(i));
		/* Disable the interrupt */
		plt_write64(intr,
			    bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));

		/* Save FLR interrupts per VF as bits */
		flr.bits[i] |= intr;
		/* Enable interrupt */
		plt_write64(~0ull,
			    bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
		signal_thread = true;
	}

	if (signal_thread) {
		pthread_mutex_lock(&dev->sync.mutex);
		/* Interrupt state was saved in a local variable first, as
		 * dev->flr.bits is a resource shared between the VF msg and
		 * interrupt threads.
		 */
		memcpy(dev->flr.bits, flr.bits, sz);
		/* FLR message received from VF */
		dev->sync.msg_avail |= ROC_DEV_FLR_PEND;
		/* Signal vf message handler thread */
		pthread_cond_signal(&dev->sync.pfvf_msg_cond);
		pthread_mutex_unlock(&dev->sync.mutex);
	}
}

void
dev_vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int i;

	plt_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));

	dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
			   RVU_PF_INT_VEC_VFFLR0);

	dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
			   RVU_PF_INT_VEC_VFFLR1);
}

int
dev_vf_flr_register_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *handle = pci_dev->intr_handle;
	int i, rc;

	plt_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);

	rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
			      RVU_PF_INT_VEC_VFFLR0);
	if (rc)
		plt_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);

	rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
			      RVU_PF_INT_VEC_VFFLR1);
	if (rc)
		plt_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);

	/* Enable HW interrupt */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
	}
	return 0;
}

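/* Runs in the PF-VF message thread on a snapshot of the FLR bits: for each
 * VF with a pending FLR, inform the AF about the VF reset and then signal
 * FLR completion through RVU_PF_VFTRPENDX.
 */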
static void
vf_flr_handle_msg(void *param, dev_intr_t *flr)
{
	uint16_t vf, max_vf, max_bits;
	struct dev *dev = param;

	max_bits = sizeof(flr->bits[0]) * 8;
	max_vf = max_bits * MAX_VFPF_DWORD_BITS;

	for (vf = 0; vf < max_vf; vf++) {
		if (flr->bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
			plt_base_dbg("Process FLR vf:%d request (pf:%d, vf:%d)",
				     vf, dev->pf, dev->vf);
			/* Inform AF about VF reset */
			vf_flr_send_msg(dev, vf);
			flr->bits[vf / max_bits] &= ~(BIT_ULL(vf % max_bits));

			/* Signal FLR finish */
			plt_write64(BIT_ULL(vf % max_bits),
				    dev->bar2 + RVU_PF_VFTRPENDX(vf / max_bits));
		}
	}
}

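/* Control thread serializing VF mailbox and FLR handling. The interrupt
 * handlers snapshot their status bits into dev->intr/dev->flr under
 * dev->sync.mutex and set msg_avail; this thread wakes on pfvf_msg_cond,
 * copies the bits locally, drops the lock and dispatches to
 * roc_vf_pf_mbox_handle_msg() / vf_flr_handle_msg().
 */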
static uint32_t
pf_vf_mbox_thread_main(void *arg)
{
	struct dev *dev = arg;
	bool is_flr, is_mbox;
	dev_intr_t flr, intr;
	int sz, rc;

	sz = sizeof(intr.bits[0]) * MAX_VFPF_DWORD_BITS;
	pthread_mutex_lock(&dev->sync.mutex);
	while (dev->sync.start_thread) {
		do {
			rc = pthread_cond_wait(&dev->sync.pfvf_msg_cond, &dev->sync.mutex);
		} while (rc != 0);

		while (dev->sync.msg_avail) {
			/* Check which VF msg was received */
			is_mbox = dev->sync.msg_avail & ROC_DEV_MBOX_PEND;
			is_flr = dev->sync.msg_avail & ROC_DEV_FLR_PEND;
			memcpy(intr.bits, dev->intr.bits, sz);
			memcpy(flr.bits, dev->flr.bits, sz);
			memset(dev->flr.bits, 0, sz);
			memset(dev->intr.bits, 0, sz);
			dev->sync.msg_avail = 0;
			/* Unlock so the interrupt thread can grab the lock
			 * and update the msg_avail field.
			 */
			pthread_mutex_unlock(&dev->sync.mutex);
			/* Call the respective message handlers */
			if (is_mbox)
				roc_vf_pf_mbox_handle_msg(dev, &intr);
			if (is_flr)
				vf_flr_handle_msg(dev, &flr);
			/* Relock; cond wait unlocks before waiting */
			pthread_mutex_lock(&dev->sync.mutex);
		}
	}

	pthread_mutex_unlock(&dev->sync.mutex);

	return 0;
}

static void
clear_rvum_interrupts(struct dev *dev)
{
	uint64_t intr;
	int i;

	if (dev_is_vf(dev)) {
		/* Clear VF mbox interrupt */
		intr = plt_read64(dev->mbox_reg_base + RVU_VF_INT);
		if (intr)
			plt_write64(intr, dev->mbox_reg_base + RVU_VF_INT);
	} else {
		/* Clear AF PF interrupt line */
		intr = plt_read64(dev->mbox_reg_base + RVU_PF_INT);
		if (intr)
			plt_write64(intr, dev->mbox_reg_base + RVU_PF_INT);
		for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
			/* Clear MBOX interrupts */
			intr = plt_read64(dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_intx[i]);
			if (intr) {
				plt_write64(intr,
					    dev->mbox_reg_base + dev->mbox_plat->pfvf_mbox_intx[i]);
				if (roc_model_is_cn20k())
					plt_write64(intr,
						    dev->mbox_reg_base +
							    dev->mbox_plat->pfvf1_mbox_intx[i]);
			}
			/* Clear VF FLR interrupts */
			intr = plt_read64(dev->bar2 + RVU_PF_VFFLR_INTX(i));
			if (intr)
				plt_write64(intr,
					    dev->bar2 + RVU_PF_VFFLR_INTX(i));
		}
	}
}

int
dev_active_vfs(struct dev *dev)
{
	int i, count = 0;

	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		count += plt_popcount32(dev->active_vfs[i]);

	return count;
}

static void
dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev)
{
	switch (pci_dev->id.device_id) {
	case PCI_DEVID_CNXK_RVU_PF:
		break;
	case PCI_DEVID_CNXK_RVU_SSO_TIM_VF:
	case PCI_DEVID_CNXK_RVU_NPA_VF:
	case PCI_DEVID_CN10K_RVU_CPT_VF:
	case PCI_DEVID_CN9K_RVU_CPT_VF:
	case PCI_DEVID_CNXK_RVU_AF_VF:
	case PCI_DEVID_CNXK_RVU_VF:
	case PCI_DEVID_CNXK_RVU_SDP_VF:
	case PCI_DEVID_CNXK_RVU_NIX_INL_VF:
	case PCI_DEVID_CNXK_RVU_BPHY_VF:
	case PCI_DEVID_CNXK_RVU_ESWITCH_VF:
		dev->hwcap |= DEV_HWCAP_F_VF;
		break;
	}
}

static uintptr_t
cn20k_pfvf_mbox_alloc(struct dev *dev, uint16_t max_vfs)
{
	char name[PLT_MEMZONE_NAMESIZE];
	const struct plt_memzone *mz;
	uint32_t vf_mbox_region;

	vf_mbox_region = plt_align64pow2(MBOX_SIZE * max_vfs);
	/* Allocating memory for the PF-VF mbox region */
	sprintf(name, "PFVF_MBOX_REGION%x", dev->pf_func);

	mz = plt_memzone_reserve_aligned(name, vf_mbox_region, 0, MBOX_SIZE);
	if (!mz) {
		plt_err("Memory alloc failed: %s", strerror(errno));
		goto fail;
	}

	dev->vf_mbox_base = mz->iova;
	dev->vf_mbox_mz = mz;
	plt_write64(dev->vf_mbox_base, dev->mbox_reg_base + RVU_PF_VF_MBOX_ADDR);

	return dev->vf_mbox_base;
fail:
	return (uintptr_t)NULL;
}

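/* Return the base of the VF mbox region for this PF: a memzone programmed
 * into RVU_PF_VF_MBOX_ADDR on CN20K, the area right after the PF mbox in
 * BAR4 on CN10K, or a /dev/mem mapping of the address the kernel published
 * in RVU_PF_VF_BAR4_ADDR on CN9K.
 */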
static uintptr_t
dev_vf_mbase_get(struct plt_pci_device *pci_dev, struct dev *dev)
{
	void *vf_mbase = NULL;
	uintptr_t pa;

	if (dev_is_vf(dev))
		return 0;

	if (roc_model_is_cn20k())
		return cn20k_pfvf_mbox_alloc(dev, pci_dev->max_vfs);

	/* For CN10K, it is just after PF MBOX */
	if (roc_model_is_cn10k())
		return dev->bar4 + MBOX_SIZE;

	pa = plt_read64(dev->bar2 + RVU_PF_VF_BAR4_ADDR);
	if (!pa) {
		plt_err("Invalid VF mbox base pa");
		return pa;
	}

	vf_mbase = mbox_mem_map(pa, MBOX_SIZE * pci_dev->max_vfs);
	if (vf_mbase == MAP_FAILED) {
		plt_err("Failed to mmap vf mbase at pa 0x%" PRIxPTR ", errno=%d", pa,
			errno);
		return 0;
	}
	return (uintptr_t)vf_mbase;
}

static void
dev_vf_mbase_put(struct plt_pci_device *pci_dev, uintptr_t vf_mbase)
{
	if (!vf_mbase || !pci_dev->max_vfs || !roc_model_is_cn9k())
		return;

	mbox_mem_unmap((void *)vf_mbase, MBOX_SIZE * pci_dev->max_vfs);
}

static int
dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova)
{
	struct lmtst_tbl_setup_req *req;
	int rc;

	req = mbox_alloc_msg_lmtst_tbl_setup(mbox_get(mbox));
	if (!req) {
		rc = -ENOSPC;
		goto exit;
	}

	/* The pcifunc field carries the primary pcifunc whose LMT address is
	 * to be shared. When the call supplies a valid IOVA, the pcifunc
	 * field is unused.
	 */
	req->pcifunc = valid_iova ? 0 : idev_lmt_pffunc_get();
	req->use_local_lmt_region = valid_iova;
	req->lmt_iova = iova;

	rc = mbox_process(mbox);
exit:
	mbox_put(mbox);
	return rc;
}

/* Total number of LMT lines * size of each LMT line */
#define LMT_REGION_SIZE (ROC_NUM_LMT_LINES * ROC_LMT_LINE_SZ)
static int
dev_lmt_setup(struct dev *dev)
{
	char name[PLT_MEMZONE_NAMESIZE];
	const struct plt_memzone *mz;
	struct idev_cfg *idev;
	int rc;

	if (roc_model_is_cn9k()) {
		dev->lmt_base = dev->mbox_reg_base + (RVU_BLOCK_ADDR_LMT << 20);
		return 0;
	}

	/* [CN10K, .) */

	/* Set common lmt region from second pf_func onwards. */
	if (!dev->disable_shared_lmt && idev_lmt_pffunc_get() &&
	    dev->pf_func != idev_lmt_pffunc_get()) {
		rc = dev_setup_shared_lmt_region(dev->mbox, false, 0);
		if (!rc) {
			/* On success, update the lmt base of secondary
			 * pf_funcs with the primary pf_func's lmt base.
			 */
			dev->lmt_base = roc_idev_lmt_base_addr_get();
			return rc;
		}
		plt_err("Failed to setup shared lmt region, pf_func %d err %d "
			"Using respective LMT region per pf func",
			dev->pf_func, rc);
	}

	/* Allocating memory for LMT region */
	sprintf(name, "LMT_MAP%x", dev->pf_func);

	/* The alignment ensures correct masking when resetting to the lmt
	 * base of a core after all lmt lines under that core are used.
	 * An alignment of LMT_REGION_SIZE handles the case where all lines
	 * are used by a single core.
	 */
	mz = plt_lmt_region_reserve_aligned(name, LMT_REGION_SIZE, LMT_REGION_SIZE);
	if (!mz) {
		plt_err("Memory alloc failed: %s", strerror(errno));
		goto fail;
	}

	/* Share the IOVA address with Kernel */
	rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova);
	if (rc) {
		errno = rc;
		goto free;
	}

	dev->lmt_base = mz->iova;
	dev->lmt_mz = mz;
	/* The base LMT address should be chosen only from those pci funcs
	 * which participate in LMT shared mode.
	 */
	if (!dev->disable_shared_lmt) {
		idev = idev_get_cfg();
		if (!idev) {
			errno = EFAULT;
			goto free;
		}

		if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) {
			idev->lmt_base_addr = dev->lmt_base;
			idev->lmt_pf_func = dev->pf_func;
			idev->num_lmtlines = RVU_LMT_LINE_MAX;
		}
	}

	return 0;
free:
	plt_memzone_free(mz);
fail:
	return -errno;
}

static bool
dev_cache_line_size_valid(void)
{
	if (roc_model_is_cn9k()) {
		if (PLT_CACHE_LINE_SIZE != 128) {
			plt_err("Cache line size of %d is wrong for CN9K", PLT_CACHE_LINE_SIZE);
			return false;
		}
	} else if (roc_model_is_cn10k()) {
		if (PLT_CACHE_LINE_SIZE == 128) {
			plt_warn("Cache line size of %d might affect performance",
				 PLT_CACHE_LINE_SIZE);
		} else if (PLT_CACHE_LINE_SIZE != 64) {
			plt_err("Cache line size of %d is wrong for CN10K", PLT_CACHE_LINE_SIZE);
			return false;
		}
	}

	return true;
}

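/* Select the mbox register/region layout for the silicon generation: on
 * CN20K both the register block and the aliased AF/PF mailbox region live in
 * BAR2, while earlier models expose the PF-AF mbox region through BAR4 (or
 * at RVU_VF_MBOX_REGION in BAR2 for VFs).
 */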
static void
mbox_platform_changes(struct mbox_platform *mbox_plat, uintptr_t bar2, uintptr_t bar4, bool is_vf)
{
	int i;

	if (roc_model_is_cn20k()) {
		/* For CN20K, AF allocates mbox memory in DRAM and writes PF
		 * regions/offsets in RVU_MBOX_AF_PFX_ADDR; RVU_PFX_FUNC_PFAF_MBOX
		 * gives the aliased address to access AF/PF mailbox regions.
		 */
		mbox_plat->mbox_reg_base = bar2;
		mbox_plat->mbox_region_base =
			bar2 + (RVU_PFX_FUNC_PFAF_MBOX +
				((uint64_t)RVU_BLOCK_ADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT));
		/* Interrupt vectors */
		mbox_plat->pfaf_vec = RVU_MBOX_PF_INT_VEC_AFPF_MBOX;
		mbox_plat->pfvf_mbox0_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0;
		mbox_plat->pfvf_mbox1_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX1;
		mbox_plat->pfvf1_mbox0_vec = RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0;
		mbox_plat->pfvf1_mbox1_vec = RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1;
		for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
			mbox_plat->pfvf_mbox_int_ena_w1s[i] = RVU_MBOX_PF_VFPF_INT_ENA_W1SX(i);
			mbox_plat->pfvf_mbox_int_ena_w1c[i] = RVU_MBOX_PF_VFPF_INT_ENA_W1CX(i);
			mbox_plat->pfvf_mbox_intx[i] = RVU_MBOX_PF_VFPF_INTX(i);
			mbox_plat->pfvf1_mbox_int_ena_w1s[i] = RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(i);
			mbox_plat->pfvf1_mbox_int_ena_w1c[i] = RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(i);
			mbox_plat->pfvf1_mbox_intx[i] = RVU_MBOX_PF_VFPF1_INTX(i);
		}
	} else {
		mbox_plat->mbox_reg_base = bar2;
		mbox_plat->mbox_region_base = bar4;
		mbox_plat->pfaf_vec = RVU_PF_INT_VEC_AFPF_MBOX;
		mbox_plat->pfvf_mbox0_vec = RVU_PF_INT_VEC_VFPF_MBOX0;
		mbox_plat->pfvf_mbox1_vec = RVU_PF_INT_VEC_VFPF_MBOX1;
		for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
			mbox_plat->pfvf_mbox_int_ena_w1s[i] = RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i);
			mbox_plat->pfvf_mbox_int_ena_w1c[i] = RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i);
			mbox_plat->pfvf_mbox_intx[i] = RVU_PF_VFPF_MBOX_INTX(i);
		}
	}
	if (is_vf) {
		if (roc_model_is_cn20k())
			mbox_plat->mbox_region_base =
				bar2 + (RVU_VF_MBOX_REGION +
					((uint64_t)RVU_BLOCK_ADDR_MBOX << RVU_FUNC_BLKADDR_SHIFT));
		if (roc_model_is_cn10k())
			mbox_plat->mbox_region_base = bar2 + RVU_VF_MBOX_REGION;
	}
}

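/* Bring up the PF/VF device: validate cache-line and LMT constraints, map
 * the mbox register/region bases, initialize the AF/PF (or PF/VF) mailboxes,
 * register mbox and FLR interrupts, exchange the READY message and set up
 * the LMT region. Illustrative call sequence (a sketch only; error handling
 * elided):
 *
 *	rc = dev_init(dev, pci_dev);
 *	... use mailboxes, LFs, etc. ...
 *	rc = dev_fini(dev, pci_dev);
 */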
int
dev_init(struct dev *dev, struct plt_pci_device *pci_dev)
{
	uintptr_t mbox_reg_base, mbox_region_base, bar2, bar4;
	char name[MBOX_HANDLER_NAME_MAX_LEN];
	int direction, up_direction, rc;
	uintptr_t vf_mbase = 0;
	uint64_t intr_offset;
	bool is_vf;

	if (!dev_cache_line_size_valid())
		return -EFAULT;

	if (!roc_plt_lmt_validate()) {
		plt_err("Failed to validate LMT line");
		return -EFAULT;
	}

	dev_vf_hwcap_update(pci_dev, dev);
	is_vf = dev_is_vf(dev);

	bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
	bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
	dev->mbox_plat = plt_zmalloc(sizeof(struct mbox_platform), 0);
	if (!dev->mbox_plat) {
		plt_err("Failed to allocate mem for mbox_plat");
		rc = -ENOMEM;
		goto fail;
	}
	mbox_platform_changes(dev->mbox_plat, bar2, bar4, is_vf);

	mbox_reg_base = dev->mbox_plat->mbox_reg_base;
	mbox_region_base = dev->mbox_plat->mbox_region_base;
	if (mbox_reg_base == 0 || mbox_region_base == 0) {
		plt_err("Failed to get PCI bars");
		rc = -ENODEV;
		goto error;
	}
	/* Trigger a fault on mbox_reg_base and mbox_region_base to avoid a
	 * BUG_ON in remap_pfn_range() in recent kernels.
	 */
	*(volatile uint64_t *)mbox_reg_base;
	*(volatile uint64_t *)mbox_region_base;

	/* Check ROC model supported */
	if (roc_model->flag == 0) {
		rc = UTIL_ERR_INVALID_MODEL;
		plt_err("Unsupported roc model");
		goto error;
	}

	dev->maxvf = pci_dev->max_vfs;
	dev->bar2 = bar2;
	dev->bar4 = bar4;
	dev->mbox_reg_base = dev->mbox_plat->mbox_reg_base;

	if (is_vf) {
		direction = MBOX_DIR_VFPF;
		up_direction = MBOX_DIR_VFPF_UP;
		intr_offset = RVU_VF_INT;
	} else {
		direction = MBOX_DIR_PFAF;
		up_direction = MBOX_DIR_PFAF_UP;
		intr_offset = RVU_PF_INT;
	}

	/* Clear all RVUM interrupts */
	clear_rvum_interrupts(dev);

	/* Initialize the local mbox */
	rc = mbox_init(&dev->mbox_local, mbox_region_base, mbox_reg_base, direction, 1,
		       intr_offset);
	if (rc)
		goto error;
	dev->mbox = &dev->mbox_local;

	rc = mbox_init(&dev->mbox_up, mbox_region_base, mbox_reg_base, up_direction, 1,
		       intr_offset);
	if (rc)
		goto mbox_fini;

	/* Register mbox interrupts */
	rc = dev_mbox_register_irq(pci_dev, dev);
	if (rc)
		goto mbox_fini;

	/* Check the readiness of PF/VF */
	rc = send_ready_msg(dev->mbox, &dev->pf_func);
	if (rc)
		goto mbox_unregister;

	dev->pf = dev_get_pf(dev->pf_func);
	dev->vf = dev_get_vf(dev->pf_func);
	memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));

	/* Allocate memory for device ops */
	dev->ops = plt_zmalloc(sizeof(struct dev_ops), 0);
	if (dev->ops == NULL) {
		rc = -ENOMEM;
		goto mbox_unregister;
	}

	/* This PF hosts VF devices */
	if (pci_dev->max_vfs > 0) {
		/* Remap mbox area for all VFs */
		vf_mbase = dev_vf_mbase_get(pci_dev, dev);
		if (!vf_mbase) {
			rc = -ENODEV;
			goto mbox_unregister;
		}
		/* Init mbox object */
		rc = mbox_init(&dev->mbox_vfpf, vf_mbase, mbox_reg_base, MBOX_DIR_PFVF,
			       pci_dev->max_vfs, intr_offset);
		if (rc)
			goto iounmap;

		/* PF -> VF UP messages */
		rc = mbox_init(&dev->mbox_vfpf_up, vf_mbase, mbox_reg_base, MBOX_DIR_PFVF_UP,
			       pci_dev->max_vfs, intr_offset);
		if (rc)
			goto iounmap;

		/* Create a thread for handling msgs from VFs */
		pthread_cond_init(&dev->sync.pfvf_msg_cond, NULL);
		pthread_mutex_init(&dev->sync.mutex, NULL);

		snprintf(name, MBOX_HANDLER_NAME_MAX_LEN, "mbox_pf%d", dev->pf);
		dev->sync.start_thread = true;
		rc = plt_thread_create_control(&dev->sync.pfvf_msg_thread, name,
				pf_vf_mbox_thread_main, dev);
		if (rc != 0) {
			plt_err("Failed to create thread for VF mbox handling");
			goto thread_fail;
		}
	}

	/* Register VF-FLR irq handlers */
	if (!is_vf) {
		rc = dev_vf_flr_register_irqs(pci_dev, dev);
		if (rc)
			goto stop_msg_thrd;
	}
	dev->mbox_active = 1;

	rc = npa_lf_init(dev, pci_dev);
	if (rc)
		goto stop_msg_thrd;

	/* Setup LMT line base */
	rc = dev_lmt_setup(dev);
	if (rc)
		goto stop_msg_thrd;

	return rc;
stop_msg_thrd:
	/* Exiting the mbox sync thread */
	if (dev->sync.start_thread) {
		dev->sync.start_thread = false;
		pthread_cond_signal(&dev->sync.pfvf_msg_cond);
		plt_thread_join(dev->sync.pfvf_msg_thread, NULL);
	}
thread_fail:
	pthread_mutex_destroy(&dev->sync.mutex);
	pthread_cond_destroy(&dev->sync.pfvf_msg_cond);
iounmap:
	dev_vf_mbase_put(pci_dev, vf_mbase);
mbox_unregister:
	dev_mbox_unregister_irq(pci_dev, dev);
	if (dev->ops)
		plt_free(dev->ops);
mbox_fini:
	mbox_fini(dev->mbox);
	mbox_fini(&dev->mbox_up);
error:
	plt_free(dev->mbox_plat);
fail:
	return rc;
}

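/* Tear down in roughly the reverse order of dev_init(): stop the PF-VF
 * message thread, release the NPA LF and LMT region, unregister mbox/FLR
 * interrupts and finalize all mailboxes before disabling the MSIX vectors.
 */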
int
dev_fini(struct dev *dev, struct plt_pci_device *pci_dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	struct mbox *mbox;

	/* Check if this dev hosts npalf and has 1+ refs */
	if (idev_npa_lf_active(dev) > 1)
		return -EAGAIN;

	/* Exiting the mbox sync thread */
	if (dev->sync.start_thread) {
		dev->sync.start_thread = false;
		pthread_cond_signal(&dev->sync.pfvf_msg_cond);
		plt_thread_join(dev->sync.pfvf_msg_thread, NULL);
		pthread_mutex_destroy(&dev->sync.mutex);
		pthread_cond_destroy(&dev->sync.pfvf_msg_cond);
	}

	/* Clear references to this pci dev */
	npa_lf_fini();

	/* Releasing memory allocated for lmt region */
	if (dev->lmt_mz)
		plt_memzone_free(dev->lmt_mz);

	dev_mbox_unregister_irq(pci_dev, dev);

	if (!dev_is_vf(dev)) {
		dev_vf_flr_unregister_irqs(pci_dev, dev);
		/* Releasing memory allocated for mbox region */
		if (dev->vf_mbox_mz)
			plt_memzone_free(dev->vf_mbox_mz);
	}

	/* Release PF - VF */
	mbox = &dev->mbox_vfpf;
	if (mbox->hwbase && mbox->dev)
		dev_vf_mbase_put(pci_dev, mbox->hwbase);

	if (dev->ops)
		plt_free(dev->ops);

	mbox_fini(mbox);
	mbox = &dev->mbox_vfpf_up;
	mbox_fini(mbox);

	/* Release PF - AF */
	mbox = dev->mbox;
	mbox_fini(mbox);
	mbox = &dev->mbox_up;
	mbox_fini(mbox);
	dev->mbox_active = 0;

	plt_free(dev->mbox_plat);
	/* Disable MSIX vectors */
	dev_irqs_disable(intr_handle);
	return 0;
}