/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <fcntl.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "roc_api.h"
#include "roc_priv.h"

/* PCI Extended capability ID */
#define ROC_PCI_EXT_CAP_ID_SRIOV 0x10 /* SRIOV cap */

/* Single Root I/O Virtualization */
#define ROC_PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */

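/* Map a mailbox region at the given physical offset through /dev/mem */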
static void *
mbox_mem_map(off_t off, size_t size)
{
	void *va = MAP_FAILED;
	int mem_fd;

	if (size == 0 || off == 0) {
		plt_err("Invalid mbox area off 0x%jx size %zu", (intmax_t)off,
			size);
		goto error;
	}

	mem_fd = open("/dev/mem", O_RDWR);
	if (mem_fd < 0)
		goto error;

	va = plt_mmap(NULL, size, PLT_PROT_READ | PLT_PROT_WRITE,
		      PLT_MAP_SHARED, mem_fd, off);
	close(mem_fd);

	if (va == MAP_FAILED)
		plt_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd", size, mem_fd,
			(intmax_t)off);
error:
	return va;
}

static void
mbox_mem_unmap(void *va, size_t size)
{
	if (va)
		munmap(va, size);
}

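/* Send the queued mailbox message to AF and busy-wait on RVU_PF_INT for the
 * response; PF interrupts are masked since this runs from interrupt context.
 */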
static int
pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp)
{
	uint32_t timeout = 0, sleep = 1;
	struct mbox *mbox = dev->mbox;
	struct mbox_dev *mdev = &mbox->dev[0];

	volatile uint64_t int_status;
	struct mbox_msghdr *msghdr;
	uint64_t off;
	int rc = 0;

	/* We need to disable PF interrupts. We are in timer interrupt */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* Send message */
	mbox_msg_send(mbox, 0);

	do {
		plt_delay_ms(sleep);
		timeout += sleep;
		if (timeout >= mbox->rsp_tmo) {
			plt_err("Message timeout: %dms", mbox->rsp_tmo);
			rc = -EIO;
			break;
		}
		int_status = plt_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	/* Clear */
	plt_write64(int_status, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	if (rc == 0) {
		/* Get message */
		off = mbox->rx_start +
		      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
		msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
		if (rsp)
			*rsp = msghdr;
		rc = msghdr->rc;
	}

	return rc;
}

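/* Wait for AF to respond to 'num_msg' routed messages and copy each response
 * from the AF<->PF mailbox into the PF<->VF mailbox of the given VF.
 */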
static int
af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
{
	uint32_t timeout = 0, sleep = 1;
	struct mbox *mbox = dev->mbox;
	struct mbox_dev *mdev = &mbox->dev[0];
	volatile uint64_t int_status;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_msghdr *rsp;
	uint64_t offset;
	size_t size;
	int i;

	/* We need to disable PF interrupts. We are in timer interrupt */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* Send message */
	mbox_msg_send(mbox, 0);

	do {
		plt_delay_ms(sleep);
		timeout++;
		if (timeout >= mbox->rsp_tmo) {
			plt_err("Routed messages %d timeout: %dms", num_msg,
				mbox->rsp_tmo);
			break;
		}
		int_status = plt_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	/* Clear */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	plt_spinlock_lock(&mdev->mbox_lock);

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs != num_msg)
		plt_err("Routed messages: %d received: %d", num_msg,
			req_hdr->num_msgs);

	/* Get messages from mbox */
	offset = mbox->rx_start +
		 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* Reserve PF/VF mbox message */
		size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
		rsp = mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
		mbox_rsp_init(msg->id, rsp);

		/* Copy message from AF<->PF mbox to PF<->VF mbox */
		mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
			    (uint8_t *)msg + sizeof(struct mbox_msghdr),
			    size - sizeof(struct mbox_msghdr));

		/* Set status and sender pf_func data */
		rsp->rc = msg->rc;
		rsp->pcifunc = msg->pcifunc;

		/* Whenever a PF comes up, AF sends the link status to it, but
		 * when a VF comes up no such event is sent to that VF. So use
		 * the MBOX_MSG_NIX_LF_START_RX response from AF to forward the
		 * PF's link status to the VF.
		 */
		if (msg->id == MBOX_MSG_NIX_LF_START_RX) {
			/* Send link status to VF */
			struct cgx_link_user_info linfo;
			struct mbox_msghdr *vf_msg;
			size_t sz;

			/* Get the link status */
			memset(&linfo, 0, sizeof(struct cgx_link_user_info));
			if (dev->ops && dev->ops->link_status_get)
				dev->ops->link_status_get(dev->roc_nix, &linfo);

			sz = PLT_ALIGN(mbox_id2size(MBOX_MSG_CGX_LINK_EVENT),
				       MBOX_MSG_ALIGN);
			/* Prepare the message to be sent */
			vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz);
			if (vf_msg) {
				mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg);
				memcpy((uint8_t *)vf_msg +
				       sizeof(struct mbox_msghdr), &linfo,
				       sizeof(struct cgx_link_user_info));

				vf_msg->rc = msg->rc;
				vf_msg->pcifunc = msg->pcifunc;
				/* Send to VF */
				mbox_msg_send(&dev->mbox_vfpf_up, vf);
			}
		}

		offset = mbox->rx_start + msg->next_msgoff;
	}
	plt_spinlock_unlock(&mdev->mbox_lock);

	return req_hdr->num_msgs;
}

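/* Process mailbox requests queued by a VF: handle READY locally, route
 * everything else to AF, then forward AF's responses back to the VF.
 */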
static int
vf_pf_process_msgs(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = &dev->mbox_vfpf;
	struct mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int offset, routed = 0;
	size_t size;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (!req_hdr->num_msgs)
		return 0;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* RVU_PF_FUNC_S */
		msg->pcifunc = dev_pf_func(dev->pf, vf);

		if (msg->id == MBOX_MSG_READY) {
			struct ready_msg_rsp *rsp;
			uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;

			/* Handle READY message in PF */
			dev->active_vfs[vf / max_bits] |=
				BIT_ULL(vf % max_bits);
			rsp = (struct ready_msg_rsp *)mbox_alloc_msg(
				mbox, vf, sizeof(*rsp));
			mbox_rsp_init(msg->id, rsp);

			/* PF/VF function ID */
			rsp->hdr.pcifunc = msg->pcifunc;
			rsp->hdr.rc = 0;
		} else {
			struct mbox_msghdr *af_req;
			/* Reserve AF/PF mbox message */
			size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
			af_req = mbox_alloc_msg(dev->mbox, 0, size);
			if (af_req == NULL)
				return -ENOSPC;
			mbox_req_init(msg->id, af_req);

			/* Copy message from VF<->PF mbox to PF<->AF mbox */
			mbox_memcpy((uint8_t *)af_req +
					    sizeof(struct mbox_msghdr),
				    (uint8_t *)msg + sizeof(struct mbox_msghdr),
				    size - sizeof(struct mbox_msghdr));
			af_req->pcifunc = msg->pcifunc;
			routed++;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	if (routed > 0) {
		plt_base_dbg("pf:%d routed %d messages from vf:%d to AF",
			     dev->pf, routed, vf);
		af_pf_wait_msg(dev, vf, routed);
		mbox_reset(dev->mbox, 0);
	}

	/* Send mbox responses to VF */
	if (mdev->num_msgs) {
		plt_base_dbg("pf:%d reply %d messages to vf:%d", dev->pf,
			     mdev->num_msgs, vf);
		mbox_msg_send(mbox, vf);
	}

	return i;
}

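/* Drain the PF<->VF UP mailbox for the given VF and acknowledge the received
 * messages; only CGX link and PTP events are expected here, anything else is
 * reported as unhandled.
 */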
static int
vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = &dev->mbox_vfpf_up;
	struct mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return 0;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		/* RVU_PF_FUNC_S */
		msg->pcifunc = dev_pf_func(dev->pf, vf);

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				     msg->id, mbox_id2name(msg->id),
				     msg->pcifunc, dev_get_pf(msg->pcifunc),
				     dev_get_vf(msg->pcifunc));
			break;
		case MBOX_MSG_CGX_PTP_RX_INFO:
			plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				     msg->id, mbox_id2name(msg->id),
				     msg->pcifunc, dev_get_pf(msg->pcifunc),
				     dev_get_vf(msg->pcifunc));
			break;
		default:
			plt_err("Not handled UP msg 0x%x (%s) func:0x%x",
				msg->id, mbox_id2name(msg->id), msg->pcifunc);
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}
	mbox_reset(mbox, vf);
	mdev->msgs_acked = msgs_acked;
	plt_wmb();

	return i;
}

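/* Alarm callback: walk the pending-VF bitmap populated by the mbox IRQ and
 * process both regular and UP messages for each flagged VF.
 */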
static void
roc_vf_pf_mbox_handle_msg(void *param)
{
	uint16_t vf, max_vf, max_bits;
	struct dev *dev = param;

	max_bits = sizeof(dev->intr.bits[0]) * sizeof(uint64_t);
	max_vf = max_bits * MAX_VFPF_DWORD_BITS;

	for (vf = 0; vf < max_vf; vf++) {
		if (dev->intr.bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
			plt_base_dbg("Process vf:%d request (pf:%d, vf:%d)", vf,
				     dev->pf, dev->vf);
			vf_pf_process_msgs(dev, vf);
			/* UP messages */
			vf_pf_process_up_msgs(dev, vf);
			dev->intr.bits[vf / max_bits] &=
				~(BIT_ULL(vf % max_bits));
		}
	}
	dev->timer_set = 0;
}

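/* VF -> PF mailbox interrupt handler: latch and clear the per-VF interrupt
 * bits, then defer the actual message processing to an alarm callback.
 */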
static void
roc_vf_pf_mbox_irq(void *param)
{
	struct dev *dev = param;
	bool alarm_set = false;
	uint64_t intr;
	int vfpf;

	for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
		intr = plt_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
		if (!intr)
			continue;

		plt_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
			     vfpf, intr, dev->pf, dev->vf);

		/* Save and clear intr bits */
		dev->intr.bits[vfpf] |= intr;
		plt_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
		alarm_set = true;
	}

	if (!dev->timer_set && alarm_set) {
		dev->timer_set = 1;
		/* Start timer to handle messages */
		plt_alarm_set(VF_PF_MBOX_TIMER_MS, roc_vf_pf_mbox_handle_msg,
			      dev);
	}
}

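/* Process responses on the device's primary mailbox (from AF on a PF, from
 * the PF on a VF) and update msgs_acked so that waiters can make progress.
 */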
static void
process_msgs(struct dev *dev, struct mbox *mbox)
{
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));

		switch (msg->id) {
			/* Add message IDs that are handled here */
		case MBOX_MSG_READY:
			/* Get our identity */
			dev->pf_func = msg->pcifunc;
			break;

		default:
			if (msg->rc)
				plt_err("Message (%s) response has err=%d",
					mbox_id2name(msg->id), msg->rc);
			break;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	mbox_reset(mbox, 0);
	/* Update acked if someone is waiting for a message */
	mdev->msgs_acked = msgs_acked;
	plt_wmb();
}

/* Copies the message received from AF and sends it to VF */
static void
pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
{
	uint16_t max_bits = sizeof(dev->active_vfs[0]) * sizeof(uint64_t);
	struct mbox *vf_mbox = &dev->mbox_vfpf_up;
	struct msg_req *msg = rec_msg;
	struct mbox_msghdr *vf_msg;
	uint16_t vf;
	size_t size;

	size = PLT_ALIGN(mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
	/* Send UP message to all VFs */
	for (vf = 0; vf < vf_mbox->ndevs; vf++) {
		/* VF active */
		if (!(dev->active_vfs[vf / max_bits] & BIT_ULL(vf % max_bits)))
			continue;

		plt_base_dbg("(%s) size: %zx to VF: %d",
			     mbox_id2name(msg->hdr.id), size, vf);

		/* Reserve PF/VF mbox message */
		vf_msg = mbox_alloc_msg(vf_mbox, vf, size);
		if (!vf_msg) {
			plt_err("Failed to alloc VF%d UP message", vf);
			continue;
		}
		mbox_req_init(msg->hdr.id, vf_msg);

		/*
		 * Copy message from AF<->PF UP mbox
		 * to PF<->VF UP mbox
		 */
		mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr),
			    (uint8_t *)msg + sizeof(struct mbox_msghdr),
			    size - sizeof(struct mbox_msghdr));

		vf_msg->rc = msg->hdr.rc;
		/* Set PF to be a sender */
		vf_msg->pcifunc = dev->pf_func;

		/* Send to VF */
		mbox_msg_send(vf_mbox, vf);
	}
}

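/* UP message handler for CGX link events from AF: update the local link
 * status and, when the event comes from AF on a PF, forward it to the VFs.
 */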
static int
mbox_up_handler_cgx_link_event(struct dev *dev, struct cgx_link_info_msg *msg,
			       struct msg_rsp *rsp)
{
	struct cgx_link_user_info *linfo = &msg->link_info;
	void *roc_nix = dev->roc_nix;

	plt_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
		     linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
		     mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
		     dev_get_vf(msg->hdr.pcifunc));

	/* PF gets link notification from AF */
	if (dev_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(roc_nix, linfo);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets link up notification */
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(roc_nix, linfo);
	}

	rsp->hdr.rc = 0;
	return 0;
}

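/* UP message handler for CGX PTP Rx info from AF; mirrors the link event
 * handling above for PTP enable/disable notifications.
 */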
static int
mbox_up_handler_cgx_ptp_rx_info(struct dev *dev,
				struct cgx_ptp_rx_info_msg *msg,
				struct msg_rsp *rsp)
{
	void *roc_nix = dev->roc_nix;

	plt_base_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
		     msg->ptp_en ? "ENABLED" : "DISABLED", msg->hdr.id,
		     mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
		     dev_get_vf(msg->hdr.pcifunc));

	/* PF gets PTP notification from AF */
	if (dev_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(roc_nix, msg->ptp_en);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets PTP notification */
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(roc_nix, msg->ptp_en);
	}

	rsp->hdr.rc = 0;
	return 0;
}

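/* Dispatch one UP message: allocate the response in the UP mailbox and call
 * the matching handler generated from MBOX_UP_CGX_MESSAGES.
 */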
static int
mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
{
	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != MBOX_REQ_SIG)
		return -EIO;

	switch (req->id) {
	default:
		reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
		break;
#define M(_name, _id, _fn_name, _req_type, _rsp_type)                          \
	case _id: {                                                            \
		struct _rsp_type *rsp;                                         \
		int err;                                                       \
		rsp = (struct _rsp_type *)mbox_alloc_msg(                      \
			&dev->mbox_up, 0, sizeof(struct _rsp_type));           \
		if (!rsp)                                                      \
			return -ENOMEM;                                        \
		rsp->hdr.id = _id;                                             \
		rsp->hdr.sig = MBOX_RSP_SIG;                                   \
		rsp->hdr.pcifunc = dev->pf_func;                               \
		rsp->hdr.rc = 0;                                               \
		err = mbox_up_handler_##_fn_name(dev, (struct _req_type *)req, \
						 rsp);                         \
		return err;                                                    \
	}
		MBOX_UP_CGX_MESSAGES
#undef M
	}

	return -ENODEV;
}

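/* Walk the UP mailbox receive area, handle each message and send out any
 * queued responses.
 */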
static void
process_msgs_up(struct dev *dev, struct mbox *mbox)
{
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int i, err, offset;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));
		err = mbox_process_msgs_up(dev, msg);
		if (err)
			plt_err("Error %d handling 0x%x (%s)", err, msg->id,
				mbox_id2name(msg->id));
		offset = mbox->rx_start + msg->next_msgoff;
	}
	/* Send mbox responses */
	if (mdev->num_msgs) {
		plt_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
		mbox_msg_send(mbox, 0);
	}
}

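/* Mailbox interrupt handler on the VF side: ack RVU_VF_INT and process both
 * configuration responses and UP messages.
 */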
static void
roc_pf_vf_mbox_irq(void *param)
{
	struct dev *dev = param;
	uint64_t intr;

	intr = plt_read64(dev->bar2 + RVU_VF_INT);
	if (intr == 0)
		plt_base_dbg("Proceeding to check mbox UP messages if any");

	plt_write64(intr, dev->bar2 + RVU_VF_INT);
	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	process_msgs_up(dev, &dev->mbox_up);
}

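/* Mailbox interrupt handler on the PF side for the AF <-> PF mailbox */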
static void
roc_af_pf_mbox_irq(void *param)
{
	struct dev *dev = param;
	uint64_t intr;

	intr = plt_read64(dev->bar2 + RVU_PF_INT);
	if (intr == 0)
		plt_base_dbg("Proceeding to check mbox UP messages if any");

	plt_write64(intr, dev->bar2 + RVU_PF_INT);
	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	process_msgs_up(dev, &dev->mbox_up);
}

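/* Register PF mailbox interrupts: two VF<->PF vectors plus the AF<->PF
 * vector, then enable them in hardware.
 */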
static int
mbox_register_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
	int i, rc;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		plt_write64(~0ull,
			    dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	dev->timer_set = 0;

	/* MBOX interrupt for VF(0...63) <-> PF */
	rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_VFPF_MBOX0);

	if (rc) {
		plt_err("Failed to register PF(VF0-63) mbox irq");
		return rc;
	}
	/* MBOX interrupt for VF(64...127) <-> PF */
	rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_VFPF_MBOX1);

	if (rc) {
		plt_err("Failed to register PF(VF64-127) mbox irq");
		return rc;
	}
	/* MBOX interrupt AF <-> PF */
	rc = dev_irq_register(intr_handle, roc_af_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_AFPF_MBOX);
	if (rc) {
		plt_err("Failed to register AF<->PF mbox irq");
		return rc;
	}

	/* HW enable intr */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		plt_write64(~0ull,
			    dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));

	plt_write64(~0ull, dev->bar2 + RVU_PF_INT);
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	return rc;
}

static int
mbox_register_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
	int rc;

	/* Clear irq */
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* MBOX interrupt PF <-> VF */
	rc = dev_irq_register(intr_handle, roc_pf_vf_mbox_irq, dev,
			      RVU_VF_INT_VEC_MBOX);
	if (rc) {
		plt_err("Failed to register PF<->VF mbox irq");
		return rc;
	}

	/* HW enable intr */
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT);
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);

	return rc;
}

static int
mbox_register_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	if (dev_is_vf(dev))
		return mbox_register_vf_irq(pci_dev, dev);
	else
		return mbox_register_pf_irq(pci_dev, dev);
}

static void
mbox_unregister_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
	int i;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		plt_write64(~0ull,
			    dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	dev->timer_set = 0;

	plt_alarm_cancel(roc_vf_pf_mbox_handle_msg, dev);

	/* Unregister the interrupt handler for each vector */
	/* MBOX interrupt for VF(0...63) <-> PF */
	dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_VFPF_MBOX0);

	/* MBOX interrupt for VF(64...127) <-> PF */
	dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_VFPF_MBOX1);

	/* MBOX interrupt AF <-> PF */
	dev_irq_unregister(intr_handle, roc_af_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_AFPF_MBOX);
}

static void
mbox_unregister_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;

	/* Clear irq */
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* Unregister the interrupt handler */
	dev_irq_unregister(intr_handle, roc_pf_vf_mbox_irq, dev,
			   RVU_VF_INT_VEC_MBOX);
}

static void
mbox_unregister_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	if (dev_is_vf(dev))
		mbox_unregister_vf_irq(pci_dev, dev);
	else
		mbox_unregister_pf_irq(pci_dev, dev);
}

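/* Notify AF that a VF has gone through FLR, using a synchronous mailbox
 * message sent on behalf of that VF.
 */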
static int
vf_flr_send_msg(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = dev->mbox;
	struct msg_req *req;
	int rc;

	req = mbox_alloc_msg_vf_flr(mbox);
	if (req == NULL)
		return -ENOSPC;
	/* Overwrite pcifunc to indicate VF */
	req->hdr.pcifunc = dev_pf_func(dev->pf, vf);

	/* Sync message in interrupt context */
	rc = pf_af_sync_msg(dev, NULL);
	if (rc)
		plt_err("Failed to send VF FLR mbox msg, rc=%d", rc);

	return rc;
}

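/* VF FLR interrupt handler on the PF: for each VF with a pending FLR,
 * inform AF, signal FLR completion and re-enable the interrupt.
 */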
static void
roc_pf_vf_flr_irq(void *param)
{
	struct dev *dev = (struct dev *)param;
	uint16_t max_vf = 64, vf;
	uintptr_t bar2;
	uint64_t intr;
	int i;

	max_vf = (dev->maxvf > 0) ? dev->maxvf : 64;
	bar2 = dev->bar2;

	plt_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);

	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		intr = plt_read64(bar2 + RVU_PF_VFFLR_INTX(i));
		if (!intr)
			continue;

		for (vf = 0; vf < max_vf; vf++) {
			if (!(intr & (1ULL << vf)))
				continue;

			plt_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d", i,
				     intr, (64 * i + vf));
			/* Clear interrupt */
			plt_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
			/* Disable the interrupt */
			plt_write64(BIT_ULL(vf),
				    bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
			/* Inform AF about VF reset */
			vf_flr_send_msg(dev, vf);

			/* Signal FLR finish */
			plt_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
			/* Enable interrupt */
			plt_write64(~0ull, bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
		}
	}
}

static int
vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
	int i;

	plt_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));

	dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
			   RVU_PF_INT_VEC_VFFLR0);

	dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
			   RVU_PF_INT_VEC_VFFLR1);

	return 0;
}

static int
vf_flr_register_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *handle = &pci_dev->intr_handle;
	int i, rc;

	plt_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);

	rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
			      RVU_PF_INT_VEC_VFFLR0);
	if (rc)
		plt_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);

	rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
			      RVU_PF_INT_VEC_VFFLR1);
	if (rc)
		plt_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);

	/* Enable HW interrupt */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
	}
	return 0;
}

int
dev_active_vfs(struct dev *dev)
{
	int i, count = 0;

	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		count += __builtin_popcount(dev->active_vfs[i]);

	return count;
}

static void
dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev)
{
	switch (pci_dev->id.device_id) {
	case PCI_DEVID_CNXK_RVU_PF:
		break;
	case PCI_DEVID_CNXK_RVU_SSO_TIM_VF:
	case PCI_DEVID_CNXK_RVU_NPA_VF:
	case PCI_DEVID_CN10K_RVU_CPT_VF:
	case PCI_DEVID_CN9K_RVU_CPT_VF:
	case PCI_DEVID_CNXK_RVU_AF_VF:
	case PCI_DEVID_CNXK_RVU_VF:
	case PCI_DEVID_CNXK_RVU_SDP_VF:
		dev->hwcap |= DEV_HWCAP_F_VF;
		break;
	}
}

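/* Return the base address of the VF mailbox region: on CN10K it follows the
 * PF mailbox in BAR4, on CN9K it is read from hardware and mapped via
 * /dev/mem.
 */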
static uintptr_t
dev_vf_mbase_get(struct plt_pci_device *pci_dev, struct dev *dev)
{
	void *vf_mbase = NULL;
	uintptr_t pa;

	if (dev_is_vf(dev))
		return 0;

	/* For CN10K onwards, it is just after PF MBOX */
	if (!roc_model_is_cn9k())
		return dev->bar4 + MBOX_SIZE;

	pa = plt_read64(dev->bar2 + RVU_PF_VF_BAR4_ADDR);
	if (!pa) {
		plt_err("Invalid VF mbox base pa");
		return pa;
	}

	vf_mbase = mbox_mem_map(pa, MBOX_SIZE * pci_dev->max_vfs);
	if (vf_mbase == MAP_FAILED) {
		plt_err("Failed to mmap vf mbase at pa 0x%lx, rc=%d", pa,
			errno);
		return 0;
	}
	return (uintptr_t)vf_mbase;
}

static void
dev_vf_mbase_put(struct plt_pci_device *pci_dev, uintptr_t vf_mbase)
{
	if (!vf_mbase || !pci_dev->max_vfs || !roc_model_is_cn9k())
		return;

	mbox_mem_unmap((void *)vf_mbase, MBOX_SIZE * pci_dev->max_vfs);
}

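/* Request AF to set up the LMT table entry for this pf_func: either use the
 * local LMT IOVA or share the primary pf_func's LMT region.
 */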
static int
dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova)
{
	struct lmtst_tbl_setup_req *req;

	req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
	if (req == NULL)
		return -ENOSPC;

	/* The pcifunc field selects the primary pcifunc whose LMT address
	 * is to be shared. When the call carries a valid IOVA, the pcifunc
	 * field is not used.
	 */
	req->pcifunc = valid_iova ? 0 : idev_lmt_pffunc_get();
	req->use_local_lmt_region = valid_iova;
	req->lmt_iova = iova;

	return mbox_process(mbox);
}

/* Total no of lines * size of each lmtline */
#define LMT_REGION_SIZE (ROC_NUM_LMT_LINES * ROC_LMT_LINE_SZ)
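/* Set up the LMT line base for this device: BAR2-based on CN9K, otherwise a
 * shared or per-pf_func DMA region allocated and registered with AF.
 */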
static int
dev_lmt_setup(struct dev *dev)
{
	char name[PLT_MEMZONE_NAMESIZE];
	const struct plt_memzone *mz;
	struct idev_cfg *idev;
	int rc;

	if (roc_model_is_cn9k()) {
		dev->lmt_base = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
		return 0;
	}

	/* [CN10K, .) */

	/* Set common lmt region from second pf_func onwards. */
	if (!dev->disable_shared_lmt && idev_lmt_pffunc_get() &&
	    dev->pf_func != idev_lmt_pffunc_get()) {
		rc = dev_setup_shared_lmt_region(dev->mbox, false, 0);
		if (!rc) {
			/* On success, update the lmt base of secondary
			 * pf_funcs with the primary pf_func's lmt base.
			 */
			dev->lmt_base = roc_idev_lmt_base_addr_get();
			return rc;
		}
		plt_err("Failed to setup shared lmt region, pf_func %d err %d; "
			"using respective LMT region per pf func",
			dev->pf_func, rc);
	}

	/* Allocate memory for the LMT region */
	sprintf(name, "LMT_MAP%x", dev->pf_func);

	/* Set alignment to ensure correct masking when resetting to the lmt
	 * base of a core after all lmt lines under that core are used.
	 * An alignment of LMT_REGION_SIZE handles the case where all lines
	 * are used by one core.
	 */
	mz = plt_lmt_region_reserve_aligned(name, LMT_REGION_SIZE,
					    LMT_REGION_SIZE);
	if (!mz) {
		plt_err("Memory alloc failed: %s", strerror(errno));
		goto fail;
	}

	/* Share the IOVA address with Kernel */
	rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova);
	if (rc) {
		errno = rc;
		goto free;
	}

	dev->lmt_base = mz->iova;
	dev->lmt_mz = mz;
	/* Base LMT address should be chosen from only those pci funcs which
	 * participate in LMT shared mode.
	 */
	if (!dev->disable_shared_lmt) {
		idev = idev_get_cfg();
		if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) {
			idev->lmt_base_addr = dev->lmt_base;
			idev->lmt_pf_func = dev->pf_func;
			idev->num_lmtlines = RVU_LMT_LINE_MAX;
		}
	}

	return 0;
free:
	plt_memzone_free(mz);
fail:
	return -errno;
}

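/* Initialize an RVU PF/VF device: map BARs, bring up the AF/PF (or PF/VF)
 * mailboxes and interrupts, query the pf_func identity, set up VF mailboxes
 * and FLR handling on a PF, and configure NPA and the LMT region.
 */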
int
dev_init(struct dev *dev, struct plt_pci_device *pci_dev)
{
	int direction, up_direction, rc;
	uintptr_t bar2, bar4, mbox;
	uintptr_t vf_mbase = 0;
	uint64_t intr_offset;

	bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
	bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
	if (bar2 == 0 || bar4 == 0) {
		plt_err("Failed to get PCI bars");
		rc = -ENODEV;
		goto error;
	}

	/* Trigger fault on bar2 and bar4 regions
	 * to avoid BUG_ON in remap_pfn_range()
	 * in recent kernels.
	 */
	*(volatile uint64_t *)bar2;
	*(volatile uint64_t *)bar4;

	/* Check ROC model supported */
	if (roc_model->flag == 0) {
		rc = UTIL_ERR_INVALID_MODEL;
		goto error;
	}

	dev->maxvf = pci_dev->max_vfs;
	dev->bar2 = bar2;
	dev->bar4 = bar4;
	dev_vf_hwcap_update(pci_dev, dev);

	if (dev_is_vf(dev)) {
		mbox = (roc_model_is_cn9k() ?
			bar4 : (bar2 + RVU_VF_MBOX_REGION));
		direction = MBOX_DIR_VFPF;
		up_direction = MBOX_DIR_VFPF_UP;
		intr_offset = RVU_VF_INT;
	} else {
		mbox = bar4;
		direction = MBOX_DIR_PFAF;
		up_direction = MBOX_DIR_PFAF_UP;
		intr_offset = RVU_PF_INT;
	}

	/* Initialize the local mbox */
	rc = mbox_init(&dev->mbox_local, mbox, bar2, direction, 1, intr_offset);
	if (rc)
		goto error;
	dev->mbox = &dev->mbox_local;

	rc = mbox_init(&dev->mbox_up, mbox, bar2, up_direction, 1, intr_offset);
	if (rc)
		goto mbox_fini;

	/* Register mbox interrupts */
	rc = mbox_register_irq(pci_dev, dev);
	if (rc)
		goto mbox_fini;

	/* Check the readiness of PF/VF */
	rc = send_ready_msg(dev->mbox, &dev->pf_func);
	if (rc)
		goto mbox_unregister;

	dev->pf = dev_get_pf(dev->pf_func);
	dev->vf = dev_get_vf(dev->pf_func);
	memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));

	/* Allocate memory for device ops */
	dev->ops = plt_zmalloc(sizeof(struct dev_ops), 0);
	if (dev->ops == NULL) {
		rc = -ENOMEM;
		goto mbox_unregister;
	}

	/* VF devices found under this PF device */
	if (pci_dev->max_vfs > 0) {
		/* Remap mbox area for all VFs */
		vf_mbase = dev_vf_mbase_get(pci_dev, dev);
		if (!vf_mbase) {
			rc = -ENODEV;
			goto mbox_unregister;
		}
		/* Init mbox object */
		rc = mbox_init(&dev->mbox_vfpf, vf_mbase, bar2, MBOX_DIR_PFVF,
			       pci_dev->max_vfs, intr_offset);
		if (rc)
			goto iounmap;

		/* PF -> VF UP messages */
		rc = mbox_init(&dev->mbox_vfpf_up, vf_mbase, bar2,
			       MBOX_DIR_PFVF_UP, pci_dev->max_vfs, intr_offset);
		if (rc)
			goto iounmap;
	}

	/* Register VF-FLR irq handlers */
	if (!dev_is_vf(dev)) {
		rc = vf_flr_register_irqs(pci_dev, dev);
		if (rc)
			goto iounmap;
	}
	dev->mbox_active = 1;

	rc = npa_lf_init(dev, pci_dev);
	if (rc)
		goto iounmap;

	/* Setup LMT line base */
	rc = dev_lmt_setup(dev);
	if (rc)
		goto iounmap;

	return rc;
iounmap:
	dev_vf_mbase_put(pci_dev, vf_mbase);
mbox_unregister:
	mbox_unregister_irq(pci_dev, dev);
	if (dev->ops)
		plt_free(dev->ops);
mbox_fini:
	mbox_fini(dev->mbox);
	mbox_fini(&dev->mbox_up);
error:
	return rc;
}

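/* Tear down an RVU PF/VF device: release NPA and LMT resources, unregister
 * interrupts and finalize all mailboxes.
 */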
int
dev_fini(struct dev *dev, struct plt_pci_device *pci_dev)
{
	struct plt_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct mbox *mbox;

	/* Check if this dev hosts npalf and has 1+ refs */
	if (idev_npa_lf_active(dev) > 1)
		return -EAGAIN;

	/* Clear references to this pci dev */
	npa_lf_fini();

	/* Releasing memory allocated for lmt region */
	if (dev->lmt_mz)
		plt_memzone_free(dev->lmt_mz);

	mbox_unregister_irq(pci_dev, dev);

	if (!dev_is_vf(dev))
		vf_flr_unregister_irqs(pci_dev, dev);
	/* Release PF - VF */
	mbox = &dev->mbox_vfpf;
	if (mbox->hwbase && mbox->dev)
		dev_vf_mbase_put(pci_dev, mbox->hwbase);

	if (dev->ops)
		plt_free(dev->ops);

	mbox_fini(mbox);
	mbox = &dev->mbox_vfpf_up;
	mbox_fini(mbox);

	/* Release PF - AF */
	mbox = dev->mbox;
	mbox_fini(mbox);
	mbox = &dev->mbox_up;
	mbox_fini(mbox);
	dev->mbox_active = 0;

	/* Disable MSIX vectors */
	dev_irqs_disable(intr_handle);
	return 0;
}