/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <fcntl.h>
#include <inttypes.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include "roc_api.h"
#include "roc_priv.h"

/* PCI Extended capability ID */
#define ROC_PCI_EXT_CAP_ID_SRIOV 0x10 /* SRIOV cap */

/* Single Root I/O Virtualization */
#define ROC_PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */

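/* Map a mailbox region given its physical address and size via /dev/mem;
 * returns MAP_FAILED on any error.
 */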
static void *
mbox_mem_map(off_t off, size_t size)
{
	void *va = MAP_FAILED;
	int mem_fd;

	if (size == 0 || !off) {
		plt_err("Invalid mbox area off 0x%jx size %zu",
			(intmax_t)off, size);
		goto error;
	}

	mem_fd = open("/dev/mem", O_RDWR);
	if (mem_fd < 0)
		goto error;

	va = plt_mmap(NULL, size, PLT_PROT_READ | PLT_PROT_WRITE,
		      PLT_MAP_SHARED, mem_fd, off);
	close(mem_fd);

	if (va == MAP_FAILED)
		plt_err("Failed to mmap sz=0x%zx, fd=%d, off=%jd", size, mem_fd,
			(intmax_t)off);
error:
	return va;
}

static void
mbox_mem_unmap(void *va, size_t size)
{
	if (va)
		munmap(va, size);
}

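/* Send the message already queued in the PF <-> AF mailbox and busy-wait for
 * AF's response with PF interrupts masked. Meant for interrupt context,
 * where the normal interrupt-driven completion path cannot run.
 */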
static int
pf_af_sync_msg(struct dev *dev, struct mbox_msghdr **rsp)
{
	uint32_t timeout = 0, sleep = 1;
	struct mbox *mbox = dev->mbox;
	struct mbox_dev *mdev = &mbox->dev[0];

	volatile uint64_t int_status = 0;
	struct mbox_msghdr *msghdr;
	uint64_t off;
	int rc = 0;

	/* Disable PF interrupts; we are called from interrupt context */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* Send message */
	mbox_msg_send(mbox, 0);

	do {
		plt_delay_ms(sleep);
		timeout += sleep;
		if (timeout >= mbox->rsp_tmo) {
			plt_err("Message timeout: %ums", mbox->rsp_tmo);
			rc = -EIO;
			break;
		}
		int_status = plt_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	/* Clear */
	plt_write64(int_status, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	if (rc == 0) {
		/* Get message */
		off = mbox->rx_start +
		      PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
		msghdr = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + off);
		if (rsp)
			*rsp = msghdr;
		rc = msghdr->rc;
	}

	return rc;
}

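/* Forward routed VF messages to AF, wait for AF to respond, then copy each
 * AF response from the AF <-> PF mailbox into the PF <-> VF mailbox of the
 * requesting VF. Returns the number of responses received from AF.
 */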
static int
af_pf_wait_msg(struct dev *dev, uint16_t vf, int num_msg)
{
	uint32_t timeout = 0, sleep = 1;
	struct mbox *mbox = dev->mbox;
	struct mbox_dev *mdev = &mbox->dev[0];
	volatile uint64_t int_status;
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	struct mbox_msghdr *rsp;
	uint64_t offset;
	size_t size;
	int i;

	/* Disable PF interrupts; we are called from interrupt context */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	/* Send message */
	mbox_msg_send(mbox, 0);

	do {
		plt_delay_ms(sleep);
		timeout += sleep;
		if (timeout >= mbox->rsp_tmo) {
			plt_err("Routed messages %d timeout: %ums", num_msg,
				mbox->rsp_tmo);
			break;
		}
		int_status = plt_read64(dev->bar2 + RVU_PF_INT);
	} while ((int_status & 0x1) != 0x1);

	/* Clear */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT);

	/* Enable interrupts */
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	plt_spinlock_lock(&mdev->mbox_lock);

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs != num_msg)
		plt_err("Routed messages: %d received: %d", num_msg,
			req_hdr->num_msgs);

	/* Get messages from mbox */
	offset = mbox->rx_start +
		 PLT_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* Reserve PF/VF mbox message */
		size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
		rsp = mbox_alloc_msg(&dev->mbox_vfpf, vf, size);
		if (!rsp) {
			plt_err("Failed to reserve VF%d message", vf);
			continue;
		}

		mbox_rsp_init(msg->id, rsp);

		/* Copy message from AF<->PF mbox to PF<->VF mbox */
		mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),
			    (uint8_t *)msg + sizeof(struct mbox_msghdr),
			    size - sizeof(struct mbox_msghdr));

		/* Set status and sender pf_func data */
		rsp->rc = msg->rc;
		rsp->pcifunc = msg->pcifunc;

		/* Whenever a PF comes up, AF sends the link status to it, but
		 * when a VF comes up no such event is sent to that VF. So use
		 * the MBOX_MSG_NIX_LF_START_RX response from AF as the trigger
		 * to forward the PF's link status to the VF.
		 */
		if (msg->id == MBOX_MSG_NIX_LF_START_RX) {
			/* Send link status to VF */
			struct cgx_link_user_info linfo;
			struct mbox_msghdr *vf_msg;
			size_t sz;

			/* Get the link status */
			memset(&linfo, 0, sizeof(struct cgx_link_user_info));
			if (dev->ops && dev->ops->link_status_get)
				dev->ops->link_status_get(dev->roc_nix, &linfo);

			sz = PLT_ALIGN(mbox_id2size(MBOX_MSG_CGX_LINK_EVENT),
				       MBOX_MSG_ALIGN);
			/* Prepare the message to be sent */
			vf_msg = mbox_alloc_msg(&dev->mbox_vfpf_up, vf, sz);
			if (vf_msg) {
				mbox_req_init(MBOX_MSG_CGX_LINK_EVENT, vf_msg);
				memcpy((uint8_t *)vf_msg +
				       sizeof(struct mbox_msghdr), &linfo,
				       sizeof(struct cgx_link_user_info));

				vf_msg->rc = msg->rc;
				vf_msg->pcifunc = msg->pcifunc;
				/* Send to VF */
				mbox_msg_send(&dev->mbox_vfpf_up, vf);
			}
		}

		offset = mbox->rx_start + msg->next_msgoff;
	}
	plt_spinlock_unlock(&mdev->mbox_lock);

	return req_hdr->num_msgs;
}

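/* Process pending mailbox requests from the given VF: handle READY in the PF
 * itself and route everything else to AF, then send the accumulated
 * responses back to the VF. Returns the number of messages processed.
 */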
static int
vf_pf_process_msgs(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = &dev->mbox_vfpf;
	struct mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int offset, routed = 0;
	size_t size;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (!req_hdr->num_msgs)
		return 0;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);
		size = mbox->rx_start + msg->next_msgoff - offset;

		/* RVU_PF_FUNC_S */
		msg->pcifunc = dev_pf_func(dev->pf, vf);

		if (msg->id == MBOX_MSG_READY) {
			struct ready_msg_rsp *rsp;
			uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;

			/* Handle READY message in PF */
			dev->active_vfs[vf / max_bits] |=
				BIT_ULL(vf % max_bits);
			rsp = (struct ready_msg_rsp *)mbox_alloc_msg(
				mbox, vf, sizeof(*rsp));
			if (!rsp) {
				plt_err("Failed to alloc VF%d READY message",
					vf);
				continue;
			}

			mbox_rsp_init(msg->id, rsp);

			/* PF/VF function ID */
			rsp->hdr.pcifunc = msg->pcifunc;
			rsp->hdr.rc = 0;
		} else {
			struct mbox_msghdr *af_req;
			/* Reserve AF/PF mbox message */
			size = PLT_ALIGN(size, MBOX_MSG_ALIGN);
			af_req = mbox_alloc_msg(dev->mbox, 0, size);
			if (af_req == NULL)
				return -ENOSPC;
			mbox_req_init(msg->id, af_req);

			/* Copy message from VF<->PF mbox to PF<->AF mbox */
			mbox_memcpy((uint8_t *)af_req +
					    sizeof(struct mbox_msghdr),
				    (uint8_t *)msg + sizeof(struct mbox_msghdr),
				    size - sizeof(struct mbox_msghdr));
			af_req->pcifunc = msg->pcifunc;
			routed++;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	if (routed > 0) {
		plt_base_dbg("pf:%d routed %d messages from vf:%d to AF",
			     dev->pf, routed, vf);
		af_pf_wait_msg(dev, vf, routed);
		mbox_reset(dev->mbox, 0);
	}

	/* Send mbox responses to VF */
	if (mdev->num_msgs) {
		plt_base_dbg("pf:%d reply %d messages to vf:%d", dev->pf,
			     mdev->num_msgs, vf);
		mbox_msg_send(mbox, vf);
	}

	return i;
}

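/* Process messages received from the given VF on the UP mailbox channel
 * (the VF's replies to PF notifications such as link and PTP events);
 * nothing is routed to AF, only acknowledged and logged.
 */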
static int
vf_pf_process_up_msgs(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = &dev->mbox_vfpf_up;
	struct mbox_dev *mdev = &mbox->dev[vf];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return 0;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		/* RVU_PF_FUNC_S */
		msg->pcifunc = dev_pf_func(dev->pf, vf);

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				     msg->id, mbox_id2name(msg->id),
				     msg->pcifunc, dev_get_pf(msg->pcifunc),
				     dev_get_vf(msg->pcifunc));
			break;
		case MBOX_MSG_CGX_PTP_RX_INFO:
			plt_base_dbg("PF: Msg 0x%x (%s) fn:0x%x (pf:%d,vf:%d)",
				     msg->id, mbox_id2name(msg->id),
				     msg->pcifunc, dev_get_pf(msg->pcifunc),
				     dev_get_vf(msg->pcifunc));
			break;
		default:
			plt_err("Unhandled UP msg 0x%x (%s) func:0x%x",
				msg->id, mbox_id2name(msg->id), msg->pcifunc);
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}
	mbox_reset(mbox, vf);
	mdev->msgs_acked = msgs_acked;
	plt_wmb();

	return i;
}

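/* Alarm callback scheduled from the VF <-> PF mailbox IRQ: walk the latched
 * interrupt bitmap and service every VF that raised a mailbox doorbell.
 */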
static void
roc_vf_pf_mbox_handle_msg(void *param)
{
	uint16_t vf, max_vf, max_bits;
	struct dev *dev = param;

	max_bits = sizeof(dev->intr.bits[0]) * 8; /* bits per bitmap word */
	max_vf = max_bits * MAX_VFPF_DWORD_BITS;

	for (vf = 0; vf < max_vf; vf++) {
		if (dev->intr.bits[vf / max_bits] & BIT_ULL(vf % max_bits)) {
			plt_base_dbg("Process vf:%d request (pf:%d, vf:%d)", vf,
				     dev->pf, dev->vf);
			vf_pf_process_msgs(dev, vf);
			/* UP messages */
			vf_pf_process_up_msgs(dev, vf);
			dev->intr.bits[vf / max_bits] &=
				~(BIT_ULL(vf % max_bits));
		}
	}
	dev->timer_set = 0;
}

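/* VF <-> PF mailbox IRQ handler: latch and clear the per-VF interrupt bits,
 * then defer the actual message processing to an alarm so that the IRQ
 * handler itself stays short.
 */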
static void
roc_vf_pf_mbox_irq(void *param)
{
	struct dev *dev = param;
	bool alarm_set = false;
	uint64_t intr;
	int vfpf;

	for (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {
		intr = plt_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
		if (!intr)
			continue;

		plt_base_dbg("vfpf: %d intr: 0x%" PRIx64 " (pf:%d, vf:%d)",
			     vfpf, intr, dev->pf, dev->vf);

		/* Save and clear intr bits */
		dev->intr.bits[vfpf] |= intr;
		plt_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));
		alarm_set = true;
	}

	if (!dev->timer_set && alarm_set) {
		dev->timer_set = 1;
		/* Start timer to handle messages */
		plt_alarm_set(VF_PF_MBOX_TIMER_MS, roc_vf_pf_mbox_handle_msg,
			      dev);
	}
}

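/* Consume responses arriving on the given mailbox, handle the few message
 * IDs the driver acts on directly, and publish msgs_acked for synchronous
 * waiters.
 */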
static void
process_msgs(struct dev *dev, struct mbox *mbox)
{
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int msgs_acked = 0;
	int offset;
	uint16_t i;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		msgs_acked++;
		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));

		switch (msg->id) {
			/* Add message IDs that are handled here */
		case MBOX_MSG_READY:
			/* Get our identity */
			dev->pf_func = msg->pcifunc;
			break;
		case MBOX_MSG_CGX_PRIO_FLOW_CTRL_CFG:
			/* Handle the case where one VF tries to disable PFC
			 * while PFC is already configured on other VFs. This
			 * is not an error but a warning which can be ignored.
			 */
#define LMAC_AF_ERR_PERM_DENIED -1103
			if (msg->rc) {
				if (msg->rc == LMAC_AF_ERR_PERM_DENIED) {
					plt_mbox_dbg(
						"Rx flow control disable not permitted "
						"as it is in use by other PF/VFs");
					msg->rc = 0;
				} else {
					plt_err("Message (%s) response has err=%d",
						mbox_id2name(msg->id), msg->rc);
				}
			}
			break;

		default:
			if (msg->rc)
				plt_err("Message (%s) response has err=%d",
					mbox_id2name(msg->id), msg->rc);
			break;
		}
		offset = mbox->rx_start + msg->next_msgoff;
	}

	mbox_reset(mbox, 0);
	/* Update acked count if someone is waiting for a message */
	mdev->msgs_acked = msgs_acked;
	plt_wmb();
}

/* Copy the UP message received from AF and send it to the active VFs */
static void
pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
{
	uint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;
	struct mbox *vf_mbox = &dev->mbox_vfpf_up;
	struct msg_req *msg = rec_msg;
	struct mbox_msghdr *vf_msg;
	uint16_t vf;
	size_t size;

	size = PLT_ALIGN(mbox_id2size(msg->hdr.id), MBOX_MSG_ALIGN);
	/* Send UP message to all VFs */
	for (vf = 0; vf < vf_mbox->ndevs; vf++) {
		/* VF active */
		if (!(dev->active_vfs[vf / max_bits] & BIT_ULL(vf % max_bits)))
			continue;

		plt_base_dbg("(%s) size: %zx to VF: %d",
			     mbox_id2name(msg->hdr.id), size, vf);

		/* Reserve PF/VF mbox message */
		vf_msg = mbox_alloc_msg(vf_mbox, vf, size);
		if (!vf_msg) {
			plt_err("Failed to alloc VF%d UP message", vf);
			continue;
		}
		mbox_req_init(msg->hdr.id, vf_msg);

		/*
		 * Copy message from AF<->PF UP mbox
		 * to PF<->VF UP mbox
		 */
		mbox_memcpy((uint8_t *)vf_msg + sizeof(struct mbox_msghdr),
			    (uint8_t *)msg + sizeof(struct mbox_msghdr),
			    size - sizeof(struct mbox_msghdr));

		vf_msg->rc = msg->hdr.rc;
		/* Set PF to be a sender */
		vf_msg->pcifunc = dev->pf_func;

		/* Send to VF */
		mbox_msg_send(vf_mbox, vf);
	}
}

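/* UP-message handler for CGX link events: update the local driver state via
 * the link_status_update op and, when running as PF, fan the notification
 * out to all active VFs.
 */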
static int
mbox_up_handler_cgx_link_event(struct dev *dev, struct cgx_link_info_msg *msg,
			       struct msg_rsp *rsp)
{
	struct cgx_link_user_info *linfo = &msg->link_info;
	void *roc_nix = dev->roc_nix;

	plt_base_dbg("pf:%d/vf:%d NIC Link %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
		     linfo->link_up ? "UP" : "DOWN", msg->hdr.id,
		     mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
		     dev_get_vf(msg->hdr.pcifunc));

	/* PF gets link notification from AF */
	if (dev_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(roc_nix, linfo);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets link up notification */
		if (dev->ops && dev->ops->link_status_update)
			dev->ops->link_status_update(roc_nix, linfo);
	}

	rsp->hdr.rc = 0;
	return 0;
}

static int
mbox_up_handler_cgx_ptp_rx_info(struct dev *dev,
				struct cgx_ptp_rx_info_msg *msg,
				struct msg_rsp *rsp)
{
	void *roc_nix = dev->roc_nix;

	plt_base_dbg("pf:%d/vf:%d PTP mode %s --> 0x%x (%s) from: pf:%d/vf:%d",
		     dev_get_pf(dev->pf_func), dev_get_vf(dev->pf_func),
		     msg->ptp_en ? "ENABLED" : "DISABLED", msg->hdr.id,
		     mbox_id2name(msg->hdr.id), dev_get_pf(msg->hdr.pcifunc),
		     dev_get_vf(msg->hdr.pcifunc));

	/* PF gets PTP notification from AF */
	if (dev_get_pf(msg->hdr.pcifunc) == 0) {
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(roc_nix, msg->ptp_en);

		/* Forward the same message as received from AF to VF */
		pf_vf_mbox_send_up_msg(dev, msg);
	} else {
		/* VF gets PTP notification */
		if (dev->ops && dev->ops->ptp_info_update)
			dev->ops->ptp_info_update(roc_nix, msg->ptp_en);
	}

	rsp->hdr.rc = 0;
	return 0;
}

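/* Dispatch one UP message to its handler. The M() X-macro below expands
 * MBOX_UP_CGX_MESSAGES into one switch case per UP message ID: each case
 * allocates the response, pre-fills its header and calls the matching
 * mbox_up_handler_<name>() defined above.
 */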
static int
mbox_process_msgs_up(struct dev *dev, struct mbox_msghdr *req)
{
	/* Check if valid; if not, reply with an invalid msg */
	if (req->sig != MBOX_REQ_SIG)
		return -EIO;

	switch (req->id) {
	default:
		reply_invalid_msg(&dev->mbox_up, 0, 0, req->id);
		break;
#define M(_name, _id, _fn_name, _req_type, _rsp_type)                          \
	case _id: {                                                            \
		struct _rsp_type *rsp;                                         \
		int err;                                                       \
		rsp = (struct _rsp_type *)mbox_alloc_msg(                      \
			&dev->mbox_up, 0, sizeof(struct _rsp_type));           \
		if (!rsp)                                                      \
			return -ENOMEM;                                        \
		rsp->hdr.id = _id;                                             \
		rsp->hdr.sig = MBOX_RSP_SIG;                                   \
		rsp->hdr.pcifunc = dev->pf_func;                               \
		rsp->hdr.rc = 0;                                               \
		err = mbox_up_handler_##_fn_name(dev, (struct _req_type *)req, \
						 rsp);                         \
		return err;                                                    \
	}
		MBOX_UP_CGX_MESSAGES
#undef M
	}

	return -ENODEV;
}

static void
process_msgs_up(struct dev *dev, struct mbox *mbox)
{
	struct mbox_dev *mdev = &mbox->dev[0];
	struct mbox_hdr *req_hdr;
	struct mbox_msghdr *msg;
	int i, err, offset;

	req_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);
	if (req_hdr->num_msgs == 0)
		return;

	offset = mbox->rx_start + PLT_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
	for (i = 0; i < req_hdr->num_msgs; i++) {
		msg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);

		plt_base_dbg("Message 0x%x (%s) pf:%d/vf:%d", msg->id,
			     mbox_id2name(msg->id), dev_get_pf(msg->pcifunc),
			     dev_get_vf(msg->pcifunc));
		err = mbox_process_msgs_up(dev, msg);
		if (err)
			plt_err("Error %d handling 0x%x (%s)", err, msg->id,
				mbox_id2name(msg->id));
		offset = mbox->rx_start + msg->next_msgoff;
	}
	/* Send mbox responses */
	if (mdev->num_msgs) {
		plt_base_dbg("Reply num_msgs:%d", mdev->num_msgs);
		mbox_msg_send(mbox, 0);
	}
}

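/* Mailbox IRQ handler on the VF side (PF <-> VF channel): ack the interrupt,
 * then process configuration responses followed by UP notifications.
 */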
static void
roc_pf_vf_mbox_irq(void *param)
{
	struct dev *dev = param;
	uint64_t intr;

	intr = plt_read64(dev->bar2 + RVU_VF_INT);
	if (intr == 0)
		plt_base_dbg("Proceeding to check mbox UP messages if any");

	plt_write64(intr, dev->bar2 + RVU_VF_INT);
	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	process_msgs_up(dev, &dev->mbox_up);
}

static void
roc_af_pf_mbox_irq(void *param)
{
	struct dev *dev = param;
	uint64_t intr;

	intr = plt_read64(dev->bar2 + RVU_PF_INT);
	if (intr == 0)
		plt_base_dbg("Proceeding to check mbox UP messages if any");

	plt_write64(intr, dev->bar2 + RVU_PF_INT);
	plt_base_dbg("Irq 0x%" PRIx64 "(pf:%d,vf:%d)", intr, dev->pf, dev->vf);

	/* First process all configuration messages */
	process_msgs(dev, dev->mbox);

	/* Process Uplink messages */
	process_msgs_up(dev, &dev->mbox_up);
}

static int
mbox_register_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int i, rc;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		plt_write64(~0ull,
			    dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	dev->timer_set = 0;

	/* MBOX interrupt for VF(0...63) <-> PF */
	rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_VFPF_MBOX0);

	if (rc) {
		plt_err("Failed to register PF(VF0-63) mbox irq");
		return rc;
	}
	/* MBOX interrupt for VF(64...127) <-> PF */
	rc = dev_irq_register(intr_handle, roc_vf_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_VFPF_MBOX1);

	if (rc) {
		plt_err("Failed to register PF(VF64-127) mbox irq");
		return rc;
	}
	/* MBOX interrupt AF <-> PF */
	rc = dev_irq_register(intr_handle, roc_af_pf_mbox_irq, dev,
			      RVU_PF_INT_VEC_AFPF_MBOX);
	if (rc) {
		plt_err("Failed to register AF<->PF mbox irq");
		return rc;
	}

	/* HW enable intr */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		plt_write64(~0ull,
			    dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));

	plt_write64(~0ull, dev->bar2 + RVU_PF_INT);
	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);

	return rc;
}

static int
mbox_register_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int rc;

	/* Clear irq */
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* MBOX interrupt PF <-> VF */
	rc = dev_irq_register(intr_handle, roc_pf_vf_mbox_irq, dev,
			      RVU_VF_INT_VEC_MBOX);
	if (rc) {
		plt_err("Failed to register PF<->VF mbox irq");
		return rc;
	}

	/* HW enable intr */
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT);
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1S);

	return rc;
}

int
dev_mbox_register_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	if (dev_is_vf(dev))
		return mbox_register_vf_irq(pci_dev, dev);
	else
		return mbox_register_pf_irq(pci_dev, dev);
}

static void
mbox_unregister_pf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int i;

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)
		plt_write64(~0ull,
			    dev->bar2 + RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));

	plt_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);

	dev->timer_set = 0;

	plt_alarm_cancel(roc_vf_pf_mbox_handle_msg, dev);

	/* Unregister the interrupt handler for each vector */
	/* MBOX interrupt for VF(0...63) <-> PF */
	dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_VFPF_MBOX0);

	/* MBOX interrupt for VF(64...127) <-> PF */
	dev_irq_unregister(intr_handle, roc_vf_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_VFPF_MBOX1);

	/* MBOX interrupt AF <-> PF */
	dev_irq_unregister(intr_handle, roc_af_pf_mbox_irq, dev,
			   RVU_PF_INT_VEC_AFPF_MBOX);
}

static void
mbox_unregister_vf_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;

	/* Clear irq */
	plt_write64(~0ull, dev->bar2 + RVU_VF_INT_ENA_W1C);

	/* Unregister the interrupt handler */
	dev_irq_unregister(intr_handle, roc_pf_vf_mbox_irq, dev,
			   RVU_VF_INT_VEC_MBOX);
}

static void
mbox_unregister_irq(struct plt_pci_device *pci_dev, struct dev *dev)
{
	if (dev_is_vf(dev))
		mbox_unregister_vf_irq(pci_dev, dev);
	else
		mbox_unregister_pf_irq(pci_dev, dev);
}

static int
vf_flr_send_msg(struct dev *dev, uint16_t vf)
{
	struct mbox *mbox = dev->mbox;
	struct msg_req *req;
	int rc;

	req = mbox_alloc_msg_vf_flr(mbox);
	if (req == NULL)
		return -ENOSPC;
	/* Overwrite pcifunc to indicate VF */
	req->hdr.pcifunc = dev_pf_func(dev->pf, vf);

	/* Sync message in interrupt context */
	rc = pf_af_sync_msg(dev, NULL);
	if (rc)
		plt_err("Failed to send VF FLR mbox msg, rc=%d", rc);

	return rc;
}

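/* VF FLR IRQ handler: for every VF with a pending function level reset,
 * notify AF, signal FLR completion to the hardware and re-enable the
 * per-VF FLR interrupt.
 */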
static void
roc_pf_vf_flr_irq(void *param)
{
	struct dev *dev = (struct dev *)param;
	uint16_t max_vf, vf;
	uintptr_t bar2;
	uint64_t intr;
	int i;

	max_vf = (dev->maxvf > 0) ? dev->maxvf : 64;
	bar2 = dev->bar2;

	plt_base_dbg("FLR VF interrupt: max_vf: %d", max_vf);

	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		intr = plt_read64(bar2 + RVU_PF_VFFLR_INTX(i));
		if (!intr)
			continue;

		for (vf = 0; vf < max_vf; vf++) {
			if (!(intr & (1ULL << vf)))
				continue;

			plt_base_dbg("FLR: i :%d intr: 0x%" PRIx64 ", vf-%d", i,
				     intr, (64 * i + vf));
			/* Clear interrupt */
			plt_write64(BIT_ULL(vf), bar2 + RVU_PF_VFFLR_INTX(i));
			/* Disable the interrupt */
			plt_write64(BIT_ULL(vf),
				    bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));
			/* Inform AF about VF reset */
			vf_flr_send_msg(dev, vf);

			/* Signal FLR finish */
			plt_write64(BIT_ULL(vf), bar2 + RVU_PF_VFTRPENDX(i));
			/* Enable interrupt */
			plt_write64(~0ull, bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
		}
	}
}

static int
vf_flr_unregister_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	int i;

	plt_base_dbg("Unregister VF FLR interrupts for %s", pci_dev->name);

	/* HW clear irq */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1CX(i));

	dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
			   RVU_PF_INT_VEC_VFFLR0);

	dev_irq_unregister(intr_handle, roc_pf_vf_flr_irq, dev,
			   RVU_PF_INT_VEC_VFFLR1);

	return 0;
}

int
dev_vf_flr_register_irqs(struct plt_pci_device *pci_dev, struct dev *dev)
{
	struct plt_intr_handle *handle = pci_dev->intr_handle;
	int i, rc;

	plt_base_dbg("Register VF FLR interrupts for %s", pci_dev->name);

	rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
			      RVU_PF_INT_VEC_VFFLR0);
	if (rc)
		plt_err("Failed to init RVU_PF_INT_VEC_VFFLR0 rc=%d", rc);

	rc = dev_irq_register(handle, roc_pf_vf_flr_irq, dev,
			      RVU_PF_INT_VEC_VFFLR1);
	if (rc)
		plt_err("Failed to init RVU_PF_INT_VEC_VFFLR1 rc=%d", rc);

	/* Enable HW interrupt */
	for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INTX(i));
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFTRPENDX(i));
		plt_write64(~0ull, dev->bar2 + RVU_PF_VFFLR_INT_ENA_W1SX(i));
	}
	return 0;
}

static void
clear_rvum_interrupts(struct dev *dev)
{
	uint64_t intr;
	int i;

	if (dev_is_vf(dev)) {
		/* Clear VF mbox interrupt */
		intr = plt_read64(dev->bar2 + RVU_VF_INT);
		if (intr)
			plt_write64(intr, dev->bar2 + RVU_VF_INT);
	} else {
		/* Clear AF PF interrupt line */
		intr = plt_read64(dev->bar2 + RVU_PF_INT);
		if (intr)
			plt_write64(intr, dev->bar2 + RVU_PF_INT);
		for (i = 0; i < MAX_VFPF_DWORD_BITS; ++i) {
			/* Clear MBOX interrupts */
			intr = plt_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(i));
			if (intr)
				plt_write64(intr,
					    dev->bar2 +
						    RVU_PF_VFPF_MBOX_INTX(i));
			/* Clear VF FLR interrupts */
			intr = plt_read64(dev->bar2 + RVU_PF_VFFLR_INTX(i));
			if (intr)
				plt_write64(intr,
					    dev->bar2 + RVU_PF_VFFLR_INTX(i));
		}
	}
}

int
dev_active_vfs(struct dev *dev)
{
	int i, count = 0;

	for (i = 0; i < MAX_VFPF_DWORD_BITS; i++)
		count += __builtin_popcountll(dev->active_vfs[i]);

	return count;
}

static void
dev_vf_hwcap_update(struct plt_pci_device *pci_dev, struct dev *dev)
{
	switch (pci_dev->id.device_id) {
	case PCI_DEVID_CNXK_RVU_PF:
		break;
	case PCI_DEVID_CNXK_RVU_SSO_TIM_VF:
	case PCI_DEVID_CNXK_RVU_NPA_VF:
	case PCI_DEVID_CN10K_RVU_CPT_VF:
	case PCI_DEVID_CN9K_RVU_CPT_VF:
	case PCI_DEVID_CNXK_RVU_AF_VF:
	case PCI_DEVID_CNXK_RVU_VF:
	case PCI_DEVID_CNXK_RVU_SDP_VF:
		dev->hwcap |= DEV_HWCAP_F_VF;
		break;
	}
}

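/* Resolve the base of the VF mailbox region for this PF. From CN10K onwards
 * it sits right after the PF mailbox in BAR4; on CN9K the physical address
 * is read from RVU_PF_VF_BAR4_ADDR and must be mapped via /dev/mem.
 */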
static uintptr_t
dev_vf_mbase_get(struct plt_pci_device *pci_dev, struct dev *dev)
{
	void *vf_mbase = NULL;
	uintptr_t pa;

	if (dev_is_vf(dev))
		return 0;

	/* For CN10K onwards, it is just after PF MBOX */
	if (!roc_model_is_cn9k())
		return dev->bar4 + MBOX_SIZE;

	pa = plt_read64(dev->bar2 + RVU_PF_VF_BAR4_ADDR);
	if (!pa) {
		plt_err("Invalid VF mbox base pa");
		return pa;
	}

	vf_mbase = mbox_mem_map(pa, MBOX_SIZE * pci_dev->max_vfs);
	if (vf_mbase == MAP_FAILED) {
		plt_err("Failed to mmap vf mbase at pa 0x%" PRIxPTR ", errno=%d",
			pa, errno);
		return 0;
	}
	return (uintptr_t)vf_mbase;
}

static void
dev_vf_mbase_put(struct plt_pci_device *pci_dev, uintptr_t vf_mbase)
{
	if (!vf_mbase || !pci_dev->max_vfs || !roc_model_is_cn9k())
		return;

	mbox_mem_unmap((void *)vf_mbase, MBOX_SIZE * pci_dev->max_vfs);
}

static int
dev_setup_shared_lmt_region(struct mbox *mbox, bool valid_iova, uint64_t iova)
{
	struct lmtst_tbl_setup_req *req;

	req = mbox_alloc_msg_lmtst_tbl_setup(mbox);
	if (!req)
		return -ENOSPC;

	/* The pcifunc field carries the primary pcifunc whose LMT address is
	 * to be shared. If the call carries a valid IOVA, the pcifunc field
	 * is unused.
	 */
	req->pcifunc = valid_iova ? 0 : idev_lmt_pffunc_get();
	req->use_local_lmt_region = valid_iova;
	req->lmt_iova = iova;

	return mbox_process(mbox);
}

/* Total number of LMT lines * size of each LMT line */
#define LMT_REGION_SIZE (ROC_NUM_LMT_LINES * ROC_LMT_LINE_SZ)
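/* Set up the LMT line base for this device: BAR2-derived on CN9K; on CN10K
 * either inherit the primary pf_func's shared LMT region or reserve a
 * private, suitably aligned region and register its IOVA with AF.
 */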
static int
dev_lmt_setup(struct dev *dev)
{
	char name[PLT_MEMZONE_NAMESIZE];
	const struct plt_memzone *mz;
	struct idev_cfg *idev;
	int rc;

	if (roc_model_is_cn9k()) {
		dev->lmt_base = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
		return 0;
	}

	/* CN10K onwards */

	/* Use the common LMT region from the second pf_func onwards. */
	if (!dev->disable_shared_lmt && idev_lmt_pffunc_get() &&
	    dev->pf_func != idev_lmt_pffunc_get()) {
		rc = dev_setup_shared_lmt_region(dev->mbox, false, 0);
		if (!rc) {
			/* On success, update the LMT base of this secondary
			 * pf_func with the primary pf_func's LMT base.
			 */
			dev->lmt_base = roc_idev_lmt_base_addr_get();
			return rc;
		}
		plt_err("Failed to setup shared lmt region, pf_func %d err %d; "
			"using a separate LMT region per pf_func",
			dev->pf_func, rc);
	}

	/* Allocate memory for the LMT region */
	sprintf(name, "LMT_MAP%x", dev->pf_func);

	/* The alignment ensures correct masking when resetting to the LMT
	 * base of a core after all LMT lines under that core are used.
	 * An alignment of LMT_REGION_SIZE handles the case where all lines
	 * are used by a single core.
	 */
	mz = plt_lmt_region_reserve_aligned(name, LMT_REGION_SIZE,
					    LMT_REGION_SIZE);
	if (!mz) {
		plt_err("Memory alloc failed: %s", strerror(errno));
		goto fail;
	}

	/* Share the IOVA address with the kernel */
	rc = dev_setup_shared_lmt_region(dev->mbox, true, mz->iova);
	if (rc) {
		errno = rc;
		goto free;
	}

	dev->lmt_base = mz->iova;
	dev->lmt_mz = mz;
	/* The base LMT address should be chosen only from those pci_funcs
	 * which participate in LMT shared mode.
	 */
	if (!dev->disable_shared_lmt) {
		idev = idev_get_cfg();
		if (!idev) {
			errno = EFAULT;
			goto free;
		}

		if (!__atomic_load_n(&idev->lmt_pf_func, __ATOMIC_ACQUIRE)) {
			idev->lmt_base_addr = dev->lmt_base;
			idev->lmt_pf_func = dev->pf_func;
			idev->num_lmtlines = RVU_LMT_LINE_MAX;
		}
	}

	return 0;
free:
	plt_memzone_free(mz);
fail:
	return -errno;
}

static bool
dev_cache_line_size_valid(void)
{
	if (roc_model_is_cn9k()) {
		if (PLT_CACHE_LINE_SIZE != 128) {
			plt_err("Cache line size of %d is wrong for CN9K",
				PLT_CACHE_LINE_SIZE);
			return false;
		}
	} else if (roc_model_is_cn10k()) {
		if (PLT_CACHE_LINE_SIZE == 128) {
			plt_warn("Cache line size of %d might affect performance",
				 PLT_CACHE_LINE_SIZE);
		} else if (PLT_CACHE_LINE_SIZE != 64) {
			plt_err("Cache line size of %d is wrong for CN10K",
				PLT_CACHE_LINE_SIZE);
			return false;
		}
	}

	return true;
}

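/* One-time device bring-up: map BARs, initialize the AF <-> PF (or PF <-> VF)
 * mailboxes, register mailbox and FLR interrupts, and set up NPA and LMT.
 *
 * Typical call flow from a device probe path (sketch only; the surrounding
 * names are illustrative, not part of this API):
 *
 *	rc = dev_init(&priv->dev, pci_dev);
 *	if (rc)
 *		goto fail;
 *	...
 *	dev_fini(&priv->dev, pci_dev);
 */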
int
dev_init(struct dev *dev, struct plt_pci_device *pci_dev)
{
	int direction, up_direction, rc;
	uintptr_t bar2, bar4, mbox;
	uintptr_t vf_mbase = 0;
	uint64_t intr_offset;

	if (!dev_cache_line_size_valid())
		return -EFAULT;

	bar2 = (uintptr_t)pci_dev->mem_resource[2].addr;
	bar4 = (uintptr_t)pci_dev->mem_resource[4].addr;
	if (bar2 == 0 || bar4 == 0) {
		plt_err("Failed to get PCI bars");
		rc = -ENODEV;
		goto error;
	}

	/* Trigger a fault on the bar2 and bar4 regions
	 * to avoid a BUG_ON in remap_pfn_range()
	 * in recent kernels.
	 */
	*(volatile uint64_t *)bar2;
	*(volatile uint64_t *)bar4;

	/* Check that the ROC model is supported */
	if (roc_model->flag == 0) {
		rc = UTIL_ERR_INVALID_MODEL;
		goto error;
	}

	dev->maxvf = pci_dev->max_vfs;
	dev->bar2 = bar2;
	dev->bar4 = bar4;
	dev_vf_hwcap_update(pci_dev, dev);

	if (dev_is_vf(dev)) {
		mbox = (roc_model_is_cn9k() ?
			bar4 : (bar2 + RVU_VF_MBOX_REGION));
		direction = MBOX_DIR_VFPF;
		up_direction = MBOX_DIR_VFPF_UP;
		intr_offset = RVU_VF_INT;
	} else {
		mbox = bar4;
		direction = MBOX_DIR_PFAF;
		up_direction = MBOX_DIR_PFAF_UP;
		intr_offset = RVU_PF_INT;
	}

	/* Clear all RVUM interrupts */
	clear_rvum_interrupts(dev);

	/* Initialize the local mbox */
	rc = mbox_init(&dev->mbox_local, mbox, bar2, direction, 1, intr_offset);
	if (rc)
		goto error;
	dev->mbox = &dev->mbox_local;

	rc = mbox_init(&dev->mbox_up, mbox, bar2, up_direction, 1, intr_offset);
	if (rc)
		goto mbox_fini;

	/* Register mbox interrupts */
	rc = dev_mbox_register_irq(pci_dev, dev);
	if (rc)
		goto mbox_fini;

	/* Check the readiness of PF/VF */
	rc = send_ready_msg(dev->mbox, &dev->pf_func);
	if (rc)
		goto mbox_unregister;

	dev->pf = dev_get_pf(dev->pf_func);
	dev->vf = dev_get_vf(dev->pf_func);
	memset(&dev->active_vfs, 0, sizeof(dev->active_vfs));

	/* Allocate memory for device ops */
	dev->ops = plt_zmalloc(sizeof(struct dev_ops), 0);
	if (dev->ops == NULL) {
		rc = -ENOMEM;
		goto mbox_unregister;
	}

	/* VF devices found under this PF device */
	if (pci_dev->max_vfs > 0) {
		/* Remap the mbox area for all VFs */
		vf_mbase = dev_vf_mbase_get(pci_dev, dev);
		if (!vf_mbase) {
			rc = -ENODEV;
			goto mbox_unregister;
		}
		/* Init mbox object */
		rc = mbox_init(&dev->mbox_vfpf, vf_mbase, bar2, MBOX_DIR_PFVF,
			       pci_dev->max_vfs, intr_offset);
		if (rc)
			goto iounmap;

		/* PF -> VF UP messages */
		rc = mbox_init(&dev->mbox_vfpf_up, vf_mbase, bar2,
			       MBOX_DIR_PFVF_UP, pci_dev->max_vfs, intr_offset);
		if (rc)
			goto iounmap;
	}

	/* Register VF-FLR irq handlers */
	if (!dev_is_vf(dev)) {
		rc = dev_vf_flr_register_irqs(pci_dev, dev);
		if (rc)
			goto iounmap;
	}
	dev->mbox_active = 1;

	rc = npa_lf_init(dev, pci_dev);
	if (rc)
		goto iounmap;

	/* Setup LMT line base */
	rc = dev_lmt_setup(dev);
	if (rc)
		goto iounmap;

	return rc;
iounmap:
	dev_vf_mbase_put(pci_dev, vf_mbase);
mbox_unregister:
	mbox_unregister_irq(pci_dev, dev);
	if (dev->ops)
		plt_free(dev->ops);
mbox_fini:
	mbox_fini(dev->mbox);
	mbox_fini(&dev->mbox_up);
error:
	return rc;
}

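/* Tear down a device initialized with dev_init(): release NPA and LMT
 * resources, unregister mailbox and FLR interrupts, free the mailboxes and
 * disable the MSI-X vectors.
 */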
int
dev_fini(struct dev *dev, struct plt_pci_device *pci_dev)
{
	struct plt_intr_handle *intr_handle = pci_dev->intr_handle;
	struct mbox *mbox;

	/* Check if this dev hosts npalf and has 1+ refs */
	if (idev_npa_lf_active(dev) > 1)
		return -EAGAIN;

	/* Clear references to this pci dev */
	npa_lf_fini();

	/* Release memory allocated for the LMT region */
	if (dev->lmt_mz)
		plt_memzone_free(dev->lmt_mz);

	mbox_unregister_irq(pci_dev, dev);

	if (!dev_is_vf(dev))
		vf_flr_unregister_irqs(pci_dev, dev);
	/* Release PF - VF */
	mbox = &dev->mbox_vfpf;
	if (mbox->hwbase && mbox->dev)
		dev_vf_mbase_put(pci_dev, mbox->hwbase);

	if (dev->ops)
		plt_free(dev->ops);

	mbox_fini(mbox);
	mbox = &dev->mbox_vfpf_up;
	mbox_fini(mbox);

	/* Release PF - AF */
	mbox = dev->mbox;
	mbox_fini(mbox);
	mbox = &dev->mbox_up;
	mbox_fini(mbox);
	dev->mbox_active = 0;

	/* Disable MSIX vectors */
	dev_irqs_disable(intr_handle);
	return 0;
}