/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2024 Cisco Systems, Inc.  All rights reserved.
 */

#include <rte_memzone.h>
#include <ethdev_driver.h>

#include "enic_compat.h"
#include "enic.h"
#include "enic_sriov.h"

static int enic_check_chan_capability(struct enic *enic);
static int enic_register_vf(struct enic *enic);
static void enic_unregister_vf(struct enic *enic);

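/*
 * Printable names for the admin channel message types, indexed by
 * enum enic_mbox_msg_type. Used only for logging; keep in sync with
 * the enum.
 */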
const char *msg_type_str[ENIC_MBOX_MAX] = {
	"VF_CAPABILITY_REQUEST",
	"VF_CAPABILITY_REPLY",
	"VF_REGISTER_REQUEST",
	"VF_REGISTER_REPLY",
	"VF_UNREGISTER_REQUEST",
	"VF_UNREGISTER_REPLY",
	"PF_LINK_STATE_NOTIF",
	"PF_LINK_STATE_ACK",
	"PF_GET_STATS_REQUEST",
	"PF_GET_STATS_REPLY",
	"VF_ADD_DEL_MAC_REQUEST",
	"VF_ADD_DEL_MAC_REPLY",
	"PF_SET_ADMIN_MAC_NOTIF",
	"PF_SET_ADMIN_MAC_ACK",
	"VF_SET_PKT_FILTER_FLAGS_REQUEST",
	"VF_SET_PKT_FILTER_FLAGS_REPLY",
};

static const char *enic_mbox_msg_type_str(enum enic_mbox_msg_type type)
{
	if (type >= 0 && type < ENIC_MBOX_MAX)
		return msg_type_str[type];
	return "INVALID";
}

static bool admin_chan_enabled(struct enic *enic)
{
	return enic->admin_chan_enabled;
}

static void lock_admin_chan(struct enic *enic)
{
	pthread_mutex_lock(&enic->admin_chan_lock);
}

static void unlock_admin_chan(struct enic *enic)
{
	pthread_mutex_unlock(&enic->admin_chan_lock);
}

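/*
 * Set up the admin channel RQ and its CQ. The RQ is populated once
 * with fixed-size buffers carved out of a single IOVA-contiguous
 * memzone; enic_admin_rq_peek() later recycles those buffers in
 * place, so no mbufs are involved.
 */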
static int enic_enable_admin_rq(struct enic *enic)
{
	uint32_t rqbuf_size = ENIC_ADMIN_BUF_SIZE;
	uint32_t desc_count = 256;
	struct rq_enet_desc *rqd;
	struct vnic_rq *rq;
	struct vnic_cq *cq;
	rte_iova_t dma;
	uint32_t i;
	int cq_idx;
	int err = 0;
	char name[RTE_MEMZONE_NAMESIZE];
	static int instance;

	ENICPMD_FUNC_TRACE();
	rq = &enic->admin_rq;
	cq_idx = ENIC_ADMIN_RQ_CQ;
	cq = &enic->admin_cq[cq_idx];
	err = vnic_admin_rq_alloc(enic->vdev, rq, desc_count,
				  sizeof(struct rq_enet_desc));
	if (err) {
		dev_err(enic, "failed to allocate admin RQ\n");
		return err;
	}
	err = vnic_admin_cq_alloc(enic->vdev, cq, cq_idx,
		SOCKET_ID_ANY, desc_count, sizeof(struct cq_enet_rq_desc));
	if (err) {
		dev_err(enic, "failed to allocate CQ for admin RQ\n");
		return err;
	}

	vnic_rq_init(rq, cq_idx, 0, 0);
	vnic_cq_clean(cq);
	vnic_cq_init(cq,
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     1 /* interrupt_enable */,
		     1 /* cq_entry_enable */,
		     0 /* cq_message_enable */,
		     ENICPMD_LSC_INTR_OFFSET /* interrupt_offset */,
		     0 /* cq_message_addr */);
	vnic_rq_enable(rq);

	/*
	 * Allocate the RQ DMA buffers once. The admin channel reuses
	 * these buffers for its lifetime and never allocates new ones.
	 */
	snprintf((char *)name, sizeof(name), "admin-rq-buf-%d", instance++);
	rq->admin_msg_rz = rte_memzone_reserve_aligned((const char *)name,
			desc_count * rqbuf_size, SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
	if (!rq->admin_msg_rz)
		return -ENOMEM;

	memset(rq->admin_msg_rz->addr, 0, desc_count * rqbuf_size);

	dma = rq->admin_msg_rz->iova;
	rqd = rq->ring.descs;
	for (i = 0; i < desc_count; i++) {
		rq_enet_desc_enc(rqd, dma, RQ_ENET_TYPE_ONLY_SOP,
				 rqbuf_size);
		dma += rqbuf_size;
		rqd++;
	}
	rte_rmb();
	rq->posted_index = rq->ring.desc_count - 1;
	rq->admin_next_idx = 0;
	ENICPMD_LOG(DEBUG, "admin rq posted_index %u", rq->posted_index);
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	rte_wmb();
	return err;
}

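/*
 * Set up the admin channel WQ and its CQ. The CQ runs in message mode
 * (cq_message_enable=1): completions are reported by writing to the
 * small cqmsg memzone rather than through CQ entries. Outgoing message
 * payloads are staged in per-descriptor slots of admin_msg_rz.
 */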
static int enic_enable_admin_wq(struct enic *enic)
{
	uint32_t wqbuf_size = ENIC_ADMIN_BUF_SIZE;
	uint32_t desc_count = 256;
	struct vnic_wq *wq;
	struct vnic_cq *cq;
	int cq_idx;
	int err = 0;
	char name[RTE_MEMZONE_NAMESIZE];
	static int instance;

	ENICPMD_FUNC_TRACE();
	wq = &enic->admin_wq;
	cq_idx = ENIC_ADMIN_WQ_CQ;
	cq = &enic->admin_cq[cq_idx];
	err = vnic_admin_wq_alloc(enic->vdev, wq, desc_count, sizeof(struct wq_enet_desc));
	if (err) {
		dev_err(enic, "failed to allocate admin WQ\n");
		return err;
	}
	err = vnic_admin_cq_alloc(enic->vdev, cq, cq_idx,
		SOCKET_ID_ANY, desc_count, sizeof(struct cq_enet_wq_desc));
	if (err) {
		vnic_wq_free(wq);
		dev_err(enic, "failed to allocate CQ for admin WQ\n");
		return err;
	}
	snprintf((char *)name, sizeof(name),
		 "vnic_cqmsg-%s-admin-wq-%d", enic->bdf_name, instance++);
	wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
			sizeof(uint32_t), SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
	if (!wq->cqmsg_rz)
		return -ENOMEM;

	vnic_wq_init(wq, cq_idx, 0, 0);
	vnic_cq_clean(cq);
	vnic_cq_init(cq,
		     0 /* flow_control_enable */,
		     1 /* color_enable */,
		     0 /* cq_head */,
		     0 /* cq_tail */,
		     1 /* cq_tail_color */,
		     0 /* interrupt_enable */,
		     0 /* cq_entry_enable */,
		     1 /* cq_message_enable */,
		     0 /* interrupt offset */,
		     (uint64_t)wq->cqmsg_rz->iova);

	vnic_wq_enable(wq);

	snprintf((char *)name, sizeof(name), "admin-wq-buf-%d", instance++);
	wq->admin_msg_rz = rte_memzone_reserve_aligned((const char *)name,
			desc_count * wqbuf_size, SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG, ENIC_PAGE_SIZE);
	if (!wq->admin_msg_rz)
		return -ENOMEM;

	return err;
}

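/*
 * Post one mailbox message to the PF. The message is copied into the
 * DMA slot matching the current WQ head and sent as a single-descriptor
 * packet with loopback=1, which directs it to the PF rather than onto
 * the wire.
 */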
static void enic_admin_wq_post(struct enic *enic, void *msg)
{
	struct wq_enet_desc *desc;
	struct enic_mbox_hdr *hdr;
	unsigned int head_idx;
	struct vnic_wq *wq;
	rte_iova_t dma;
	int msg_size;
	void *va;

	ENICPMD_FUNC_TRACE();
	wq = &enic->admin_wq;
	hdr = msg;
	msg_size = hdr->msg_len;
	RTE_VERIFY(msg_size < ENIC_ADMIN_BUF_SIZE);

	head_idx = wq->head_idx;
	desc = (struct wq_enet_desc *)wq->ring.descs;
	desc = desc + head_idx;

	/* Copy message to pre-allocated WQ DMA buffer */
	dma = wq->admin_msg_rz->iova + ENIC_ADMIN_BUF_SIZE * head_idx;
	va = (void *)((char *)wq->admin_msg_rz->addr + ENIC_ADMIN_BUF_SIZE * head_idx);
	memcpy(va, msg, msg_size);

	ENICPMD_LOG(DEBUG, "post admin wq msg at %u", head_idx);

	/* Send message to PF: loopback=1 */
	wq_enet_desc_enc(desc, dma, msg_size,
			 0 /* mss */,
			 0 /* header_len */,
			 0 /* offload_mode */, 1 /* eop */, 1 /* cq */,
			 0 /* fcoe */,
			 1 /* vlan_tag_insert */,
			 0 /* vlan_id */,
			 1 /* loopback */);
	head_idx = enic_ring_incr(wq->ring.desc_count, head_idx);
	rte_wmb();
	iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
	wq->head_idx = head_idx;
}

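/*
 * Fill in the common mailbox header: destination (always the PF),
 * source (this VF's index), the type-specific message length, and a
 * running sequence number. The whole message is zeroed first.
 */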
static void enic_mbox_init_msg_hdr(struct enic *enic, void *msg,
				   enum enic_mbox_msg_type type)
{
	struct enic_mbox_hdr *hdr;
	int len;

	switch (type) {
	case ENIC_MBOX_VF_CAPABILITY_REQUEST:
		len = sizeof(struct enic_mbox_vf_capability_msg);
		break;
	case ENIC_MBOX_VF_REGISTER_REQUEST:
		len = sizeof(struct enic_mbox_vf_register_msg);
		break;
	case ENIC_MBOX_VF_UNREGISTER_REQUEST:
		len = sizeof(struct enic_mbox_vf_unregister_msg);
		break;
	case ENIC_MBOX_VF_SET_PKT_FILTER_FLAGS_REQUEST:
		len = sizeof(struct enic_mbox_vf_set_pkt_filter_flags_msg);
		break;
	case ENIC_MBOX_PF_LINK_STATE_ACK:
		len = sizeof(struct enic_mbox_pf_link_state_ack_msg);
		break;
	case ENIC_MBOX_PF_GET_STATS_REPLY:
		len = sizeof(struct enic_mbox_pf_get_stats_reply_msg);
		break;
	case ENIC_MBOX_VF_ADD_DEL_MAC_REQUEST:
		len = sizeof(struct enic_mbox_vf_add_del_mac_msg);
		break;
	default:
		RTE_VERIFY(false);
		break;
	}
	memset(msg, 0, len);
	hdr = msg;
	hdr->dst_vnic_id = ENIC_MBOX_DST_PF;
	hdr->src_vnic_id = enic->admin_chan_vf_id;
	hdr->msg_type = type;
	hdr->flags = 0;
	hdr->msg_len = len;
	hdr->msg_num = ++enic->admin_chan_msg_num;
}

/*
 * See if a new message has arrived on the admin RQ. If so, copy it out.
 */
static int enic_admin_rq_peek(struct enic *enic, uint8_t *msg, int *msg_len)
{
	const int desc_size = sizeof(struct cq_enet_rq_desc);
	volatile struct cq_desc *cqd_ptr;
	uint16_t cq_idx, rq_idx, rq_num;
	struct cq_enet_rq_desc *cqrd;
	uint16_t seg_length;
	struct cq_desc cqd;
	struct vnic_rq *rq;
	struct vnic_cq *cq;
	uint8_t tc, color;
	int next_idx;
	void *va;

	rq = &enic->admin_rq;
	cq = &enic->admin_cq[ENIC_ADMIN_RQ_CQ];
	cq_idx = cq->to_clean;
	cqd_ptr = (struct cq_desc *)((uintptr_t)(cq->ring.descs) +
				     (uintptr_t)cq_idx * desc_size);
	color = cq->last_color;
	tc = *(volatile uint8_t *)((uintptr_t)cqd_ptr + desc_size - 1);
	/* No new packet, return */
	if ((tc & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
		return -EAGAIN;
	ENICPMD_LOG(DEBUG, "admin RQ has a completion cq_idx %u color %u", cq_idx, color);

	cqd = *cqd_ptr;
	cqrd = (struct cq_enet_rq_desc *)&cqd;
	seg_length = rte_le_to_cpu_16(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;

	rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
	rq_idx = (cqd.completed_index & CQ_DESC_COMP_NDX_MASK);
	ENICPMD_LOG(DEBUG, "rq_num %u rq_idx %u len %u", rq_num, rq_idx, seg_length);

	RTE_VERIFY(rq_num == 0);
	next_idx = rq->admin_next_idx;
	RTE_VERIFY(rq_idx == next_idx);
	rq->admin_next_idx = enic_ring_incr(rq->ring.desc_count, next_idx);

	/* Copy out the received message */
	va = (void *)((char *)rq->admin_msg_rz->addr + ENIC_ADMIN_BUF_SIZE * next_idx);
	*msg_len = seg_length;
	memset(msg, 0, ENIC_ADMIN_BUF_SIZE);
	memcpy(msg, va, seg_length);
	memset(va, 0, ENIC_ADMIN_BUF_SIZE);

	/* Advance CQ */
	cq_idx++;
	if (unlikely(cq_idx == cq->ring.desc_count)) {
		cq_idx = 0;
		cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
	}
	cq->to_clean = cq_idx;

	/* Recycle and post RQ buffer */
	rq->posted_index = enic_ring_add(rq->ring.desc_count,
					 rq->posted_index,
					 1);
	rte_wmb();
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	rte_wmb();
	return 0;
}

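/*
 * Bring up the admin channel and perform the initial handshake with
 * the PF driver: enable the admin QP, discover the VF index via the
 * SR-IOV stats page, set up the RQ/WQ pair, then exchange
 * VF_CAPABILITY followed by VF_REGISTER. If the PF driver turns out
 * to lack adequate support, the channel is torn down again and the
 * VF runs in backward compatible mode.
 */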
int enic_enable_vf_admin_chan(struct enic *enic)
{
	struct vnic_sriov_stats *stats;
	int err;

	ENICPMD_FUNC_TRACE();
	pthread_mutex_init(&enic->admin_chan_lock, NULL);
	err = vnic_dev_enable_admin_qp(enic->vdev, 1);
	if (err) {
		ENICPMD_LOG(ERR, "failed to enable admin QP type");
		goto out;
	}
	err = vnic_dev_alloc_sriov_stats_mem(enic->vdev);
	if (err) {
		ENICPMD_LOG(ERR, "failed to allocate SR-IOV stats buffer");
		goto out;
	}
	err = vnic_dev_sriov_stats(enic->vdev, &stats);
	if (err) {
		ENICPMD_LOG(ERR, "failed to get SR-IOV stats");
		goto out;
	}
	enic->admin_chan_vf_id = stats->vf_index;
	enic->sriov_vf_soft_rx_stats = !!stats->sriov_host_rx_stats;
	ENICPMD_LOG(INFO, "SR-IOV VF index %u %s stats",
		    stats->vf_index, enic->sriov_vf_soft_rx_stats ? "soft" : "HW");
	err = enic_enable_admin_rq(enic);
	if (err) {
		ENICPMD_LOG(ERR, "failed to enable admin RQ");
		goto out;
	}
	err = enic_enable_admin_wq(enic);
	if (err) {
		ENICPMD_LOG(ERR, "failed to enable admin WQ");
		goto out;
	}
	enic->admin_chan_enabled = true;
	/* Now the admin channel is ready. Send CAPABILITY as the first message */
	err = enic_check_chan_capability(enic);
	if (err) {
		ENICPMD_LOG(ERR, "failed to exchange VF_CAPABILITY message");
		goto out;
	}
	if (enic->sriov_vf_compat_mode) {
		enic_disable_vf_admin_chan(enic, false);
		return 0;
	}
	/* Then register with the PF driver */
	err = enic_register_vf(enic);
	if (err) {
		ENICPMD_LOG(ERR, "failed to perform VF_REGISTER");
		goto out;
	}
	/*
	 * If we have to count RX packets ourselves (soft stats), do
	 * not use the avx2 receive handlers
	 */
	if (enic->sriov_vf_soft_rx_stats)
		enic->enable_avx2_rx = 0;
out:
	return err;
}

int enic_disable_vf_admin_chan(struct enic *enic, bool unregister)
{
	struct vnic_rq *rq;
	struct vnic_wq *wq;
	struct vnic_cq *cq;

	ENICPMD_FUNC_TRACE();
	if (unregister)
		enic_unregister_vf(enic);
	enic->sriov_vf_soft_rx_stats = false;

	rq = &enic->admin_rq;
	vnic_rq_disable(rq);
	rte_memzone_free(rq->admin_msg_rz);
	vnic_rq_free(rq);

	cq = &enic->admin_cq[ENIC_ADMIN_RQ_CQ];
	vnic_cq_free(cq);

	wq = &enic->admin_wq;
	vnic_wq_disable(wq);
	rte_memzone_free(wq->admin_msg_rz);
	rte_memzone_free(wq->cqmsg_rz);
	vnic_wq_free(wq);

	cq = &enic->admin_cq[ENIC_ADMIN_WQ_CQ];
	vnic_cq_free(cq);

	enic->admin_chan_enabled = false;
	return 0;
}

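/*
 * Sanity-check the routing fields of a received message: it must be
 * addressed from the PF to this VF.
 */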
static int common_hdr_check(struct enic *enic, void *msg)
{
	struct enic_mbox_hdr *hdr;

	hdr = (struct enic_mbox_hdr *)msg;
	ENICPMD_LOG(DEBUG, "RX dst %u src %u type %u(%s) flags %u len %u num %" PRIu64,
		    hdr->dst_vnic_id, hdr->src_vnic_id, hdr->msg_type,
		    enic_mbox_msg_type_str(hdr->msg_type),
		    hdr->flags, hdr->msg_len, hdr->msg_num);
	if (hdr->dst_vnic_id != enic->admin_chan_vf_id ||
	    hdr->src_vnic_id != ENIC_MBOX_DST_PF) {
		ENICPMD_LOG(ERR, "unexpected dst/src in reply: dst=%u (expected=%u) src=%u",
			    hdr->dst_vnic_id, enic->admin_chan_vf_id, hdr->src_vnic_id);
		return -EINVAL;
	}
	return 0;
}

static int common_reply_check(__rte_unused struct enic *enic, void *msg,
			      enum enic_mbox_msg_type type)
{
	struct enic_mbox_generic_reply_msg *reply;
	struct enic_mbox_hdr *hdr;

	hdr = (struct enic_mbox_hdr *)msg;
	reply = (struct enic_mbox_generic_reply_msg *)(hdr + 1);
	if (hdr->msg_type != type) {
		ENICPMD_LOG(ERR, "unexpected reply: expected=%u received=%u",
			    type, hdr->msg_type);
		return -EINVAL;
	}
	if (reply->ret_major != 0) {
		ENICPMD_LOG(ERR, "error reply: type=%u(%s) ret_major/minor=%u/%u",
			    type, enic_mbox_msg_type_str(type),
			    reply->ret_major, reply->ret_minor);
		return -EINVAL;
	}
	return 0;
}

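/*
 * PF_LINK_STATE_NOTIF: the PF driver reports the VF link state over
 * the admin channel. Cache it via rte_eth_linkstatus_set(), fire the
 * LSC callback, and acknowledge the notification.
 */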
static void handle_pf_link_state_notif(struct enic *enic, void *msg)
{
	struct enic_mbox_pf_link_state_notif_msg *notif = msg;
	struct enic_mbox_pf_link_state_ack_msg ack;
	struct rte_eth_link link;

	ENICPMD_FUNC_TRACE();
	ENICPMD_LOG(DEBUG, "PF_LINK_STATE_NOTIF: link_state=%u", notif->link_state);

	/*
	 * Do not use enic_link_update() here. The Linux PF driver
	 * disables link-status notify in the firmware and sends this
	 * admin message instead, so notify does not work; remember
	 * the state reported by the PF.
	 */
	memset(&link, 0, sizeof(link));
	link.link_status = notif->link_state ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
	link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
	link.link_speed = vnic_dev_port_speed(enic->vdev);
	rte_eth_linkstatus_set(enic->rte_dev, &link);
	rte_eth_dev_callback_process(enic->rte_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	ENICPMD_LOG(DEBUG, "eth_linkstatus: speed=%u duplex=%u autoneg=%u status=%u",
		    link.link_speed, link.link_duplex, link.link_autoneg,
		    link.link_status);

	enic_mbox_init_msg_hdr(enic, &ack, ENIC_MBOX_PF_LINK_STATE_ACK);
	enic_admin_wq_post(enic, &ack);
	ENICPMD_LOG(DEBUG, "sent PF_LINK_STATE_ACK");
}

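/*
 * PF_GET_STATS_REQUEST: the PF driver pulls stats from the VF. RX
 * counters come from the PMD's soft per-RQ counters; TX counters come
 * from the firmware stats dump.
 */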
static void handle_pf_get_stats(struct enic *enic, void *msg)
{
	struct enic_mbox_pf_get_stats_reply_msg reply;
	struct enic_mbox_pf_get_stats_msg *req;
	struct vnic_stats *hw_stats;
	struct vnic_stats *vs;
	unsigned int i;

	ENICPMD_FUNC_TRACE();
	req = msg;
	ENICPMD_LOG(DEBUG, "flags=0x%x", req->flags);
	enic_mbox_init_msg_hdr(enic, &reply, ENIC_MBOX_PF_GET_STATS_REPLY);
	vs = &reply.stats.vnic_stats;
	if (req->flags & ENIC_MBOX_GET_STATS_RX) {
		for (i = 0; i < enic->rq_count; i++) {
			vs->rx.rx_frames_ok += enic->rq[i].soft_stats_pkts;
			vs->rx.rx_bytes_ok += enic->rq[i].soft_stats_bytes;
		}
		vs->rx.rx_frames_total = vs->rx.rx_frames_ok;
		reply.stats.num_rx_stats = 6;
	}
	if (req->flags & ENIC_MBOX_GET_STATS_TX) {
		vnic_dev_stats_dump(enic->vdev, &hw_stats);
		vs->tx = hw_stats->tx;
		reply.stats.num_tx_stats = 11; /* all fields up to rsvd */
	}
	enic_admin_wq_post(enic, &reply);
	ENICPMD_LOG(DEBUG, "sent PF_GET_STATS_REPLY");
}

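/*
 * Dispatch an unsolicited message (i.e. not a reply we are waiting
 * for) from the PF driver.
 */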
static void handle_pf_request_msg(struct enic *enic, void *msg)
{
	struct enic_mbox_hdr *hdr = msg;

	switch (hdr->msg_type) {
	case ENIC_MBOX_PF_LINK_STATE_NOTIF:
		handle_pf_link_state_notif(enic, msg);
		break;
	case ENIC_MBOX_PF_GET_STATS_REQUEST:
		handle_pf_get_stats(enic, msg);
		break;
	case ENIC_MBOX_PF_SET_ADMIN_MAC_NOTIF:
		ENICPMD_LOG(WARNING, "Ignore PF_SET_ADMIN_MAC_NOTIF from PF. The PF driver has changed the VF MAC address. Reload the driver to use the new address.");
		break;
	default:
		ENICPMD_LOG(WARNING, "received unexpected non-request message from PF: received=%u(%s)",
			    hdr->msg_type, enic_mbox_msg_type_str(hdr->msg_type));
		break;
	}
}

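/*
 * Drain the admin RQ and handle any PF requests found. The admin RQ's
 * CQ is set up with ENICPMD_LSC_INTR_OFFSET (see enic_enable_admin_rq),
 * so this is expected to run from the same interrupt path as link
 * status changes; the lock keeps it from racing with request/reply
 * exchanges such as vf_set_packet_filter().
 */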
void enic_poll_vf_admin_chan(struct enic *enic)
{
	uint8_t msg[ENIC_ADMIN_BUF_SIZE];
	int len;

	ENICPMD_FUNC_TRACE();
	lock_admin_chan(enic);
	while (!enic_admin_rq_peek(enic, msg, &len)) {
		if (common_hdr_check(enic, msg))
			continue;
		handle_pf_request_msg(enic, msg);
	}
	unlock_admin_chan(enic);
}

/*
 * Poll and receive messages until the wanted reply message arrives.
 * Any PF requests received while waiting are handled along the way.
 */
#define RECV_REPLY_TIMEOUT 5 /* seconds */
static int recv_reply(struct enic *enic, void *msg, enum enic_mbox_msg_type type)
{
	struct enic_mbox_hdr *hdr;
	uint64_t start, end; /* seconds */
	int err, len;

	start = rte_rdtsc() / rte_get_tsc_hz();
again:
	end = rte_rdtsc() / rte_get_tsc_hz();
	if (end - start > RECV_REPLY_TIMEOUT) {
		ENICPMD_LOG(WARNING, "timed out while waiting for reply %u(%s)",
			    type, enic_mbox_msg_type_str(type));
		return -ETIMEDOUT;
	}
	if (enic_admin_rq_peek(enic, msg, &len))
		goto again;
	err = common_hdr_check(enic, msg);
	if (err)
		goto out;

	/* If not the reply we are looking for, process it and poll again */
	hdr = msg;
	if (hdr->msg_type != type) {
		handle_pf_request_msg(enic, msg);
		goto again;
	}

	err = common_reply_check(enic, msg, type);
out:
	return err;
}

/*
 * Ask the PF driver for its level of admin channel support. If the
 * answer is version 0 (minimal) or no channel support at all (the
 * request times out), work in backward compatibility mode.
 *
 * In compat mode, trust mode does not work because the PF driver does
 * not support it: for example, the VF can neither enable promiscuous
 * mode nor change its MAC address.
 */
static int enic_check_chan_capability(struct enic *enic)
{
	struct enic_mbox_vf_capability_reply_msg *reply;
	struct enic_mbox_vf_capability_msg req;
	uint8_t msg[ENIC_ADMIN_BUF_SIZE];
	int err;

	ENICPMD_FUNC_TRACE();

	enic_mbox_init_msg_hdr(enic, &req.hdr, ENIC_MBOX_VF_CAPABILITY_REQUEST);
	req.version = ENIC_MBOX_CAP_VERSION_1;
	enic_admin_wq_post(enic, &req);
	ENICPMD_LOG(DEBUG, "sent VF_CAPABILITY");

	err = recv_reply(enic, msg, ENIC_MBOX_VF_CAPABILITY_REPLY);
	if (err == -ETIMEDOUT)
		ENICPMD_LOG(WARNING, "PF driver has not responded to CAPABILITY request. Please update the host PF driver");
	else if (err)
		goto out;
	ENICPMD_LOG(DEBUG, "VF_CAPABILITY_REPLY ok");
	reply = (struct enic_mbox_vf_capability_reply_msg *)msg;
	enic->admin_pf_cap_version = reply->version;
	ENICPMD_LOG(DEBUG, "PF admin channel capability version %u",
		    enic->admin_pf_cap_version);
	if (err == -ETIMEDOUT || enic->admin_pf_cap_version == ENIC_MBOX_CAP_VERSION_0) {
		ENICPMD_LOG(WARNING, "PF driver does not have adequate admin channel support. VF works in backward compatible mode");
		err = 0;
		enic->sriov_vf_compat_mode = true;
	} else if (enic->admin_pf_cap_version == ENIC_MBOX_CAP_VERSION_INVALID) {
		ENICPMD_LOG(WARNING, "Unexpected version in CAPABILITY_REPLY from PF driver. cap_version %u",
			    enic->admin_pf_cap_version);
		err = -EINVAL;
	}
out:
	return err;
}

/*
 * The VF driver must 'register' with the PF driver before sending any
 * devcmd requests. Once registered, the VF driver must be ready to
 * process messages from the PF driver.
 */
static int enic_register_vf(struct enic *enic)
{
	struct enic_mbox_vf_register_msg req;
	uint8_t msg[ENIC_ADMIN_BUF_SIZE];
	int err;

	ENICPMD_FUNC_TRACE();
	enic_mbox_init_msg_hdr(enic, &req, ENIC_MBOX_VF_REGISTER_REQUEST);
	enic_admin_wq_post(enic, &req);
	ENICPMD_LOG(DEBUG, "sent VF_REGISTER");
	err = recv_reply(enic, msg, ENIC_MBOX_VF_REGISTER_REPLY);
	if (err)
		goto out;
	ENICPMD_LOG(DEBUG, "VF_REGISTER_REPLY ok");
out:
	return err;
}

/*
 * The PF driver expects an unregister message when the VF driver
 * closes, but it is not mandatory: the VF driver may, for example,
 * crash without sending it, and everything still works fine.
 */
static void enic_unregister_vf(struct enic *enic)
{
	struct enic_mbox_vf_unregister_msg req;
	uint8_t msg[ENIC_ADMIN_BUF_SIZE];

	ENICPMD_FUNC_TRACE();
	enic_mbox_init_msg_hdr(enic, &req, ENIC_MBOX_VF_UNREGISTER_REQUEST);
	enic_admin_wq_post(enic, &req);
	ENICPMD_LOG(DEBUG, "sent VF_UNREGISTER");
	if (!recv_reply(enic, msg, ENIC_MBOX_VF_UNREGISTER_REPLY))
		ENICPMD_LOG(DEBUG, "VF_UNREGISTER_REPLY ok");
}

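/*
 * Send VF_SET_PKT_FILTER_FLAGS to the PF: the mailbox replacement for
 * the CMD_PACKET_FILTER devcmd on a VF. The PF driver decides whether
 * to honor the request; e.g. promisc presumably requires trust mode
 * (see enic_check_chan_capability above).
 */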
static int vf_set_packet_filter(struct enic *enic, int directed, int multicast,
				int broadcast, int promisc, int allmulti)
{
	struct enic_mbox_vf_set_pkt_filter_flags_msg req;
	uint8_t msg[ENIC_ADMIN_BUF_SIZE];
	uint16_t flags;
	int err;

	ENICPMD_FUNC_TRACE();
	enic_mbox_init_msg_hdr(enic, &req, ENIC_MBOX_VF_SET_PKT_FILTER_FLAGS_REQUEST);
	flags = 0;
	if (directed)
		flags |= ENIC_MBOX_PKT_FILTER_DIRECTED;
	if (multicast)
		flags |= ENIC_MBOX_PKT_FILTER_MULTICAST;
	if (broadcast)
		flags |= ENIC_MBOX_PKT_FILTER_BROADCAST;
	if (promisc)
		flags |= ENIC_MBOX_PKT_FILTER_PROMISC;
	if (allmulti)
		flags |= ENIC_MBOX_PKT_FILTER_ALLMULTI;
	req.flags = flags;
	req.pad = 0;
	/* Lock admin channel while we send and wait for the reply, to prevent
	 * enic_poll_vf_admin_chan() (RQ interrupt) from interfering.
	 */
	lock_admin_chan(enic);
	enic_admin_wq_post(enic, &req);
	ENICPMD_LOG(DEBUG, "sent VF_SET_PKT_FILTER_FLAGS flags=0x%x", flags);
	err = recv_reply(enic, msg, ENIC_MBOX_VF_SET_PKT_FILTER_FLAGS_REPLY);
	unlock_admin_chan(enic);
	if (err) {
		ENICPMD_LOG(DEBUG, "VF_SET_PKT_FILTER_FLAGS_REPLY failed");
		goto out;
	}
	ENICPMD_LOG(DEBUG, "VF_SET_PKT_FILTER_FLAGS_REPLY ok");
out:
	return err;
}

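/*
 * Wrappers used by the rest of the PMD. On a VF with the admin channel
 * up, packet filter and MAC address changes go to the PF over the
 * mailbox; otherwise they are issued directly to firmware as devcmds.
 */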
int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
			   int broadcast, int promisc, int allmulti)
{
	if (enic_is_vf(enic)) {
		RTE_VERIFY(admin_chan_enabled(enic));
		return vf_set_packet_filter(enic, directed, multicast,
					    broadcast, promisc, allmulti);
	}
	return vnic_dev_packet_filter(enic->vdev, directed, multicast,
				      broadcast, promisc, allmulti);
}

static int vf_add_del_addr(struct enic *enic, uint8_t *addr, bool delete)
{
	struct enic_mbox_vf_add_del_mac_msg req;
	uint8_t msg[ENIC_ADMIN_BUF_SIZE];
	int err;

	ENICPMD_FUNC_TRACE();
	enic_mbox_init_msg_hdr(enic, &req, ENIC_MBOX_VF_ADD_DEL_MAC_REQUEST);

	req.num_addrs = 1;
	memcpy(req.mac_addr.addr, addr, RTE_ETHER_ADDR_LEN);
	req.mac_addr.flags = delete ? 0 : MAC_ADDR_FLAG_ADD;

	lock_admin_chan(enic);
	enic_admin_wq_post(enic, &req);
	ENICPMD_LOG(DEBUG, "sent VF_ADD_DEL_MAC");
	err = recv_reply(enic, msg, ENIC_MBOX_VF_ADD_DEL_MAC_REPLY);
	unlock_admin_chan(enic);
	if (err) {
		ENICPMD_LOG(DEBUG, "VF_ADD_DEL_MAC_REPLY failed");
		goto out;
	}
	ENICPMD_LOG(DEBUG, "VF_ADD_DEL_MAC_REPLY ok");
out:
	return err;
}

int enic_dev_add_addr(struct enic *enic, uint8_t *addr)
{
	ENICPMD_FUNC_TRACE();
	if (enic_is_vf(enic)) {
		RTE_VERIFY(admin_chan_enabled(enic));
		return vf_add_del_addr(enic, addr, false);
	}
	return vnic_dev_add_addr(enic->vdev, addr);
}

int enic_dev_del_addr(struct enic *enic, uint8_t *addr)
{
	ENICPMD_FUNC_TRACE();
	if (enic_is_vf(enic)) {
		RTE_VERIFY(admin_chan_enabled(enic));
		return vf_add_del_addr(enic, addr, true);
	}
	return vnic_dev_del_addr(enic->vdev, addr);
}