/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */
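
/*
 * For reference, an administrator would exercise these schemas through
 * iovctl(8) on FreeBSD.  A minimal, hypothetical /etc/iovctl.conf (the
 * device name and values are illustrative, not taken from this driver):
 *
 *	PF {
 *		device : "ix0";
 *		num_vfs : 4;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "02:00:00:00:00:01";
 *		allow-set-mac : true;
 *	}
 */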

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_rings[i].me = index;
		adapter->tx_rings[i].me = index;
	}
}

/* Support functions for SR-IOV/VF management */
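/*
 * Mailbox message words carry a message type in the low bits plus
 * status flags: IXGBE_VT_MSGTYPE_ACK/NACK report success or failure
 * back to the VF, and IXGBE_VT_MSGTYPE_CTS ("clear to send") is set
 * once the VF has completed its reset handshake, telling it the PF is
 * ready for further traffic.  The helpers below mask a message with
 * IXGBE_VT_MSG_MASK before replying so stale status bits are not
 * echoed back.
 */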
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}
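
/*
 * Example of the mapping above: in IXGBE_32_VM mode each pool owns four
 * queues, so VF 3's second queue (num = 1) lands on hardware queue
 * index 3 * 4 + 1 = 13.
 */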

static inline void
ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
                          uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */


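/*
 * Zero the pool's mailbox memory.  PFMBMEM is the per-VF mailbox SRAM
 * and hw->mbx.size its length in 32-bit words; clearing it on reset
 * keeps the next owner of the pool from reading whatever the previous
 * VF driver left behind.
 */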
static void
ixgbe_clear_vfmbmem(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
	uint16_t mbx_size = hw->mbx.size;
	uint16_t i;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (i = 0; i < mbx_size; ++i)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
} /* ixgbe_clear_vfmbmem */


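/*
 * Frame-size compatibility summary for the 82599, derived from the
 * cases below:
 *
 *	VF API          VF jumbo   PF jumbo   compatible
 *	1.0/unknown     no         no         yes
 *	1.0/unknown     yes        -          no
 *	1.0/unknown     -          yes        no
 *	1.1 or later    no         -          yes
 *	1.1 or later    yes        yes        yes
 *	1.1 or later    yes        no         no
 */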
static boolean_t
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->max_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */


static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
	ixgbe_clear_vfmbmem(adapter, vf);
	ixgbe_toggle_txdctl(&adapter->hw, IXGBE_VF_INDEX(vf->pool));

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


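/*
 * VFRE/VFTE (like the other per-pool bitmap registers used here) are
 * pairs of 32-bit registers covering pools 0-31 and 32-63, so
 * IXGBE_VF_INDEX(pool) picks the register (pool / 32) and
 * IXGBE_VF_BIT(pool) the bit within it (1 << (pool % 32)); pool 37,
 * for example, is register 1, bit 5.
 */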
static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

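	/*
	 * Reply layout (IXGBE_VF_PERMADDR_MSG_LEN words): word 0 echoes
	 * IXGBE_VF_RESET plus ACK or NACK, words 1-2 carry the permanent
	 * MAC address, and word 3 the PF's multicast filter type.
	 */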
	resp[0] = IXGBE_VF_RESET | ack;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t *)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by toggling the appropriate bit in
 * one of the 128 32-bit MTA registers (4096 hash bits in all).
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16 *)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = uimin(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */
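
/*
 * Worked example of the hash above: entry 0x0863 yields
 * vec_reg = (0x0863 >> 5) & 0x7f = 0x43 and vec_bit = 0x0863 & 0x1f =
 * 0x03, i.e. bit 3 of MTA register 67.
 */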


static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


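/*
 * LPE ("large packet enable") requests carry the VF's desired maximum
 * frame size, CRC included, in msg[1]; the value stored in
 * vf->max_frame_size excludes the CRC.  Out-of-range requests are
 * intentionally ACKed without effect, presumably for compatibility
 * with existing VF drivers.
 */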
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->max_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
                     uint32_t *msg)
{
	//XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


static void
ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
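/*
 * Each mbx.ops.check_for_* hook returns 0 when the corresponding event
 * (reset, message, or ACK) is pending for the pool, so one pass below
 * services everything a VF has raised since the last interrupt.
 */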
void
ixgbe_handle_mbx(void *context, int pending)
{
	struct adapter *adapter = context;
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	hw = &adapter->hw;

	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(adapter, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
} /* ixgbe_handle_mbx */

int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = device_get_softc(dev);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(adapter);

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(adapter);
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;

	/*
	 * Set the SRIOV flag now, as it's needed by
	 * ixgbe_init_locked().
	 */
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;
	adapter->init_locked(adapter);

	IXGBE_CORE_UNLOCK(adapter);

	return (retval);

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_init_iov */
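
/*
 * Example of the sizing rules above: a request for 20 VFs selects
 * IXGBE_32_VM mode (four queues per pool) and the PF keeps the last
 * pool, leaving up to 31 usable VF pools; 32 or more VFs selects
 * IXGBE_64_VM mode (two queues per pool) with up to 63 VFs.
 */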

void
ixgbe_uninit_iov(device_t dev)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = device_get_softc(dev);
	hw = &adapter->hw;

	IXGBE_CORE_LOCK(adapter);

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;

	IXGBE_CORE_UNLOCK(adapter);
} /* ixgbe_uninit_iov */

static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc    |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie    |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc    |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie    |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	IXGBE_CORE_LOCK_ASSERT(adapter);

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->max_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

int
ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = device_get_softc(dev);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	IXGBE_CORE_LOCK(adapter);
	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->max_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->max_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);
	IXGBE_CORE_UNLOCK(adapter);

	return (0);
} /* ixgbe_add_vf */

#else

void
ixgbe_handle_mbx(void *context, int pending)
{
	UNREFERENCED_2PARAMETER(context, pending);
} /* ixgbe_handle_mbx */

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return num;
} /* ixgbe_vf_que_index */

#endif