xref: /netbsd-src/sys/dev/pci/ixgbe/if_sriov.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /******************************************************************************
2 
3   Copyright (c) 2001-2017, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/
34 
35 #include "ixgbe.h"
36 #include "ixgbe_sriov.h"
37 
38 #ifdef PCI_IOV
39 
40 MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");
41 
42 /************************************************************************
43  * ixgbe_pci_iov_detach
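 *
 *   Thin wrapper that detaches SR-IOV support via pci_iov_detach().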
44  ************************************************************************/
45 int
46 ixgbe_pci_iov_detach(device_t dev)
47 {
48 	return pci_iov_detach(dev);
49 }
50 
51 /************************************************************************
52  * ixgbe_define_iov_schemas
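 *
 *   Build the PF and VF configuration schemas (MAC address, anti-spoof,
 *   set-MAC and promiscuous capabilities) and register them with
 *   pci_iov_attach().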
53  ************************************************************************/
54 void
55 ixgbe_define_iov_schemas(device_t dev, int *error)
56 {
57 	nvlist_t *pf_schema, *vf_schema;
58 
59 	pf_schema = pci_iov_schema_alloc_node();
60 	vf_schema = pci_iov_schema_alloc_node();
61 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
62 	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
63 	    IOV_SCHEMA_HASDEFAULT, TRUE);
64 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
65 	    IOV_SCHEMA_HASDEFAULT, FALSE);
66 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
67 	    IOV_SCHEMA_HASDEFAULT, FALSE);
68 	*error = pci_iov_attach(dev, pf_schema, vf_schema);
69 	if (*error != 0) {
70 		device_printf(dev,
71 		    "Error %d setting up SR-IOV\n", *error);
72 	}
73 } /* ixgbe_define_iov_schemas */
74 
75 /************************************************************************
76  * ixgbe_align_all_queue_indices
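 *
 *   Renumber the PF's Rx/Tx rings so that each ring's 'me' field holds
 *   the hardware queue index of the PF pool for the current IOV mode.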
77  ************************************************************************/
78 inline void
79 ixgbe_align_all_queue_indices(struct adapter *adapter)
80 {
81 	int i;
82 	int index;
83 
84 	for (i = 0; i < adapter->num_queues; i++) {
85 		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
86 		adapter->rx_rings[i].me = index;
87 		adapter->tx_rings[i].me = index;
88 	}
89 }
90 
91 /* Support functions for SR-IOV/VF management */
92 static inline void
93 ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
94 {
95 	if (vf->flags & IXGBE_VF_CTS)
96 		msg |= IXGBE_VT_MSGTYPE_CTS;
97 
98 	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
99 }
100 
101 static inline void
102 ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
103 {
104 	msg &= IXGBE_VT_MSG_MASK;
105 	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
106 }
107 
108 static inline void
109 ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
110 {
111 	msg &= IXGBE_VT_MSG_MASK;
112 	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
113 }
114 
115 static inline void
116 ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
117 {
118 	if (!(vf->flags & IXGBE_VF_CTS))
119 		ixgbe_send_vf_nack(adapter, vf, 0);
120 }
121 
122 static inline boolean_t
123 ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
124 {
125 	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
126 }
127 
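/* Number of queues available in each pool for the given IOV mode. */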
128 static inline int
129 ixgbe_vf_queues(int mode)
130 {
131 	switch (mode) {
132 	case IXGBE_64_VM:
133 		return (2);
134 	case IXGBE_32_VM:
135 		return (4);
136 	case IXGBE_NO_VM:
137 	default:
138 		return (0);
139 	}
140 }
141 
142 inline int
143 ixgbe_vf_que_index(int mode, int vfnum, int num)
144 {
145 	return ((vfnum * ixgbe_vf_queues(mode)) + num);
146 }
147 
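/* Grow the adapter's maximum frame size if the new value is larger. */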
148 static inline void
149 ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
150 {
151 	if (adapter->max_frame_size < max_frame)
152 		adapter->max_frame_size = max_frame;
153 }
154 
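/* MRQC (receive queue assignment/RSS) register value for the given IOV mode. */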
155 inline u32
156 ixgbe_get_mrqc(int iov_mode)
157 {
158 	u32 mrqc;
159 
160 	switch (iov_mode) {
161 	case IXGBE_64_VM:
162 		mrqc = IXGBE_MRQC_VMDQRSS64EN;
163 		break;
164 	case IXGBE_32_VM:
165 		mrqc = IXGBE_MRQC_VMDQRSS32EN;
166 		break;
167 	case IXGBE_NO_VM:
168 		mrqc = 0;
169 		break;
170 	default:
171 		panic("Unexpected SR-IOV mode %d", iov_mode);
172 	}
173 
174 	return mrqc;
175 }
176 
177 
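/* MTQC (transmit queue assignment) register value for the given IOV mode. */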
178 inline u32
179 ixgbe_get_mtqc(int iov_mode)
180 {
181 	uint32_t mtqc;
182 
183 	switch (iov_mode) {
184 	case IXGBE_64_VM:
185 		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
186 		break;
187 	case IXGBE_32_VM:
188 		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
189 		break;
190 	case IXGBE_NO_VM:
191 		mtqc = IXGBE_MTQC_64Q_1PB;
192 		break;
193 	default:
194 		panic("Unexpected SR-IOV mode %d", iov_mode);
195 	}
196 
197 	return mtqc;
198 }
199 
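/* Send a PF control ("ping") message to every active VF. */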
200 void
201 ixgbe_ping_all_vfs(struct adapter *adapter)
202 {
203 	struct ixgbe_vf *vf;
204 
205 	for (int i = 0; i < adapter->num_vfs; i++) {
206 		vf = &adapter->vfs[i];
207 		if (vf->flags & IXGBE_VF_ACTIVE)
208 			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
209 	}
210 } /* ixgbe_ping_all_vfs */
211 
212 
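/*
 * Program the receive filtering (VMOLR) and default VLAN insertion (VMVIR)
 * registers for the VF's pool.  With a non-zero tag, all of the VF's
 * transmit traffic is tagged and untagged receive traffic is rejected.
 */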
213 static void
214 ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
215                           uint16_t tag)
216 {
217 	struct ixgbe_hw *hw;
218 	uint32_t vmolr, vmvir;
219 
220 	hw = &adapter->hw;
221 
222 	vf->vlan_tag = tag;
223 
224 	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
225 
226 	/* Do not receive packets that pass inexact filters. */
227 	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
228 
229 	/* Disable Multicast Promiscuous Mode. */
230 	vmolr &= ~IXGBE_VMOLR_MPE;
231 
232 	/* Accept broadcasts. */
233 	vmolr |= IXGBE_VMOLR_BAM;
234 
235 	if (tag == 0) {
236 		/* Accept non-vlan tagged traffic. */
237 		vmolr |= IXGBE_VMOLR_AUPE;
238 
239 		/* Allow VM to tag outgoing traffic; no default tag. */
240 		vmvir = 0;
241 	} else {
242 		/* Require vlan-tagged traffic. */
243 		vmolr &= ~IXGBE_VMOLR_AUPE;
244 
245 		/* Tag all traffic with provided vlan tag. */
246 		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
247 	}
248 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
249 	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
250 } /* ixgbe_vf_set_default_vlan */
251 
252 
253 static boolean_t
254 ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
255 {
256 
257 	/*
258 	 * Frame size compatibility between PF and VF is only a problem on
259 	 * 82599-based cards.  X540 and later support any combination of jumbo
260 	 * frames on PFs and VFs.
261 	 */
262 	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
263 		return (TRUE);
264 
265 	switch (vf->api_ver) {
266 	case IXGBE_API_VER_1_0:
267 	case IXGBE_API_VER_UNKNOWN:
268 		/*
269 		 * On legacy (1.0 and older) VF versions, we don't support jumbo
270 		 * frames on either the PF or the VF.
271 		 */
272 		if (adapter->max_frame_size > ETHER_MAX_LEN ||
273 		    vf->max_frame_size > ETHER_MAX_LEN)
274 			return (FALSE);
275 
276 		return (TRUE);
277 
278 		break;
279 	case IXGBE_API_VER_1_1:
280 	default:
281 		/*
282 		 * 1.1 or later VF versions always work if they aren't using
283 		 * jumbo frames.
284 		 */
285 		if (vf->max_frame_size <= ETHER_MAX_LEN)
286 			return (TRUE);
287 
288 		/*
289 		 * Jumbo frames only work with VFs if the PF is also using jumbo
290 		 * frames.
291 		 */
292 		if (adapter->max_frame_size <= ETHER_MAX_LEN)
293 			return (TRUE);
294 
295 		return (FALSE);
296 	}
297 } /* ixgbe_vf_frame_size_compatible */
298 
299 
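/*
 * Return a VF to its post-reset state: default VLAN restored, RAR entry
 * cleared and the negotiated mailbox API version forgotten.
 */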
300 static void
301 ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
302 {
303 	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);
304 
305 	// XXX clear multicast addresses
306 
307 	ixgbe_clear_rar(&adapter->hw, vf->rar_index);
308 
309 	vf->api_ver = IXGBE_API_VER_UNKNOWN;
310 } /* ixgbe_process_vf_reset */
311 
312 
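/* Enable the VF's transmit queues by setting its bit in VFTE. */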
313 static void
314 ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
315 {
316 	struct ixgbe_hw *hw;
317 	uint32_t vf_index, vfte;
318 
319 	hw = &adapter->hw;
320 
321 	vf_index = IXGBE_VF_INDEX(vf->pool);
322 	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
323 	vfte |= IXGBE_VF_BIT(vf->pool);
324 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
325 } /* ixgbe_vf_enable_transmit */
326 
327 
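/*
 * Enable the VF's receive queues (VFRE), but only if its maximum frame
 * size is compatible with the PF's.
 */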
328 static void
329 ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
330 {
331 	struct ixgbe_hw *hw;
332 	uint32_t vf_index, vfre;
333 
334 	hw = &adapter->hw;
335 
336 	vf_index = IXGBE_VF_INDEX(vf->pool);
337 	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
338 	if (ixgbe_vf_frame_size_compatible(adapter, vf))
339 		vfre |= IXGBE_VF_BIT(vf->pool);
340 	else
341 		vfre &= ~IXGBE_VF_BIT(vf->pool);
342 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
343 } /* ixgbe_vf_enable_receive */
344 
345 
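/*
 * Handle a VF_RESET mailbox message: reset the VF, reprogram its RAR entry
 * if it has a valid MAC address, re-enable rx/tx and reply with the
 * permanent address and multicast filter type.
 */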
346 static void
347 ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
348 {
349 	struct ixgbe_hw *hw;
350 	uint32_t ack;
351 	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
352 
353 	hw = &adapter->hw;
354 
355 	ixgbe_process_vf_reset(adapter, vf);
356 
357 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
358 		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
359 		    vf->pool, TRUE);
360 		ack = IXGBE_VT_MSGTYPE_ACK;
361 	} else
362 		ack = IXGBE_VT_MSGTYPE_NACK;
363 
364 	ixgbe_vf_enable_transmit(adapter, vf);
365 	ixgbe_vf_enable_receive(adapter, vf);
366 
367 	vf->flags |= IXGBE_VF_CTS;
368 
369 	resp[0] = IXGBE_VF_RESET | ack;
370 	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
371 	resp[3] = hw->mac.mc_filter_type;
372 	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
373 } /* ixgbe_vf_reset_msg */
374 
375 
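/*
 * Handle a SET_MAC_ADDR request.  The new address is accepted only if it
 * is valid and the VF is allowed to change its MAC address.
 */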
376 static void
377 ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
378 {
379 	uint8_t *mac;
380 
381 	mac = (uint8_t*)&msg[1];
382 
383 	/* Check that the VF has permission to change the MAC address. */
384 	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
385 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
386 		return;
387 	}
388 
389 	if (ixgbe_validate_mac_addr(mac) != 0) {
390 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
391 		return;
392 	}
393 
394 	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
395 
396 	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
397 	    TRUE);
398 
399 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
400 } /* ixgbe_vf_set_mac */
401 
402 
403 /*
404  * VF multicast addresses are enabled by setting the appropriate bit in
405  * one of the 128 32-bit MTA registers (4096 hash values in total).
406  */
407 static void
408 ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
409 {
410 	u16	*list = (u16*)&msg[1];
411 	int	entries;
412 	u32	vmolr, vec_bit, vec_reg, mta_reg;
413 
414 	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
415 	entries = min(entries, IXGBE_MAX_VF_MC);
416 
417 	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));
418 
419 	vf->num_mc_hashes = entries;
420 
421 	/* Set the appropriate MTA bit */
422 	for (int i = 0; i < entries; i++) {
423 		vf->mc_hash[i] = list[i];
424 		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
425 		vec_bit = vf->mc_hash[i] & 0x1F;
426 		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
427 		mta_reg |= (1 << vec_bit);
428 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
429 	}
430 
431 	vmolr |= IXGBE_VMOLR_ROMPE;
432 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
433 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
434 } /* ixgbe_vf_set_mc_addr */
435 
436 
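/*
 * Handle a SET_VLAN request: add or remove a VLAN filter for the VF's pool.
 */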
437 static void
438 ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
439 {
440 	struct ixgbe_hw *hw;
441 	int enable;
442 	uint16_t tag;
443 
444 	hw = &adapter->hw;
445 	enable = IXGBE_VT_MSGINFO(msg[0]);
446 	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
447 
448 	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
449 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
450 		return;
451 	}
452 
453 	/* It is illegal to enable vlan tag 0. */
454 	if (tag == 0 && enable != 0) {
455 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
456 		return;
457 	}
458 
459 	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
460 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
461 } /* ixgbe_vf_set_vlan */
462 
463 
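/*
 * Handle a SET_LPE (max frame size) request: record the VF's frame size
 * and raise the PF's MHADD limit if it is now too small.
 */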
464 static void
465 ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
466 {
467 	struct ixgbe_hw *hw;
468 	uint32_t vf_max_size, pf_max_size, mhadd;
469 
470 	hw = &adapter->hw;
471 	vf_max_size = msg[1];
472 
473 	if (vf_max_size < ETHER_CRC_LEN) {
474 		/* We intentionally ACK invalid LPE requests. */
475 		ixgbe_send_vf_ack(adapter, vf, msg[0]);
476 		return;
477 	}
478 
479 	vf_max_size -= ETHER_CRC_LEN;
480 
481 	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
482 		/* We intentionally ACK invalid LPE requests. */
483 		ixgbe_send_vf_ack(adapter, vf, msg[0]);
484 		return;
485 	}
486 
487 	vf->max_frame_size = vf_max_size;
488 	ixgbe_update_max_frame(adapter, vf->max_frame_size);
489 
490 	/*
491 	 * We might have to disable reception to this VF if the frame size is
492 	 * not compatible with the config on the PF.
493 	 */
494 	ixgbe_vf_enable_receive(adapter, vf);
495 
496 	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
497 	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
498 
499 	if (pf_max_size < adapter->max_frame_size) {
500 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
501 		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
502 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
503 	}
504 
505 	ixgbe_send_vf_ack(adapter, vf, msg[0]);
506 } /* ixgbe_vf_set_lpe */
507 
508 
509 static void
510 ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
511                      uint32_t *msg)
512 {
513 	//XXX implement this
514 	ixgbe_send_vf_nack(adapter, vf, msg[0]);
515 } /* ixgbe_vf_set_macvlan */
516 
517 
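/* Record the mailbox API version (1.0 or 1.1) requested by the VF. */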
518 static void
519 ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
520     uint32_t *msg)
521 {
522 
523 	switch (msg[1]) {
524 	case IXGBE_API_VER_1_0:
525 	case IXGBE_API_VER_1_1:
526 		vf->api_ver = msg[1];
527 		ixgbe_send_vf_ack(adapter, vf, msg[0]);
528 		break;
529 	default:
530 		vf->api_ver = IXGBE_API_VER_UNKNOWN;
531 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
532 		break;
533 	}
534 } /* ixgbe_vf_api_negotiate */
535 
536 
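/*
 * Handle a GET_QUEUES request: report the number of Rx/Tx queues in the
 * VF's pool.  Only supported on mailbox API 1.1 and later.
 */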
537 static void
538 ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
539 {
540 	struct ixgbe_hw *hw;
541 	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
542 	int num_queues;
543 
544 	hw = &adapter->hw;
545 
546 	/* GET_QUEUES is not supported on pre-1.1 APIs. */
547 	switch (vf->api_ver) {
548 	case IXGBE_API_VER_1_0:
549 	case IXGBE_API_VER_UNKNOWN:
550 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
551 		return;
552 	}
553 
554 	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
555 	    IXGBE_VT_MSGTYPE_CTS;
556 
557 	num_queues = ixgbe_vf_queues(adapter->iov_mode);
558 	resp[IXGBE_VF_TX_QUEUES] = num_queues;
559 	resp[IXGBE_VF_RX_QUEUES] = num_queues;
560 	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
561 	resp[IXGBE_VF_DEF_QUEUE] = 0;
562 
563 	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
564 } /* ixgbe_vf_get_queues */
565 
566 
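/* Read one pending mailbox message from the VF and dispatch it. */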
567 static void
568 ixgbe_process_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf)
569 {
570 	struct ixgbe_hw *hw;
571 	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
572 	int error;
573 
574 	hw = &adapter->hw;
575 
576 	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
577 
578 	if (error != 0)
579 		return;
580 
581 	CTR3(KTR_MALLOC, "%s: received msg %x from %d", adapter->ifp->if_xname,
582 	    msg[0], vf->pool);
583 	if (msg[0] == IXGBE_VF_RESET) {
584 		ixgbe_vf_reset_msg(adapter, vf, msg);
585 		return;
586 	}
587 
588 	if (!(vf->flags & IXGBE_VF_CTS)) {
589 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
590 		return;
591 	}
592 
593 	switch (msg[0] & IXGBE_VT_MSG_MASK) {
594 	case IXGBE_VF_SET_MAC_ADDR:
595 		ixgbe_vf_set_mac(adapter, vf, msg);
596 		break;
597 	case IXGBE_VF_SET_MULTICAST:
598 		ixgbe_vf_set_mc_addr(adapter, vf, msg);
599 		break;
600 	case IXGBE_VF_SET_VLAN:
601 		ixgbe_vf_set_vlan(adapter, vf, msg);
602 		break;
603 	case IXGBE_VF_SET_LPE:
604 		ixgbe_vf_set_lpe(adapter, vf, msg);
605 		break;
606 	case IXGBE_VF_SET_MACVLAN:
607 		ixgbe_vf_set_macvlan(adapter, vf, msg);
608 		break;
609 	case IXGBE_VF_API_NEGOTIATE:
610 		ixgbe_vf_api_negotiate(adapter, vf, msg);
611 		break;
612 	case IXGBE_VF_GET_QUEUES:
613 		ixgbe_vf_get_queues(adapter, vf, msg);
614 		break;
615 	default:
616 		ixgbe_send_vf_nack(adapter, vf, msg[0]);
617 	}
618 } /* ixgbe_process_vf_msg */
619 
620 
621 /* Tasklet for handling VF -> PF mailbox messages */
622 void
623 ixgbe_handle_mbx(void *context, int pending)
624 {
625 	struct adapter *adapter = context;
626 	struct ixgbe_hw *hw;
627 	struct ixgbe_vf *vf;
628 	int i;
629 
630 	hw = &adapter->hw;
631 
632 	IXGBE_CORE_LOCK(adapter);
633 	for (i = 0; i < adapter->num_vfs; i++) {
634 		vf = &adapter->vfs[i];
635 
636 		if (vf->flags & IXGBE_VF_ACTIVE) {
637 			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
638 				ixgbe_process_vf_reset(adapter, vf);
639 
640 			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
641 				ixgbe_process_vf_msg(adapter, vf);
642 
643 			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
644 				ixgbe_process_vf_ack(adapter, vf);
645 		}
646 	}
647 	IXGBE_CORE_UNLOCK(adapter);
648 } /* ixgbe_handle_mbx */
649 
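/*
 * Enable SR-IOV with num_vfs VFs: pick 32- or 64-pool mode, allocate the
 * per-VF state and reinitialize the adapter with the SRIOV feature set.
 */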
650 int
651 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
652 {
653 	struct adapter *adapter;
654 	int retval = 0;
655 
656 	adapter = device_get_softc(dev);
657 	adapter->iov_mode = IXGBE_NO_VM;
658 
659 	if (num_vfs == 0) {
660 		/* Would we ever get num_vfs = 0? */
661 		retval = EINVAL;
662 		goto err_init_iov;
663 	}
664 
665 	/*
666 	 * We've got to reserve a VM's worth of queues for the PF,
667 	 * thus we go into "64 VF mode" if 32+ VFs are requested.
668 	 * With 64 VFs, you can only have two queues per VF.
669 	 * With 32 VFs, you can have up to four queues per VF.
670 	 */
671 	if (num_vfs >= IXGBE_32_VM)
672 		adapter->iov_mode = IXGBE_64_VM;
673 	else
674 		adapter->iov_mode = IXGBE_32_VM;
675 
676 	/* Again, reserving 1 VM's worth of queues for the PF */
677 	adapter->pool = adapter->iov_mode - 1;
678 
679 	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
680 		retval = ENOSPC;
681 		goto err_init_iov;
682 	}
683 
684 	IXGBE_CORE_LOCK(adapter);
685 
686 	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
687 	    M_NOWAIT | M_ZERO);
688 
689 	if (adapter->vfs == NULL) {
690 		retval = ENOMEM;
691 		IXGBE_CORE_UNLOCK(adapter);
692 		goto err_init_iov;
693 	}
694 
695 	adapter->num_vfs = num_vfs;
696 
697 	/* set the SRIOV flag now as it's needed
698 	 * by ixgbe_init_locked() */
699 	adapter->feat_en |= IXGBE_FEATURE_SRIOV;
700 	adapter->init_locked(adapter);
701 
702 	IXGBE_CORE_UNLOCK(adapter);
703 
704 	return (retval);
705 
706 err_init_iov:
707 	adapter->num_vfs = 0;
708 	adapter->pool = 0;
709 	adapter->iov_mode = IXGBE_NO_VM;
710 
711 	return (retval);
712 } /* ixgbe_init_iov */
713 
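/*
 * Tear down SR-IOV: leave rx/tx enabled only for the PF pool, disable
 * VT_CTL and free the per-VF state.
 */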
714 void
715 ixgbe_uninit_iov(device_t dev)
716 {
717 	struct ixgbe_hw *hw;
718 	struct adapter *adapter;
719 	uint32_t pf_reg, vf_reg;
720 
721 	adapter = device_get_softc(dev);
722 	hw = &adapter->hw;
723 
724 	IXGBE_CORE_LOCK(adapter);
725 
726 	/* Enable rx/tx for the PF and disable it for all VFs. */
727 	pf_reg = IXGBE_VF_INDEX(adapter->pool);
728 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
729 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));
730 
731 	if (pf_reg == 0)
732 		vf_reg = 1;
733 	else
734 		vf_reg = 0;
735 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
736 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
737 
738 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
739 
740 	free(adapter->vfs, M_IXGBE_SRIOV);
741 	adapter->vfs = NULL;
742 	adapter->num_vfs = 0;
743 	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
744 
745 	IXGBE_CORE_UNLOCK(adapter);
746 } /* ixgbe_uninit_iov */
747 
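/*
 * Bring one active VF online: unmask its mailbox interrupt, restore its
 * VLAN and MAC settings, re-enable rx/tx and notify the VF.
 */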
748 static void
749 ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
750 {
751 	struct ixgbe_hw *hw;
752 	uint32_t vf_index, pfmbimr;
753 
754 	IXGBE_CORE_LOCK_ASSERT(adapter);
755 
756 	hw = &adapter->hw;
757 
758 	if (!(vf->flags & IXGBE_VF_ACTIVE))
759 		return;
760 
761 	vf_index = IXGBE_VF_INDEX(vf->pool);
762 	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
763 	pfmbimr |= IXGBE_VF_BIT(vf->pool);
764 	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
765 
766 	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);
767 
768 	// XXX multicast addresses
769 
770 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
771 		ixgbe_set_rar(&adapter->hw, vf->rar_index,
772 		    vf->ether_addr, vf->pool, TRUE);
773 	}
774 
775 	ixgbe_vf_enable_transmit(adapter, vf);
776 	ixgbe_vf_enable_receive(adapter, vf);
777 
778 	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
779 } /* ixgbe_init_vf */
780 
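/*
 * Program the global virtualization registers (MRQC, MTQC, GCR_EXT, GPIE
 * and VT_CTL) for the current IOV mode and initialize every VF.
 */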
781 void
782 ixgbe_initialize_iov(struct adapter *adapter)
783 {
784 	struct ixgbe_hw *hw = &adapter->hw;
785 	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
786 	int i;
787 
788 	if (adapter->iov_mode == IXGBE_NO_VM)
789 		return;
790 
791 	IXGBE_CORE_LOCK_ASSERT(adapter);
792 
793 	/* RMW appropriate registers based on IOV mode */
794 	/* Read... */
795 	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
796 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
797 	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
798 	/* Modify... */
799 	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
800 	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
801 	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
802 	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
803 	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
804 	switch (adapter->iov_mode) {
805 	case IXGBE_64_VM:
806 		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
807 		mtqc    |= IXGBE_MTQC_64VF;
808 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
809 		gpie    |= IXGBE_GPIE_VTMODE_64;
810 		break;
811 	case IXGBE_32_VM:
812 		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
813 		mtqc    |= IXGBE_MTQC_32VF;
814 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
815 		gpie    |= IXGBE_GPIE_VTMODE_32;
816 		break;
817 	default:
818 		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
819 	}
820 	/* Write... */
821 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
822 	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
823 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
824 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
825 
826 	/* Enable rx/tx for the PF. */
827 	vf_reg = IXGBE_VF_INDEX(adapter->pool);
828 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
829 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));
830 
831 	/* Allow VM-to-VM communication. */
832 	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
833 
834 	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
835 	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
836 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
837 
838 	for (i = 0; i < adapter->num_vfs; i++)
839 		ixgbe_init_vf(adapter, &adapter->vfs[i]);
840 } /* ixgbe_initialize_iov */
841 
842 
843 /* Check the max frame setting of all active VFs */
844 void
845 ixgbe_recalculate_max_frame(struct adapter *adapter)
846 {
847 	struct ixgbe_vf *vf;
848 
849 	IXGBE_CORE_LOCK_ASSERT(adapter);
850 
851 	for (int i = 0; i < adapter->num_vfs; i++) {
852 		vf = &adapter->vfs[i];
853 		if (vf->flags & IXGBE_VF_ACTIVE)
854 			ixgbe_update_max_frame(adapter, vf->max_frame_size);
855 	}
856 } /* ixgbe_recalculate_max_frame */
857 
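/*
 * Configure one VF from its nvlist config (MAC address and capabilities),
 * mark it active and bring it online.
 */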
858 int
859 ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
860 {
861 	struct adapter *adapter;
862 	struct ixgbe_vf *vf;
863 	const void *mac;
864 
865 	adapter = device_get_softc(dev);
866 
867 	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
868 	    vfnum, adapter->num_vfs));
869 
870 	IXGBE_CORE_LOCK(adapter);
871 	vf = &adapter->vfs[vfnum];
872 	vf->pool = vfnum;
873 
874 	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
875 	vf->rar_index = vfnum + 1;
876 	vf->default_vlan = 0;
877 	vf->max_frame_size = ETHER_MAX_LEN;
878 	ixgbe_update_max_frame(adapter, vf->max_frame_size);
879 
880 	if (nvlist_exists_binary(config, "mac-addr")) {
881 		mac = nvlist_get_binary(config, "mac-addr", NULL);
882 		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
883 		if (nvlist_get_bool(config, "allow-set-mac"))
884 			vf->flags |= IXGBE_VF_CAP_MAC;
885 	} else
886 		/*
887 		 * If the administrator has not specified a MAC address then
888 		 * we must allow the VF to choose one.
889 		 */
890 		vf->flags |= IXGBE_VF_CAP_MAC;
891 
892 	vf->flags |= IXGBE_VF_ACTIVE;
893 
894 	ixgbe_init_vf(adapter, vf);
895 	IXGBE_CORE_UNLOCK(adapter);
896 
897 	return (0);
898 } /* ixgbe_add_vf */
899 
900 #else
901 
902 void
903 ixgbe_handle_mbx(void *context, int pending)
904 {
905 	UNREFERENCED_2PARAMETER(context, pending);
906 } /* ixgbe_handle_mbx */
907 
908 inline int
909 ixgbe_vf_que_index(int mode, int vfnum, int num)
910 {
911 	UNREFERENCED_2PARAMETER(mode, vfnum);
912 
913 	return num;
914 } /* ixgbe_vf_que_index */
915 
916 #endif
917