1 /* $NetBSD: if_sriov.c,v 1.18 2023/10/06 14:37:04 msaitoh Exp $ */
2 /******************************************************************************
3 
4   Copyright (c) 2001-2017, Intel Corporation
5   All rights reserved.
6 
7   Redistribution and use in source and binary forms, with or without
8   modification, are permitted provided that the following conditions are met:
9 
10    1. Redistributions of source code must retain the above copyright notice,
11       this list of conditions and the following disclaimer.
12 
13    2. Redistributions in binary form must reproduce the above copyright
14       notice, this list of conditions and the following disclaimer in the
15       documentation and/or other materials provided with the distribution.
16 
17    3. Neither the name of the Intel Corporation nor the names of its
18       contributors may be used to endorse or promote products derived from
19       this software without specific prior written permission.
20 
21   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31   POSSIBILITY OF SUCH DAMAGE.
32 
33 ******************************************************************************/
34 /*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: if_sriov.c,v 1.18 2023/10/06 14:37:04 msaitoh Exp $");
38 
39 #include "ixgbe.h"
40 #include "ixgbe_sriov.h"
41 
42 #ifdef PCI_IOV
43 
44 MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");
45 
46 /************************************************************************
47  * ixgbe_pci_iov_detach
48  ************************************************************************/
49 int
50 ixgbe_pci_iov_detach(device_t dev)
51 {
52 	return pci_iov_detach(dev);
53 }
54 
55 /************************************************************************
56  * ixgbe_define_iov_schemas
57  ************************************************************************/
58 void
59 ixgbe_define_iov_schemas(device_t dev, int *error)
60 {
61 	nvlist_t *pf_schema, *vf_schema;
62 
63 	pf_schema = pci_iov_schema_alloc_node();
64 	vf_schema = pci_iov_schema_alloc_node();
65 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
66 	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
67 	    IOV_SCHEMA_HASDEFAULT, TRUE);
68 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
69 	    IOV_SCHEMA_HASDEFAULT, FALSE);
70 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
71 	    IOV_SCHEMA_HASDEFAULT, FALSE);
72 	*error = pci_iov_attach(dev, pf_schema, vf_schema);
73 	if (*error != 0) {
74 		device_printf(dev,
75 		    "Error %d setting up SR-IOV\n", *error);
76 	}
77 } /* ixgbe_define_iov_schemas */
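
/*
 * Illustrative sketch (not part of the driver): on the FreeBSD side this
 * code was ported from, the per-VF schema keys registered above would
 * typically be supplied through an iovctl(8)-style configuration.  The
 * device name and values below are made-up examples only.
 *
 *	PF {
 *		device = "ix0";
 *		num_vfs = 4;
 *	}
 *	VF-0 {
 *		mac-addr = "02:00:00:00:00:01";
 *		mac-anti-spoof = true;
 *		allow-set-mac = true;
 *		allow-promisc = false;
 *	}
 */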
78 
79 /************************************************************************
80  * ixgbe_align_all_queue_indices
81  ************************************************************************/
82 inline void
83 ixgbe_align_all_queue_indices(struct ixgbe_softc *sc)
84 {
85 	int i;
86 	int index;
87 
88 	for (i = 0; i < sc->num_queues; i++) {
89 		index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
90 		sc->rx_rings[i].me = index;
91 		sc->tx_rings[i].me = index;
92 	}
93 }
94 
95 /* Support functions for SR-IOV/VF management */
96 static inline void
97 ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
98 {
99 	if (vf->flags & IXGBE_VF_CTS)
100 		msg |= IXGBE_VT_MSGTYPE_CTS;
101 
102 	hw->mbx.ops[vf->pool].write(hw, &msg, 1, vf->pool);
103 }
104 
105 static inline void
106 ixgbe_send_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
107 {
108 	msg &= IXGBE_VT_MSG_MASK;
109 	ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
110 }
111 
112 static inline void
113 ixgbe_send_vf_nack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
114 {
115 	msg &= IXGBE_VT_MSG_MASK;
116 	ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
117 }
118 
119 static inline void
120 ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
121 {
122 	if (!(vf->flags & IXGBE_VF_CTS))
123 		ixgbe_send_vf_nack(sc, vf, 0);
124 }
125 
126 static inline bool
127 ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
128 {
129 	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
130 }
131 
132 static inline int
133 ixgbe_vf_queues(int mode)
134 {
135 	switch (mode) {
136 	case IXGBE_64_VM:
137 		return (2);
138 	case IXGBE_32_VM:
139 		return (4);
140 	case IXGBE_NO_VM:
141 	default:
142 		return (0);
143 	}
144 }
145 
146 inline int
147 ixgbe_vf_que_index(int mode, int vfnum, int num)
148 {
149 	return ((vfnum * ixgbe_vf_queues(mode)) + num);
150 }
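
/*
 * Worked example (illustrative only, derived from the two helpers above):
 * in IXGBE_32_VM mode each pool owns ixgbe_vf_queues(IXGBE_32_VM) == 4
 * queues, so the second queue of VF 3 maps to hardware ring
 * ixgbe_vf_que_index(IXGBE_32_VM, 3, 1) == 3 * 4 + 1 == 13.  In
 * IXGBE_64_VM mode the same call would yield 3 * 2 + 1 == 7.
 */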
151 
152 static inline void
153 ixgbe_update_max_frame(struct ixgbe_softc * sc, int max_frame)
154 {
155 	if (sc->max_frame_size < max_frame)
156 		sc->max_frame_size = max_frame;
157 }
158 
159 inline u32
160 ixgbe_get_mrqc(int iov_mode)
161 {
162 	u32 mrqc;
163 
164 	switch (iov_mode) {
165 	case IXGBE_64_VM:
166 		mrqc = IXGBE_MRQC_VMDQRSS64EN;
167 		break;
168 	case IXGBE_32_VM:
169 		mrqc = IXGBE_MRQC_VMDQRSS32EN;
170 		break;
171 	case IXGBE_NO_VM:
172 		mrqc = 0;
173 		break;
174 	default:
175 		panic("Unexpected SR-IOV mode %d", iov_mode);
176 	}
177 
178 	return mrqc;
179 }
180 
181 
182 inline u32
183 ixgbe_get_mtqc(int iov_mode)
184 {
185 	uint32_t mtqc;
186 
187 	switch (iov_mode) {
188 	case IXGBE_64_VM:
189 		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
190 		break;
191 	case IXGBE_32_VM:
192 		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
193 		break;
194 	case IXGBE_NO_VM:
195 		mtqc = IXGBE_MTQC_64Q_1PB;
196 		break;
197 	default:
198 		panic("Unexpected SR-IOV mode %d", iov_mode);
199 	}
200 
201 	return mtqc;
202 }
203 
204 void
205 ixgbe_ping_all_vfs(struct ixgbe_softc *sc)
206 {
207 	struct ixgbe_vf *vf;
208 
209 	for (int i = 0; i < sc->num_vfs; i++) {
210 		vf = &sc->vfs[i];
211 		if (vf->flags & IXGBE_VF_ACTIVE)
212 			ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
213 	}
214 } /* ixgbe_ping_all_vfs */
215 
216 
217 static void
218 ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
219                           uint16_t tag)
220 {
221 	struct ixgbe_hw *hw;
222 	uint32_t vmolr, vmvir;
223 
224 	hw = &sc->hw;
225 
226 	vf->vlan_tag = tag;
227 
228 	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
229 
230 	/* Do not receive packets that pass inexact filters. */
231 	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
232 
233 	/* Disable Multicast Promiscuous Mode. */
234 	vmolr &= ~IXGBE_VMOLR_MPE;
235 
236 	/* Accept broadcasts. */
237 	vmolr |= IXGBE_VMOLR_BAM;
238 
239 	if (tag == 0) {
240 		/* Accept non-vlan tagged traffic. */
241 		vmolr |= IXGBE_VMOLR_AUPE;
242 
243 		/* Allow VM to tag outgoing traffic; no default tag. */
244 		vmvir = 0;
245 	} else {
246 		/* Require vlan-tagged traffic. */
247 		vmolr &= ~IXGBE_VMOLR_AUPE;
248 
249 		/* Tag all traffic with provided vlan tag. */
250 		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
251 	}
252 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
253 	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
254 } /* ixgbe_vf_set_default_vlan */
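
/*
 * Net effect, sketched from the code above (illustrative, not a captured
 * register trace): with tag == 0 the pool ends up with VMOLR.AUPE and
 * VMOLR.BAM set and VMVIR == 0, so untagged traffic is accepted and the
 * VF may insert its own tags; with, say, tag == 100 the pool gets
 * VMVIR == (100 | IXGBE_VMVIR_VLANA_DEFAULT), so the hardware inserts
 * VLAN 100 on every frame the VF transmits and untagged receive is
 * disabled (AUPE cleared).
 */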
255 
256 
257 static void
258 ixgbe_clear_vfmbmem(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
259 {
260 	struct ixgbe_hw *hw = &sc->hw;
261 	uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
262 	uint16_t mbx_size = hw->mbx.size;
263 	uint16_t i;
264 
265 	IXGBE_CORE_LOCK_ASSERT(sc);
266 
267 	for (i = 0; i < mbx_size; ++i)
268 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
269 } /* ixgbe_clear_vfmbmem */
270 
271 
272 static bool
273 ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
274 {
275 
276 	/*
277 	 * Frame size compatibility between PF and VF is only a problem on
278 	 * 82599-based cards.  X540 and later support any combination of jumbo
279 	 * frames on PFs and VFs.
280 	 */
281 	if (sc->hw.mac.type != ixgbe_mac_82599EB)
282 		return (TRUE);
283 
284 	switch (vf->api_ver) {
285 	case IXGBE_API_VER_1_0:
286 	case IXGBE_API_VER_UNKNOWN:
287 		/*
288 		 * On legacy (1.0 and older) VF versions, we don't support jumbo
289 		 * frames on either the PF or the VF.
290 		 */
291 		if (sc->max_frame_size > ETHER_MAX_LEN ||
292 		    vf->max_frame_size > ETHER_MAX_LEN)
293 			return (FALSE);
294 
295 		return (TRUE);
296 
297 		break;
298 	case IXGBE_API_VER_1_1:
299 	default:
300 		/*
301 		 * 1.1 or later VF versions always work if they aren't using
302 		 * jumbo frames.
303 		 */
304 		if (vf->max_frame_size <= ETHER_MAX_LEN)
305 			return (TRUE);
306 
307 		/*
308 		 * Jumbo frames only work with VFs if the PF is also using jumbo
309 		 * frames.
310 		 */
311 		if (sc->max_frame_size <= ETHER_MAX_LEN)
312 			return (TRUE);
313 
314 		return (FALSE);
315 	}
316 } /* ixgbe_vf_frame_size_compatible */
317 
318 
319 static void
320 ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
321 {
322 	ixgbe_vf_set_default_vlan(sc, vf, vf->default_vlan);
323 
324 	// XXX clear multicast addresses
325 
326 	ixgbe_clear_rar(&sc->hw, vf->rar_index);
327 	ixgbe_clear_vfmbmem(sc, vf);
328 	ixgbe_toggle_txdctl(&sc->hw, IXGBE_VF_INDEX(vf->pool));
329 
330 	vf->api_ver = IXGBE_API_VER_UNKNOWN;
331 } /* ixgbe_process_vf_reset */
332 
333 
334 static void
335 ixgbe_vf_enable_transmit(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
336 {
337 	struct ixgbe_hw *hw;
338 	uint32_t vf_index, vfte;
339 
340 	hw = &sc->hw;
341 
342 	vf_index = IXGBE_VF_INDEX(vf->pool);
343 	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
344 	vfte |= IXGBE_VF_BIT(vf->pool);
345 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
346 } /* ixgbe_vf_enable_transmit */
347 
348 
349 static void
350 ixgbe_vf_enable_receive(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
351 {
352 	struct ixgbe_hw *hw;
353 	uint32_t vf_index, vfre;
354 
355 	hw = &sc->hw;
356 
357 	vf_index = IXGBE_VF_INDEX(vf->pool);
358 	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
359 	if (ixgbe_vf_frame_size_compatible(sc, vf))
360 		vfre |= IXGBE_VF_BIT(vf->pool);
361 	else
362 		vfre &= ~IXGBE_VF_BIT(vf->pool);
363 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
364 } /* ixgbe_vf_enable_receive */
365 
366 
367 static void
368 ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
369 {
370 	struct ixgbe_hw *hw;
371 	uint32_t ack;
372 	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
373 
374 	hw = &sc->hw;
375 
376 	ixgbe_process_vf_reset(sc, vf);
377 
378 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
379 		ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
380 		    vf->pool, TRUE);
381 		ack = IXGBE_VT_MSGTYPE_SUCCESS;
382 	} else
383 		ack = IXGBE_VT_MSGTYPE_FAILURE;
384 
385 	ixgbe_vf_enable_transmit(sc, vf);
386 	ixgbe_vf_enable_receive(sc, vf);
387 
388 	vf->flags |= IXGBE_VF_CTS;
389 
390 	resp[0] = IXGBE_VF_RESET | ack;
391 	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
392 	resp[3] = hw->mac.mc_filter_type;
393 	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
394 } /* ixgbe_vf_reset_msg */
395 
396 
397 static void
398 ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
399 {
400 	uint8_t *mac;
401 
402 	mac = (uint8_t*)&msg[1];
403 
404 	/* Check that the VF has permission to change the MAC address. */
405 	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
406 		ixgbe_send_vf_nack(sc, vf, msg[0]);
407 		return;
408 	}
409 
410 	if (ixgbe_validate_mac_addr(mac) != 0) {
411 		ixgbe_send_vf_nack(sc, vf, msg[0]);
412 		return;
413 	}
414 
415 	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
416 
417 	ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool,
418 	    TRUE);
419 
420 	ixgbe_send_vf_ack(sc, vf, msg[0]);
421 } /* ixgbe_vf_set_mac */
422 
423 
424 /*
425  * VF multicast addresses are set by turning on the appropriate bit in
426  * one of the 128 32-bit MTA registers (4096 hash bits in total).
427  */
428 static void
429 ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg)
430 {
431 	u16	*list = (u16*)&msg[1];
432 	int	entries;
433 	u32	vmolr, vec_bit, vec_reg, mta_reg;
434 
435 	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
436 	entries = uimin(entries, IXGBE_MAX_VF_MC);
437 
438 	vmolr = IXGBE_READ_REG(&sc->hw, IXGBE_VMOLR(vf->pool));
439 
440 	vf->num_mc_hashes = entries;
441 
442 	/* Set the appropriate MTA bit */
443 	for (int i = 0; i < entries; i++) {
444 		vf->mc_hash[i] = list[i];
445 		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
446 		vec_bit = vf->mc_hash[i] & 0x1F;
447 		mta_reg = IXGBE_READ_REG(&sc->hw, IXGBE_MTA(vec_reg));
448 		mta_reg |= (1 << vec_bit);
449 		IXGBE_WRITE_REG(&sc->hw, IXGBE_MTA(vec_reg), mta_reg);
450 	}
451 
452 	vmolr |= IXGBE_VMOLR_ROMPE;
453 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr);
454 	ixgbe_send_vf_ack(sc, vf, msg[0]);
455 } /* ixgbe_vf_set_mc_addr */
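
/*
 * Worked example for the hash-to-MTA mapping above (illustrative only):
 * a 12-bit hash value of 0x876 selects register (0x876 >> 5) & 0x7F == 0x43
 * and bit 0x876 & 0x1F == 22, i.e. bit 22 of IXGBE_MTA(0x43) is set.
 */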
456 
457 
458 static void
459 ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
460 {
461 	struct ixgbe_hw *hw;
462 	int enable;
463 	uint16_t tag;
464 
465 	hw = &sc->hw;
466 	enable = IXGBE_VT_MSGINFO(msg[0]);
467 	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
468 
469 	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
470 		ixgbe_send_vf_nack(sc, vf, msg[0]);
471 		return;
472 	}
473 
474 	/* It is illegal to enable vlan tag 0. */
475 	if (tag == 0 && enable != 0) {
476 		ixgbe_send_vf_nack(sc, vf, msg[0]);
477 		return;
478 	}
479 
480 	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
481 	ixgbe_send_vf_ack(sc, vf, msg[0]);
482 } /* ixgbe_vf_set_vlan */
483 
484 
485 static void
486 ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
487 {
488 	struct ixgbe_hw *hw;
489 	uint32_t vf_max_size, pf_max_size, mhadd;
490 
491 	hw = &sc->hw;
492 	vf_max_size = msg[1];
493 
494 	if (vf_max_size < ETHER_CRC_LEN) {
495 		/* We intentionally ACK invalid LPE requests. */
496 		ixgbe_send_vf_ack(sc, vf, msg[0]);
497 		return;
498 	}
499 
500 	vf_max_size -= ETHER_CRC_LEN;
501 
502 	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
503 		/* We intentionally ACK invalid LPE requests. */
504 		ixgbe_send_vf_ack(sc, vf, msg[0]);
505 		return;
506 	}
507 
508 	vf->max_frame_size = vf_max_size;
509 	ixgbe_update_max_frame(sc, vf->max_frame_size);
510 
511 	/*
512 	 * We might have to disable reception to this VF if the frame size is
513 	 * not compatible with the config on the PF.
514 	 */
515 	ixgbe_vf_enable_receive(sc, vf);
516 
517 	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
518 	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
519 
520 	if (pf_max_size < sc->max_frame_size) {
521 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
522 		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
523 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
524 	}
525 
526 	ixgbe_send_vf_ack(sc, vf, msg[0]);
527 } /* ixgbe_vf_set_lpe */
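
/*
 * Example flow (illustrative, with made-up numbers): if a VF running a
 * 9000-byte MTU requests msg[1] == 9018 (9000 payload + 14 Ethernet header
 * + 4 CRC), the code above subtracts ETHER_CRC_LEN and records a VF
 * max_frame_size of 9014; if the PF's MHADD.MFS field is then still smaller
 * than sc->max_frame_size it is raised so such frames are not dropped by
 * the shared MAC.
 */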
528 
529 
530 static void
531 ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
532                      uint32_t *msg)
533 {
534 	//XXX implement this
535 	ixgbe_send_vf_nack(sc, vf, msg[0]);
536 } /* ixgbe_vf_set_macvlan */
537 
538 
539 static void
540 ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
541     uint32_t *msg)
542 {
543 
544 	switch (msg[1]) {
545 	case IXGBE_API_VER_1_0:
546 	case IXGBE_API_VER_1_1:
547 		vf->api_ver = msg[1];
548 		ixgbe_send_vf_ack(sc, vf, msg[0]);
549 		break;
550 	default:
551 		vf->api_ver = IXGBE_API_VER_UNKNOWN;
552 		ixgbe_send_vf_nack(sc, vf, msg[0]);
553 		break;
554 	}
555 } /* ixgbe_vf_api_negotiate */
556 
557 
558 static void
559 ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
560 {
561 	struct ixgbe_hw *hw;
562 	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
563 	int num_queues;
564 
565 	hw = &sc->hw;
566 
567 	/* GET_QUEUES is not supported on pre-1.1 APIs. */
568 	switch (msg[0]) {
569 	case IXGBE_API_VER_1_0:
570 	case IXGBE_API_VER_UNKNOWN:
571 		ixgbe_send_vf_nack(sc, vf, msg[0]);
572 		return;
573 	}
574 
575 	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
576 	    IXGBE_VT_MSGTYPE_CTS;
577 
578 	num_queues = ixgbe_vf_queues(sc->iov_mode);
579 	resp[IXGBE_VF_TX_QUEUES] = num_queues;
580 	resp[IXGBE_VF_RX_QUEUES] = num_queues;
581 	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
582 	resp[IXGBE_VF_DEF_QUEUE] = 0;
583 
584 	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
585 } /* ixgbe_vf_get_queues */
586 
587 
588 static void
589 ixgbe_process_vf_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
590 {
591 	struct ixgbe_hw *hw;
592 	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
593 	int error;
594 
595 	hw = &sc->hw;
596 
597 	error = hw->mbx.ops[vf->pool].read(hw, msg, IXGBE_VFMAILBOX_SIZE,
598 	    vf->pool);
599 
600 	if (error != 0)
601 		return;
602 
603 	CTR3(KTR_MALLOC, "%s: received msg %x from %d",
604 	    sc->ifp->if_xname, msg[0], vf->pool);
605 	if (msg[0] == IXGBE_VF_RESET) {
606 		ixgbe_vf_reset_msg(sc, vf, msg);
607 		return;
608 	}
609 
610 	if (!(vf->flags & IXGBE_VF_CTS)) {
611 		ixgbe_send_vf_nack(sc, vf, msg[0]);
612 		return;
613 	}
614 
615 	switch (msg[0] & IXGBE_VT_MSG_MASK) {
616 	case IXGBE_VF_SET_MAC_ADDR:
617 		ixgbe_vf_set_mac(sc, vf, msg);
618 		break;
619 	case IXGBE_VF_SET_MULTICAST:
620 		ixgbe_vf_set_mc_addr(sc, vf, msg);
621 		break;
622 	case IXGBE_VF_SET_VLAN:
623 		ixgbe_vf_set_vlan(sc, vf, msg);
624 		break;
625 	case IXGBE_VF_SET_LPE:
626 		ixgbe_vf_set_lpe(sc, vf, msg);
627 		break;
628 	case IXGBE_VF_SET_MACVLAN:
629 		ixgbe_vf_set_macvlan(sc, vf, msg);
630 		break;
631 	case IXGBE_VF_API_NEGOTIATE:
632 		ixgbe_vf_api_negotiate(sc, vf, msg);
633 		break;
634 	case IXGBE_VF_GET_QUEUES:
635 		ixgbe_vf_get_queues(sc, vf, msg);
636 		break;
637 	default:
638 		ixgbe_send_vf_nack(sc, vf, msg[0]);
639 	}
640 } /* ixgbe_process_vf_msg */
641 
642 
643 /* Tasklet for handling VF -> PF mailbox messages */
644 void
645 ixgbe_handle_mbx(void *context)
646 {
647 	struct ixgbe_softc *sc = context;
648 	struct ixgbe_hw *hw;
649 	struct ixgbe_vf *vf;
650 	int i;
651 
652 	KASSERT(mutex_owned(&sc->core_mtx));
653 
654 	hw = &sc->hw;
655 
656 	for (i = 0; i < sc->num_vfs; i++) {
657 		vf = &sc->vfs[i];
658 
659 		if ((vf->flags & IXGBE_VF_ACTIVE) == 0)
660 			continue;
661 
662 		if (hw->mbx.ops[vf->pool].check_for_rst(hw, vf->pool) == 0)
663 			ixgbe_process_vf_reset(sc, vf);
664 
665 		if (hw->mbx.ops[vf->pool].check_for_msg(hw, vf->pool) == 0)
666 			ixgbe_process_vf_msg(sc, vf);
667 
668 		if (hw->mbx.ops[vf->pool].check_for_ack(hw, vf->pool) == 0)
669 			ixgbe_process_vf_ack(sc, vf);
670 	}
671 } /* ixgbe_handle_mbx */
672 
673 int
674 ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
675 {
676 	struct ixgbe_softc *sc;
677 	int retval = 0;
678 
679 	sc = device_get_softc(dev);
680 	sc->iov_mode = IXGBE_NO_VM;
681 
682 	if (num_vfs == 0) {
683 		/* Would we ever get num_vfs = 0? */
684 		retval = EINVAL;
685 		goto err_init_iov;
686 	}
687 
688 	/*
689 	 * We've got to reserve a VM's worth of queues for the PF,
690 	 * thus we go into "64 VF mode" if 32+ VFs are requested.
691 	 * With 64 VFs, you can only have two queues per VF.
692 	 * With 32 VFs, you can have up to four queues per VF.
693 	 */
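	/*
	 * Example (illustrative only): num_vfs == 8 selects IXGBE_32_VM, so
	 * pool == 31 and each VF may use up to four queues; num_vfs == 40
	 * selects IXGBE_64_VM, so pool == 63 and each VF gets two queues.
	 * Requests of 64 or more VFs fail the ENOSPC check below because
	 * one pool is always reserved for the PF.
	 */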
694 	if (num_vfs >= IXGBE_32_VM)
695 		sc->iov_mode = IXGBE_64_VM;
696 	else
697 		sc->iov_mode = IXGBE_32_VM;
698 
699 	/* Again, reserving 1 VM's worth of queues for the PF */
700 	sc->pool = sc->iov_mode - 1;
701 
702 	if ((num_vfs > sc->pool) || (num_vfs >= IXGBE_64_VM)) {
703 		retval = ENOSPC;
704 		goto err_init_iov;
705 	}
706 
707 	IXGBE_CORE_LOCK(sc);
708 
709 	sc->vfs = malloc(sizeof(*sc->vfs) * num_vfs, M_IXGBE_SRIOV,
710 	    M_NOWAIT | M_ZERO);
711 
712 	if (sc->vfs == NULL) {
713 		retval = ENOMEM;
714 		IXGBE_CORE_UNLOCK(sc);
715 		goto err_init_iov;
716 	}
717 
718 	sc->num_vfs = num_vfs;
719 	ixgbe_init_mbx_params_pf(&sc->hw);
720 
721 	/* set the SRIOV flag now as it's needed
722 	 * by ixgbe_init_locked() */
723 	sc->feat_en |= IXGBE_FEATURE_SRIOV;
724 	sc->init_locked(sc);
725 
726 	IXGBE_CORE_UNLOCK(sc);
727 
728 	return retval;
729 
730 err_init_iov:
731 	sc->num_vfs = 0;
732 	sc->pool = 0;
733 	sc->iov_mode = IXGBE_NO_VM;
734 
735 	return retval;
736 } /* ixgbe_init_iov */
737 
738 void
739 ixgbe_uninit_iov(device_t dev)
740 {
741 	struct ixgbe_hw *hw;
742 	struct ixgbe_softc *sc;
743 	uint32_t pf_reg, vf_reg;
744 
745 	sc = device_get_softc(dev);
746 	hw = &sc->hw;
747 
748 	IXGBE_CORE_LOCK(sc);
749 
750 	/* Enable rx/tx for the PF and disable it for all VFs. */
751 	pf_reg = IXGBE_VF_INDEX(sc->pool);
752 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(sc->pool));
753 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(sc->pool));
754 
755 	if (pf_reg == 0)
756 		vf_reg = 1;
757 	else
758 		vf_reg = 0;
759 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
760 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
761 
762 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
763 
764 	free(sc->vfs, M_IXGBE_SRIOV);
765 	sc->vfs = NULL;
766 	sc->num_vfs = 0;
767 	sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
768 
769 	IXGBE_CORE_UNLOCK(sc);
770 } /* ixgbe_uninit_iov */
771 
772 static void
773 ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
774 {
775 	struct ixgbe_hw *hw;
776 	uint32_t vf_index, pfmbimr;
777 
778 	IXGBE_CORE_LOCK_ASSERT(sc);
779 
780 	hw = &sc->hw;
781 
782 	if (!(vf->flags & IXGBE_VF_ACTIVE))
783 		return;
784 
785 	vf_index = IXGBE_VF_INDEX(vf->pool);
786 	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
787 	pfmbimr |= IXGBE_VF_BIT(vf->pool);
788 	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
789 
790 	ixgbe_vf_set_default_vlan(sc, vf, vf->vlan_tag);
791 
792 	// XXX multicast addresses
793 
794 	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
795 		ixgbe_set_rar(&sc->hw, vf->rar_index,
796 		    vf->ether_addr, vf->pool, TRUE);
797 	}
798 
799 	ixgbe_vf_enable_transmit(sc, vf);
800 	ixgbe_vf_enable_receive(sc, vf);
801 
802 	ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
803 } /* ixgbe_init_vf */
804 
805 void
806 ixgbe_initialize_iov(struct ixgbe_softc *sc)
807 {
808 	struct ixgbe_hw *hw = &sc->hw;
809 	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
810 	int i;
811 
812 	if (sc->iov_mode == IXGBE_NO_VM)
813 		return;
814 
815 	IXGBE_CORE_LOCK_ASSERT(sc);
816 
817 	/* RMW appropriate registers based on IOV mode */
818 	/* Read... */
819 	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
820 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
821 	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
822 	/* Modify... */
823 	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
824 	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
825 	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
826 	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
827 	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
828 	switch (sc->iov_mode) {
829 	case IXGBE_64_VM:
830 		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
831 		mtqc    |= IXGBE_MTQC_64VF;
832 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
833 		gpie    |= IXGBE_GPIE_VTMODE_64;
834 		break;
835 	case IXGBE_32_VM:
836 		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
837 		mtqc    |= IXGBE_MTQC_32VF;
838 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
839 		gpie    |= IXGBE_GPIE_VTMODE_32;
840 		break;
841 	default:
842 		panic("Unexpected SR-IOV mode %d", sc->iov_mode);
843 	}
844 	/* Write... */
845 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
846 	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
847 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
848 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
849 
850 	/* Enable rx/tx for the PF. */
851 	vf_reg = IXGBE_VF_INDEX(sc->pool);
852 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(sc->pool));
853 	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(sc->pool));
854 
855 	/* Allow VM-to-VM communication. */
856 	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
857 
858 	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
859 	vt_ctl |= (sc->pool << IXGBE_VT_CTL_POOL_SHIFT);
860 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
861 
862 	for (i = 0; i < sc->num_vfs; i++)
863 		ixgbe_init_vf(sc, &sc->vfs[i]);
864 } /* ixgbe_initialize_iov */
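
/*
 * Illustrative snapshot of what the routine above programs in IXGBE_32_VM
 * mode (values derived from the code, not from a captured trace): MRQC gets
 * VMDQRSS32EN, MTQC gets VT_ENA | 32VF, GCR_EXT selects VT_MODE_32, and
 * VT_CTL ends up as VT_ENABLE | REPLEN | (31 << POOL_SHIFT), so frames that
 * match no pool filter are delivered to the PF's pool 31.
 */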
865 
866 
867 /* Check the max frame setting of all active VFs */
868 void
869 ixgbe_recalculate_max_frame(struct ixgbe_softc *sc)
870 {
871 	struct ixgbe_vf *vf;
872 
873 	IXGBE_CORE_LOCK_ASSERT(sc);
874 
875 	for (int i = 0; i < sc->num_vfs; i++) {
876 		vf = &sc->vfs[i];
877 		if (vf->flags & IXGBE_VF_ACTIVE)
878 			ixgbe_update_max_frame(sc, vf->max_frame_size);
879 	}
880 } /* ixgbe_recalculate_max_frame */
881 
882 int
883 ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
884 {
885 	struct ixgbe_softc *sc;
886 	struct ixgbe_vf *vf;
887 	const void *mac;
888 
889 	sc = device_get_softc(dev);
890 
891 	KASSERT(vfnum < sc->num_vfs, ("VF index %d is out of range %d",
892 	    vfnum, sc->num_vfs));
893 
894 	IXGBE_CORE_LOCK(sc);
895 	vf = &sc->vfs[vfnum];
896 	vf->pool = vfnum;
897 
898 	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
899 	vf->rar_index = vfnum + 1;
900 	vf->default_vlan = 0;
901 	vf->max_frame_size = ETHER_MAX_LEN;
902 	ixgbe_update_max_frame(sc, vf->max_frame_size);
903 
904 	if (nvlist_exists_binary(config, "mac-addr")) {
905 		mac = nvlist_get_binary(config, "mac-addr", NULL);
906 		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
907 		if (nvlist_get_bool(config, "allow-set-mac"))
908 			vf->flags |= IXGBE_VF_CAP_MAC;
909 	} else
910 		/*
911 		 * If the administrator has not specified a MAC address then
912 		 * we must allow the VF to choose one.
913 		 */
914 		vf->flags |= IXGBE_VF_CAP_MAC;
915 
916 	vf->flags |= IXGBE_VF_ACTIVE;
917 
918 	ixgbe_init_vf(sc, vf);
919 	IXGBE_CORE_UNLOCK(sc);
920 
921 	return (0);
922 } /* ixgbe_add_vf */
923 
924 #else
925 
926 void
927 ixgbe_handle_mbx(void *context)
928 {
929 	UNREFERENCED_1PARAMETER(context);
930 } /* ixgbe_handle_mbx */
931 
932 inline int
933 ixgbe_vf_que_index(int mode, int vfnum, int num)
934 {
935 	UNREFERENCED_2PARAMETER(mode, vfnum);
936 
937 	return num;
938 } /* ixgbe_vf_que_index */
939 
940 #endif
941