1*55485da1Smsaitoh /* $NetBSD: if_sriov.c,v 1.18 2023/10/06 14:37:04 msaitoh Exp $ */
2dc7f84c8Smsaitoh /******************************************************************************
3dc7f84c8Smsaitoh
4dc7f84c8Smsaitoh Copyright (c) 2001-2017, Intel Corporation
5dc7f84c8Smsaitoh All rights reserved.
6dc7f84c8Smsaitoh
7dc7f84c8Smsaitoh Redistribution and use in source and binary forms, with or without
8dc7f84c8Smsaitoh modification, are permitted provided that the following conditions are met:
9dc7f84c8Smsaitoh
10dc7f84c8Smsaitoh 1. Redistributions of source code must retain the above copyright notice,
11dc7f84c8Smsaitoh this list of conditions and the following disclaimer.
12dc7f84c8Smsaitoh
13dc7f84c8Smsaitoh 2. Redistributions in binary form must reproduce the above copyright
14dc7f84c8Smsaitoh notice, this list of conditions and the following disclaimer in the
15dc7f84c8Smsaitoh documentation and/or other materials provided with the distribution.
16dc7f84c8Smsaitoh
17dc7f84c8Smsaitoh 3. Neither the name of the Intel Corporation nor the names of its
18dc7f84c8Smsaitoh contributors may be used to endorse or promote products derived from
19dc7f84c8Smsaitoh this software without specific prior written permission.
20dc7f84c8Smsaitoh
21dc7f84c8Smsaitoh THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22dc7f84c8Smsaitoh AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23dc7f84c8Smsaitoh IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24dc7f84c8Smsaitoh ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25dc7f84c8Smsaitoh LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26dc7f84c8Smsaitoh CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27dc7f84c8Smsaitoh SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28dc7f84c8Smsaitoh INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29dc7f84c8Smsaitoh CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30dc7f84c8Smsaitoh ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31dc7f84c8Smsaitoh POSSIBILITY OF SUCH DAMAGE.
32dc7f84c8Smsaitoh
33dc7f84c8Smsaitoh ******************************************************************************/
34a06ca633Smsaitoh /*$FreeBSD: head/sys/dev/ixgbe/if_sriov.c 327031 2017-12-20 18:15:06Z erj $*/
35dc7f84c8Smsaitoh
36ab119b16Smsaitoh #include <sys/cdefs.h>
37*55485da1Smsaitoh __KERNEL_RCSID(0, "$NetBSD: if_sriov.c,v 1.18 2023/10/06 14:37:04 msaitoh Exp $");
38ab119b16Smsaitoh
39dc7f84c8Smsaitoh #include "ixgbe.h"
4092ac5028Smsaitoh #include "ixgbe_sriov.h"
41dc7f84c8Smsaitoh
42dc7f84c8Smsaitoh #ifdef PCI_IOV
43dc7f84c8Smsaitoh
/* Malloc type tag for SR-IOV bookkeeping allocations (per-VF state array). */
MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");
45dc7f84c8Smsaitoh
46dc7f84c8Smsaitoh /************************************************************************
47dc7f84c8Smsaitoh * ixgbe_pci_iov_detach
48dc7f84c8Smsaitoh ************************************************************************/
49dc7f84c8Smsaitoh int
ixgbe_pci_iov_detach(device_t dev)50dc7f84c8Smsaitoh ixgbe_pci_iov_detach(device_t dev)
51dc7f84c8Smsaitoh {
52dc7f84c8Smsaitoh return pci_iov_detach(dev);
53dc7f84c8Smsaitoh }
54dc7f84c8Smsaitoh
55dc7f84c8Smsaitoh /************************************************************************
56dc7f84c8Smsaitoh * ixgbe_define_iov_schemas
57dc7f84c8Smsaitoh ************************************************************************/
58dc7f84c8Smsaitoh void
ixgbe_define_iov_schemas(device_t dev,int * error)59dc7f84c8Smsaitoh ixgbe_define_iov_schemas(device_t dev, int *error)
60dc7f84c8Smsaitoh {
61dc7f84c8Smsaitoh nvlist_t *pf_schema, *vf_schema;
62dc7f84c8Smsaitoh
63dc7f84c8Smsaitoh pf_schema = pci_iov_schema_alloc_node();
64dc7f84c8Smsaitoh vf_schema = pci_iov_schema_alloc_node();
65dc7f84c8Smsaitoh pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
66dc7f84c8Smsaitoh pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
67dc7f84c8Smsaitoh IOV_SCHEMA_HASDEFAULT, TRUE);
68dc7f84c8Smsaitoh pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
69dc7f84c8Smsaitoh IOV_SCHEMA_HASDEFAULT, FALSE);
70dc7f84c8Smsaitoh pci_iov_schema_add_bool(vf_schema, "allow-promisc",
71dc7f84c8Smsaitoh IOV_SCHEMA_HASDEFAULT, FALSE);
72dc7f84c8Smsaitoh *error = pci_iov_attach(dev, pf_schema, vf_schema);
73dc7f84c8Smsaitoh if (*error != 0) {
74dc7f84c8Smsaitoh device_printf(dev,
75dc7f84c8Smsaitoh "Error %d setting up SR-IOV\n", *error);
76dc7f84c8Smsaitoh }
77dc7f84c8Smsaitoh } /* ixgbe_define_iov_schemas */
78dc7f84c8Smsaitoh
79dc7f84c8Smsaitoh /************************************************************************
80dc7f84c8Smsaitoh * ixgbe_align_all_queue_indices
81dc7f84c8Smsaitoh ************************************************************************/
82dc7f84c8Smsaitoh inline void
ixgbe_align_all_queue_indices(struct ixgbe_softc * sc)83*55485da1Smsaitoh ixgbe_align_all_queue_indices(struct ixgbe_softc *sc)
84dc7f84c8Smsaitoh {
85dc7f84c8Smsaitoh int i;
86dc7f84c8Smsaitoh int index;
87dc7f84c8Smsaitoh
88*55485da1Smsaitoh for (i = 0; i < sc->num_queues; i++) {
89*55485da1Smsaitoh index = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
90*55485da1Smsaitoh sc->rx_rings[i].me = index;
91*55485da1Smsaitoh sc->tx_rings[i].me = index;
92dc7f84c8Smsaitoh }
93dc7f84c8Smsaitoh }
94dc7f84c8Smsaitoh
95dc7f84c8Smsaitoh /* Support functions for SR-IOV/VF management */
96dc7f84c8Smsaitoh static inline void
ixgbe_send_vf_msg(struct ixgbe_hw * hw,struct ixgbe_vf * vf,u32 msg)97debed2feSmsaitoh ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
98dc7f84c8Smsaitoh {
99dc7f84c8Smsaitoh if (vf->flags & IXGBE_VF_CTS)
100dc7f84c8Smsaitoh msg |= IXGBE_VT_MSGTYPE_CTS;
101dc7f84c8Smsaitoh
1021e393addSmsaitoh hw->mbx.ops[vf->pool].write(hw, &msg, 1, vf->pool);
103dc7f84c8Smsaitoh }
104dc7f84c8Smsaitoh
105dc7f84c8Smsaitoh static inline void
ixgbe_send_vf_ack(struct ixgbe_softc * sc,struct ixgbe_vf * vf,u32 msg)106*55485da1Smsaitoh ixgbe_send_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
107dc7f84c8Smsaitoh {
108dc7f84c8Smsaitoh msg &= IXGBE_VT_MSG_MASK;
109*55485da1Smsaitoh ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
110dc7f84c8Smsaitoh }
111dc7f84c8Smsaitoh
112dc7f84c8Smsaitoh static inline void
ixgbe_send_vf_nack(struct ixgbe_softc * sc,struct ixgbe_vf * vf,u32 msg)113*55485da1Smsaitoh ixgbe_send_vf_nack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
114dc7f84c8Smsaitoh {
115dc7f84c8Smsaitoh msg &= IXGBE_VT_MSG_MASK;
116*55485da1Smsaitoh ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
117dc7f84c8Smsaitoh }
118dc7f84c8Smsaitoh
119dc7f84c8Smsaitoh static inline void
ixgbe_process_vf_ack(struct ixgbe_softc * sc,struct ixgbe_vf * vf)120*55485da1Smsaitoh ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
121dc7f84c8Smsaitoh {
122dc7f84c8Smsaitoh if (!(vf->flags & IXGBE_VF_CTS))
123*55485da1Smsaitoh ixgbe_send_vf_nack(sc, vf, 0);
124dc7f84c8Smsaitoh }
125dc7f84c8Smsaitoh
12629209018Smrg static inline bool
ixgbe_vf_mac_changed(struct ixgbe_vf * vf,const uint8_t * mac)127dc7f84c8Smsaitoh ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
128dc7f84c8Smsaitoh {
129dc7f84c8Smsaitoh return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
130dc7f84c8Smsaitoh }
131dc7f84c8Smsaitoh
132dc7f84c8Smsaitoh static inline int
ixgbe_vf_queues(int mode)133dc7f84c8Smsaitoh ixgbe_vf_queues(int mode)
134dc7f84c8Smsaitoh {
135dc7f84c8Smsaitoh switch (mode) {
136dc7f84c8Smsaitoh case IXGBE_64_VM:
137dc7f84c8Smsaitoh return (2);
138dc7f84c8Smsaitoh case IXGBE_32_VM:
139dc7f84c8Smsaitoh return (4);
140dc7f84c8Smsaitoh case IXGBE_NO_VM:
141dc7f84c8Smsaitoh default:
142dc7f84c8Smsaitoh return (0);
143dc7f84c8Smsaitoh }
144dc7f84c8Smsaitoh }
145dc7f84c8Smsaitoh
/* Absolute queue index for queue `num' of pool `vfnum' in mode `mode'. */
inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	int base = vfnum * ixgbe_vf_queues(mode);

	return (base + num);
}
151dc7f84c8Smsaitoh
152dc7f84c8Smsaitoh static inline void
ixgbe_update_max_frame(struct ixgbe_softc * sc,int max_frame)153*55485da1Smsaitoh ixgbe_update_max_frame(struct ixgbe_softc * sc, int max_frame)
154dc7f84c8Smsaitoh {
155*55485da1Smsaitoh if (sc->max_frame_size < max_frame)
156*55485da1Smsaitoh sc->max_frame_size = max_frame;
157dc7f84c8Smsaitoh }
158dc7f84c8Smsaitoh
159dc7f84c8Smsaitoh inline u32
ixgbe_get_mrqc(int iov_mode)160dc7f84c8Smsaitoh ixgbe_get_mrqc(int iov_mode)
161dc7f84c8Smsaitoh {
162dc7f84c8Smsaitoh u32 mrqc;
163dc7f84c8Smsaitoh
164dc7f84c8Smsaitoh switch (iov_mode) {
165dc7f84c8Smsaitoh case IXGBE_64_VM:
166dc7f84c8Smsaitoh mrqc = IXGBE_MRQC_VMDQRSS64EN;
167dc7f84c8Smsaitoh break;
168dc7f84c8Smsaitoh case IXGBE_32_VM:
169dc7f84c8Smsaitoh mrqc = IXGBE_MRQC_VMDQRSS32EN;
170dc7f84c8Smsaitoh break;
171dc7f84c8Smsaitoh case IXGBE_NO_VM:
172dc7f84c8Smsaitoh mrqc = 0;
173dc7f84c8Smsaitoh break;
174dc7f84c8Smsaitoh default:
175dc7f84c8Smsaitoh panic("Unexpected SR-IOV mode %d", iov_mode);
176dc7f84c8Smsaitoh }
177dc7f84c8Smsaitoh
178dc7f84c8Smsaitoh return mrqc;
179dc7f84c8Smsaitoh }
180dc7f84c8Smsaitoh
181dc7f84c8Smsaitoh
182dc7f84c8Smsaitoh inline u32
ixgbe_get_mtqc(int iov_mode)183dc7f84c8Smsaitoh ixgbe_get_mtqc(int iov_mode)
184dc7f84c8Smsaitoh {
185dc7f84c8Smsaitoh uint32_t mtqc;
186dc7f84c8Smsaitoh
187dc7f84c8Smsaitoh switch (iov_mode) {
188dc7f84c8Smsaitoh case IXGBE_64_VM:
189dc7f84c8Smsaitoh mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
190dc7f84c8Smsaitoh break;
191dc7f84c8Smsaitoh case IXGBE_32_VM:
192dc7f84c8Smsaitoh mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
193dc7f84c8Smsaitoh break;
194dc7f84c8Smsaitoh case IXGBE_NO_VM:
195dc7f84c8Smsaitoh mtqc = IXGBE_MTQC_64Q_1PB;
196dc7f84c8Smsaitoh break;
197dc7f84c8Smsaitoh default:
198dc7f84c8Smsaitoh panic("Unexpected SR-IOV mode %d", iov_mode);
199dc7f84c8Smsaitoh }
200dc7f84c8Smsaitoh
201dc7f84c8Smsaitoh return mtqc;
202dc7f84c8Smsaitoh }
203dc7f84c8Smsaitoh
204dc7f84c8Smsaitoh void
ixgbe_ping_all_vfs(struct ixgbe_softc * sc)205*55485da1Smsaitoh ixgbe_ping_all_vfs(struct ixgbe_softc *sc)
206dc7f84c8Smsaitoh {
207dc7f84c8Smsaitoh struct ixgbe_vf *vf;
208dc7f84c8Smsaitoh
209*55485da1Smsaitoh for (int i = 0; i < sc->num_vfs; i++) {
210*55485da1Smsaitoh vf = &sc->vfs[i];
211dc7f84c8Smsaitoh if (vf->flags & IXGBE_VF_ACTIVE)
212*55485da1Smsaitoh ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
213dc7f84c8Smsaitoh }
214dc7f84c8Smsaitoh } /* ixgbe_ping_all_vfs */
215dc7f84c8Smsaitoh
216dc7f84c8Smsaitoh
217dc7f84c8Smsaitoh static void
ixgbe_vf_set_default_vlan(struct ixgbe_softc * sc,struct ixgbe_vf * vf,uint16_t tag)218*55485da1Smsaitoh ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
219dc7f84c8Smsaitoh uint16_t tag)
220dc7f84c8Smsaitoh {
221dc7f84c8Smsaitoh struct ixgbe_hw *hw;
222dc7f84c8Smsaitoh uint32_t vmolr, vmvir;
223dc7f84c8Smsaitoh
224*55485da1Smsaitoh hw = &sc->hw;
225dc7f84c8Smsaitoh
226dc7f84c8Smsaitoh vf->vlan_tag = tag;
227dc7f84c8Smsaitoh
228dc7f84c8Smsaitoh vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));
229dc7f84c8Smsaitoh
230dc7f84c8Smsaitoh /* Do not receive packets that pass inexact filters. */
231dc7f84c8Smsaitoh vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);
232dc7f84c8Smsaitoh
233dc7f84c8Smsaitoh /* Disable Multicast Promicuous Mode. */
234dc7f84c8Smsaitoh vmolr &= ~IXGBE_VMOLR_MPE;
235dc7f84c8Smsaitoh
236dc7f84c8Smsaitoh /* Accept broadcasts. */
237dc7f84c8Smsaitoh vmolr |= IXGBE_VMOLR_BAM;
238dc7f84c8Smsaitoh
239dc7f84c8Smsaitoh if (tag == 0) {
240dc7f84c8Smsaitoh /* Accept non-vlan tagged traffic. */
24192ac5028Smsaitoh vmolr |= IXGBE_VMOLR_AUPE;
242dc7f84c8Smsaitoh
243dc7f84c8Smsaitoh /* Allow VM to tag outgoing traffic; no default tag. */
244dc7f84c8Smsaitoh vmvir = 0;
245dc7f84c8Smsaitoh } else {
246dc7f84c8Smsaitoh /* Require vlan-tagged traffic. */
247dc7f84c8Smsaitoh vmolr &= ~IXGBE_VMOLR_AUPE;
248dc7f84c8Smsaitoh
249dc7f84c8Smsaitoh /* Tag all traffic with provided vlan tag. */
250dc7f84c8Smsaitoh vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
251dc7f84c8Smsaitoh }
252dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
253dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
254dc7f84c8Smsaitoh } /* ixgbe_vf_set_default_vlan */
255dc7f84c8Smsaitoh
256dc7f84c8Smsaitoh
25722298f59Smsaitoh static void
ixgbe_clear_vfmbmem(struct ixgbe_softc * sc,struct ixgbe_vf * vf)258*55485da1Smsaitoh ixgbe_clear_vfmbmem(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
25922298f59Smsaitoh {
260*55485da1Smsaitoh struct ixgbe_hw *hw = &sc->hw;
26122298f59Smsaitoh uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
26222298f59Smsaitoh uint16_t mbx_size = hw->mbx.size;
26322298f59Smsaitoh uint16_t i;
26422298f59Smsaitoh
265*55485da1Smsaitoh IXGBE_CORE_LOCK_ASSERT(sc);
26622298f59Smsaitoh
26722298f59Smsaitoh for (i = 0; i < mbx_size; ++i)
26822298f59Smsaitoh IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
26922298f59Smsaitoh } /* ixgbe_clear_vfmbmem */
27022298f59Smsaitoh
27122298f59Smsaitoh
27229209018Smrg static bool
ixgbe_vf_frame_size_compatible(struct ixgbe_softc * sc,struct ixgbe_vf * vf)273*55485da1Smsaitoh ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
274dc7f84c8Smsaitoh {
275dc7f84c8Smsaitoh
276dc7f84c8Smsaitoh /*
277dc7f84c8Smsaitoh * Frame size compatibility between PF and VF is only a problem on
278dc7f84c8Smsaitoh * 82599-based cards. X540 and later support any combination of jumbo
279dc7f84c8Smsaitoh * frames on PFs and VFs.
280dc7f84c8Smsaitoh */
281*55485da1Smsaitoh if (sc->hw.mac.type != ixgbe_mac_82599EB)
282dc7f84c8Smsaitoh return (TRUE);
283dc7f84c8Smsaitoh
284dc7f84c8Smsaitoh switch (vf->api_ver) {
285dc7f84c8Smsaitoh case IXGBE_API_VER_1_0:
286dc7f84c8Smsaitoh case IXGBE_API_VER_UNKNOWN:
287dc7f84c8Smsaitoh /*
288dc7f84c8Smsaitoh * On legacy (1.0 and older) VF versions, we don't support jumbo
289dc7f84c8Smsaitoh * frames on either the PF or the VF.
290dc7f84c8Smsaitoh */
291*55485da1Smsaitoh if (sc->max_frame_size > ETHER_MAX_LEN ||
292dc7f84c8Smsaitoh vf->max_frame_size > ETHER_MAX_LEN)
293dc7f84c8Smsaitoh return (FALSE);
294dc7f84c8Smsaitoh
295dc7f84c8Smsaitoh return (TRUE);
296dc7f84c8Smsaitoh
297dc7f84c8Smsaitoh break;
298dc7f84c8Smsaitoh case IXGBE_API_VER_1_1:
299dc7f84c8Smsaitoh default:
300dc7f84c8Smsaitoh /*
301dc7f84c8Smsaitoh * 1.1 or later VF versions always work if they aren't using
302dc7f84c8Smsaitoh * jumbo frames.
303dc7f84c8Smsaitoh */
304dc7f84c8Smsaitoh if (vf->max_frame_size <= ETHER_MAX_LEN)
305dc7f84c8Smsaitoh return (TRUE);
306dc7f84c8Smsaitoh
307dc7f84c8Smsaitoh /*
308dc7f84c8Smsaitoh * Jumbo frames only work with VFs if the PF is also using jumbo
309dc7f84c8Smsaitoh * frames.
310dc7f84c8Smsaitoh */
311*55485da1Smsaitoh if (sc->max_frame_size <= ETHER_MAX_LEN)
312dc7f84c8Smsaitoh return (TRUE);
313dc7f84c8Smsaitoh
314dc7f84c8Smsaitoh return (FALSE);
315dc7f84c8Smsaitoh }
316dc7f84c8Smsaitoh } /* ixgbe_vf_frame_size_compatible */
317dc7f84c8Smsaitoh
318dc7f84c8Smsaitoh
319dc7f84c8Smsaitoh static void
ixgbe_process_vf_reset(struct ixgbe_softc * sc,struct ixgbe_vf * vf)320*55485da1Smsaitoh ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
321dc7f84c8Smsaitoh {
322*55485da1Smsaitoh ixgbe_vf_set_default_vlan(sc, vf, vf->default_vlan);
323dc7f84c8Smsaitoh
324dc7f84c8Smsaitoh // XXX clear multicast addresses
325dc7f84c8Smsaitoh
326*55485da1Smsaitoh ixgbe_clear_rar(&sc->hw, vf->rar_index);
327*55485da1Smsaitoh ixgbe_clear_vfmbmem(sc, vf);
328*55485da1Smsaitoh ixgbe_toggle_txdctl(&sc->hw, IXGBE_VF_INDEX(vf->pool));
329dc7f84c8Smsaitoh
330dc7f84c8Smsaitoh vf->api_ver = IXGBE_API_VER_UNKNOWN;
331dc7f84c8Smsaitoh } /* ixgbe_process_vf_reset */
332dc7f84c8Smsaitoh
333dc7f84c8Smsaitoh
334dc7f84c8Smsaitoh static void
ixgbe_vf_enable_transmit(struct ixgbe_softc * sc,struct ixgbe_vf * vf)335*55485da1Smsaitoh ixgbe_vf_enable_transmit(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
336dc7f84c8Smsaitoh {
337dc7f84c8Smsaitoh struct ixgbe_hw *hw;
338dc7f84c8Smsaitoh uint32_t vf_index, vfte;
339dc7f84c8Smsaitoh
340*55485da1Smsaitoh hw = &sc->hw;
341dc7f84c8Smsaitoh
342dc7f84c8Smsaitoh vf_index = IXGBE_VF_INDEX(vf->pool);
343dc7f84c8Smsaitoh vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
344dc7f84c8Smsaitoh vfte |= IXGBE_VF_BIT(vf->pool);
345dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
346dc7f84c8Smsaitoh } /* ixgbe_vf_enable_transmit */
347dc7f84c8Smsaitoh
348dc7f84c8Smsaitoh
349dc7f84c8Smsaitoh static void
ixgbe_vf_enable_receive(struct ixgbe_softc * sc,struct ixgbe_vf * vf)350*55485da1Smsaitoh ixgbe_vf_enable_receive(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
351dc7f84c8Smsaitoh {
352dc7f84c8Smsaitoh struct ixgbe_hw *hw;
353dc7f84c8Smsaitoh uint32_t vf_index, vfre;
354dc7f84c8Smsaitoh
355*55485da1Smsaitoh hw = &sc->hw;
356dc7f84c8Smsaitoh
357dc7f84c8Smsaitoh vf_index = IXGBE_VF_INDEX(vf->pool);
358dc7f84c8Smsaitoh vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
359*55485da1Smsaitoh if (ixgbe_vf_frame_size_compatible(sc, vf))
360dc7f84c8Smsaitoh vfre |= IXGBE_VF_BIT(vf->pool);
361dc7f84c8Smsaitoh else
362dc7f84c8Smsaitoh vfre &= ~IXGBE_VF_BIT(vf->pool);
363dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
364dc7f84c8Smsaitoh } /* ixgbe_vf_enable_receive */
365dc7f84c8Smsaitoh
366dc7f84c8Smsaitoh
367dc7f84c8Smsaitoh static void
ixgbe_vf_reset_msg(struct ixgbe_softc * sc,struct ixgbe_vf * vf,uint32_t * msg)368*55485da1Smsaitoh ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
369dc7f84c8Smsaitoh {
370dc7f84c8Smsaitoh struct ixgbe_hw *hw;
371dc7f84c8Smsaitoh uint32_t ack;
372dc7f84c8Smsaitoh uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];
373dc7f84c8Smsaitoh
374*55485da1Smsaitoh hw = &sc->hw;
375dc7f84c8Smsaitoh
376*55485da1Smsaitoh ixgbe_process_vf_reset(sc, vf);
377dc7f84c8Smsaitoh
378dc7f84c8Smsaitoh if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
379*55485da1Smsaitoh ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
380dc7f84c8Smsaitoh vf->pool, TRUE);
381ba990beaSmsaitoh ack = IXGBE_VT_MSGTYPE_SUCCESS;
382dc7f84c8Smsaitoh } else
383ba990beaSmsaitoh ack = IXGBE_VT_MSGTYPE_FAILURE;
384dc7f84c8Smsaitoh
385*55485da1Smsaitoh ixgbe_vf_enable_transmit(sc, vf);
386*55485da1Smsaitoh ixgbe_vf_enable_receive(sc, vf);
387dc7f84c8Smsaitoh
388dc7f84c8Smsaitoh vf->flags |= IXGBE_VF_CTS;
389dc7f84c8Smsaitoh
39092ac5028Smsaitoh resp[0] = IXGBE_VF_RESET | ack;
391dc7f84c8Smsaitoh bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
392dc7f84c8Smsaitoh resp[3] = hw->mac.mc_filter_type;
393dc7f84c8Smsaitoh hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
394dc7f84c8Smsaitoh } /* ixgbe_vf_reset_msg */
395dc7f84c8Smsaitoh
396dc7f84c8Smsaitoh
397dc7f84c8Smsaitoh static void
ixgbe_vf_set_mac(struct ixgbe_softc * sc,struct ixgbe_vf * vf,uint32_t * msg)398*55485da1Smsaitoh ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
399dc7f84c8Smsaitoh {
400dc7f84c8Smsaitoh uint8_t *mac;
401dc7f84c8Smsaitoh
402dc7f84c8Smsaitoh mac = (uint8_t*)&msg[1];
403dc7f84c8Smsaitoh
404dc7f84c8Smsaitoh /* Check that the VF has permission to change the MAC address. */
405dc7f84c8Smsaitoh if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
406*55485da1Smsaitoh ixgbe_send_vf_nack(sc, vf, msg[0]);
407dc7f84c8Smsaitoh return;
408dc7f84c8Smsaitoh }
409dc7f84c8Smsaitoh
410dc7f84c8Smsaitoh if (ixgbe_validate_mac_addr(mac) != 0) {
411*55485da1Smsaitoh ixgbe_send_vf_nack(sc, vf, msg[0]);
412dc7f84c8Smsaitoh return;
413dc7f84c8Smsaitoh }
414dc7f84c8Smsaitoh
415dc7f84c8Smsaitoh bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
416dc7f84c8Smsaitoh
417*55485da1Smsaitoh ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool,
418dc7f84c8Smsaitoh TRUE);
419dc7f84c8Smsaitoh
420*55485da1Smsaitoh ixgbe_send_vf_ack(sc, vf, msg[0]);
421dc7f84c8Smsaitoh } /* ixgbe_vf_set_mac */
422dc7f84c8Smsaitoh
423dc7f84c8Smsaitoh
424dc7f84c8Smsaitoh /*
425dc7f84c8Smsaitoh * VF multicast addresses are set by using the appropriate bit in
426dc7f84c8Smsaitoh * 1 of 128 32 bit addresses (4096 possible).
427dc7f84c8Smsaitoh */
428dc7f84c8Smsaitoh static void
ixgbe_vf_set_mc_addr(struct ixgbe_softc * sc,struct ixgbe_vf * vf,u32 * msg)429*55485da1Smsaitoh ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg)
430dc7f84c8Smsaitoh {
431dc7f84c8Smsaitoh u16 *list = (u16*)&msg[1];
432dc7f84c8Smsaitoh int entries;
433dc7f84c8Smsaitoh u32 vmolr, vec_bit, vec_reg, mta_reg;
434dc7f84c8Smsaitoh
435dc7f84c8Smsaitoh entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
436d1579b2dSriastradh entries = uimin(entries, IXGBE_MAX_VF_MC);
437dc7f84c8Smsaitoh
438*55485da1Smsaitoh vmolr = IXGBE_READ_REG(&sc->hw, IXGBE_VMOLR(vf->pool));
439dc7f84c8Smsaitoh
440dc7f84c8Smsaitoh vf->num_mc_hashes = entries;
441dc7f84c8Smsaitoh
442dc7f84c8Smsaitoh /* Set the appropriate MTA bit */
443dc7f84c8Smsaitoh for (int i = 0; i < entries; i++) {
444dc7f84c8Smsaitoh vf->mc_hash[i] = list[i];
445dc7f84c8Smsaitoh vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
446dc7f84c8Smsaitoh vec_bit = vf->mc_hash[i] & 0x1F;
447*55485da1Smsaitoh mta_reg = IXGBE_READ_REG(&sc->hw, IXGBE_MTA(vec_reg));
448dc7f84c8Smsaitoh mta_reg |= (1 << vec_bit);
449*55485da1Smsaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_MTA(vec_reg), mta_reg);
450dc7f84c8Smsaitoh }
451dc7f84c8Smsaitoh
452dc7f84c8Smsaitoh vmolr |= IXGBE_VMOLR_ROMPE;
453*55485da1Smsaitoh IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr);
454*55485da1Smsaitoh ixgbe_send_vf_ack(sc, vf, msg[0]);
455dc7f84c8Smsaitoh } /* ixgbe_vf_set_mc_addr */
456dc7f84c8Smsaitoh
457dc7f84c8Smsaitoh
458dc7f84c8Smsaitoh static void
ixgbe_vf_set_vlan(struct ixgbe_softc * sc,struct ixgbe_vf * vf,uint32_t * msg)459*55485da1Smsaitoh ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
460dc7f84c8Smsaitoh {
461dc7f84c8Smsaitoh struct ixgbe_hw *hw;
462dc7f84c8Smsaitoh int enable;
463dc7f84c8Smsaitoh uint16_t tag;
464dc7f84c8Smsaitoh
465*55485da1Smsaitoh hw = &sc->hw;
466dc7f84c8Smsaitoh enable = IXGBE_VT_MSGINFO(msg[0]);
467dc7f84c8Smsaitoh tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
468dc7f84c8Smsaitoh
469dc7f84c8Smsaitoh if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
470*55485da1Smsaitoh ixgbe_send_vf_nack(sc, vf, msg[0]);
471dc7f84c8Smsaitoh return;
472dc7f84c8Smsaitoh }
473dc7f84c8Smsaitoh
474dc7f84c8Smsaitoh /* It is illegal to enable vlan tag 0. */
475dc7f84c8Smsaitoh if (tag == 0 && enable != 0) {
476*55485da1Smsaitoh ixgbe_send_vf_nack(sc, vf, msg[0]);
477dc7f84c8Smsaitoh return;
478dc7f84c8Smsaitoh }
479dc7f84c8Smsaitoh
480dc7f84c8Smsaitoh ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
481*55485da1Smsaitoh ixgbe_send_vf_ack(sc, vf, msg[0]);
482dc7f84c8Smsaitoh } /* ixgbe_vf_set_vlan */
483dc7f84c8Smsaitoh
484dc7f84c8Smsaitoh
485dc7f84c8Smsaitoh static void
ixgbe_vf_set_lpe(struct ixgbe_softc * sc,struct ixgbe_vf * vf,uint32_t * msg)486*55485da1Smsaitoh ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
487dc7f84c8Smsaitoh {
488dc7f84c8Smsaitoh struct ixgbe_hw *hw;
489dc7f84c8Smsaitoh uint32_t vf_max_size, pf_max_size, mhadd;
490dc7f84c8Smsaitoh
491*55485da1Smsaitoh hw = &sc->hw;
492dc7f84c8Smsaitoh vf_max_size = msg[1];
493dc7f84c8Smsaitoh
494dc7f84c8Smsaitoh if (vf_max_size < ETHER_CRC_LEN) {
495dc7f84c8Smsaitoh /* We intentionally ACK invalid LPE requests. */
496*55485da1Smsaitoh ixgbe_send_vf_ack(sc, vf, msg[0]);
497dc7f84c8Smsaitoh return;
498dc7f84c8Smsaitoh }
499dc7f84c8Smsaitoh
500dc7f84c8Smsaitoh vf_max_size -= ETHER_CRC_LEN;
501dc7f84c8Smsaitoh
502dc7f84c8Smsaitoh if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
503dc7f84c8Smsaitoh /* We intentionally ACK invalid LPE requests. */
504*55485da1Smsaitoh ixgbe_send_vf_ack(sc, vf, msg[0]);
505dc7f84c8Smsaitoh return;
506dc7f84c8Smsaitoh }
507dc7f84c8Smsaitoh
508dc7f84c8Smsaitoh vf->max_frame_size = vf_max_size;
509*55485da1Smsaitoh ixgbe_update_max_frame(sc, vf->max_frame_size);
510dc7f84c8Smsaitoh
511dc7f84c8Smsaitoh /*
512dc7f84c8Smsaitoh * We might have to disable reception to this VF if the frame size is
513dc7f84c8Smsaitoh * not compatible with the config on the PF.
514dc7f84c8Smsaitoh */
515*55485da1Smsaitoh ixgbe_vf_enable_receive(sc, vf);
516dc7f84c8Smsaitoh
517dc7f84c8Smsaitoh mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
518dc7f84c8Smsaitoh pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
519dc7f84c8Smsaitoh
520*55485da1Smsaitoh if (pf_max_size < sc->max_frame_size) {
521dc7f84c8Smsaitoh mhadd &= ~IXGBE_MHADD_MFS_MASK;
522*55485da1Smsaitoh mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
523dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
524dc7f84c8Smsaitoh }
525dc7f84c8Smsaitoh
526*55485da1Smsaitoh ixgbe_send_vf_ack(sc, vf, msg[0]);
527dc7f84c8Smsaitoh } /* ixgbe_vf_set_lpe */
528dc7f84c8Smsaitoh
529dc7f84c8Smsaitoh
530dc7f84c8Smsaitoh static void
ixgbe_vf_set_macvlan(struct ixgbe_softc * sc,struct ixgbe_vf * vf,uint32_t * msg)531*55485da1Smsaitoh ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
532dc7f84c8Smsaitoh uint32_t *msg)
533dc7f84c8Smsaitoh {
534dc7f84c8Smsaitoh //XXX implement this
535*55485da1Smsaitoh ixgbe_send_vf_nack(sc, vf, msg[0]);
536dc7f84c8Smsaitoh } /* ixgbe_vf_set_macvlan */
537dc7f84c8Smsaitoh
538dc7f84c8Smsaitoh
539dc7f84c8Smsaitoh static void
ixgbe_vf_api_negotiate(struct ixgbe_softc * sc,struct ixgbe_vf * vf,uint32_t * msg)540*55485da1Smsaitoh ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
541dc7f84c8Smsaitoh uint32_t *msg)
542dc7f84c8Smsaitoh {
543dc7f84c8Smsaitoh
544dc7f84c8Smsaitoh switch (msg[1]) {
545dc7f84c8Smsaitoh case IXGBE_API_VER_1_0:
546dc7f84c8Smsaitoh case IXGBE_API_VER_1_1:
547dc7f84c8Smsaitoh vf->api_ver = msg[1];
548*55485da1Smsaitoh ixgbe_send_vf_ack(sc, vf, msg[0]);
549dc7f84c8Smsaitoh break;
550dc7f84c8Smsaitoh default:
551dc7f84c8Smsaitoh vf->api_ver = IXGBE_API_VER_UNKNOWN;
552*55485da1Smsaitoh ixgbe_send_vf_nack(sc, vf, msg[0]);
553dc7f84c8Smsaitoh break;
554dc7f84c8Smsaitoh }
555dc7f84c8Smsaitoh } /* ixgbe_vf_api_negotiate */
556dc7f84c8Smsaitoh
557dc7f84c8Smsaitoh
558dc7f84c8Smsaitoh static void
ixgbe_vf_get_queues(struct ixgbe_softc * sc,struct ixgbe_vf * vf,uint32_t * msg)559*55485da1Smsaitoh ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
560dc7f84c8Smsaitoh {
561dc7f84c8Smsaitoh struct ixgbe_hw *hw;
562dc7f84c8Smsaitoh uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
563dc7f84c8Smsaitoh int num_queues;
564dc7f84c8Smsaitoh
565*55485da1Smsaitoh hw = &sc->hw;
566dc7f84c8Smsaitoh
567dc7f84c8Smsaitoh /* GET_QUEUES is not supported on pre-1.1 APIs. */
568dc7f84c8Smsaitoh switch (msg[0]) {
569dc7f84c8Smsaitoh case IXGBE_API_VER_1_0:
570dc7f84c8Smsaitoh case IXGBE_API_VER_UNKNOWN:
571*55485da1Smsaitoh ixgbe_send_vf_nack(sc, vf, msg[0]);
572dc7f84c8Smsaitoh return;
573dc7f84c8Smsaitoh }
574dc7f84c8Smsaitoh
575ba990beaSmsaitoh resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
576dc7f84c8Smsaitoh IXGBE_VT_MSGTYPE_CTS;
577dc7f84c8Smsaitoh
578*55485da1Smsaitoh num_queues = ixgbe_vf_queues(sc->iov_mode);
579dc7f84c8Smsaitoh resp[IXGBE_VF_TX_QUEUES] = num_queues;
580dc7f84c8Smsaitoh resp[IXGBE_VF_RX_QUEUES] = num_queues;
581dc7f84c8Smsaitoh resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
582dc7f84c8Smsaitoh resp[IXGBE_VF_DEF_QUEUE] = 0;
583dc7f84c8Smsaitoh
584dc7f84c8Smsaitoh hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
585dc7f84c8Smsaitoh } /* ixgbe_vf_get_queues */
586dc7f84c8Smsaitoh
587dc7f84c8Smsaitoh
588dc7f84c8Smsaitoh static void
ixgbe_process_vf_msg(struct ixgbe_softc * sc,struct ixgbe_vf * vf)589*55485da1Smsaitoh ixgbe_process_vf_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
590dc7f84c8Smsaitoh {
591dc7f84c8Smsaitoh struct ixgbe_hw *hw;
592dc7f84c8Smsaitoh uint32_t msg[IXGBE_VFMAILBOX_SIZE];
593dc7f84c8Smsaitoh int error;
594dc7f84c8Smsaitoh
595*55485da1Smsaitoh hw = &sc->hw;
596dc7f84c8Smsaitoh
5971e393addSmsaitoh error = hw->mbx.ops[vf->pool].read(hw, msg, IXGBE_VFMAILBOX_SIZE,
5981e393addSmsaitoh vf->pool);
599dc7f84c8Smsaitoh
600dc7f84c8Smsaitoh if (error != 0)
601dc7f84c8Smsaitoh return;
602dc7f84c8Smsaitoh
603*55485da1Smsaitoh CTR3(KTR_MALLOC, "%s: received msg %x from %d",
604*55485da1Smsaitoh sc->ifp->if_xname, msg[0], vf->pool);
605dc7f84c8Smsaitoh if (msg[0] == IXGBE_VF_RESET) {
606*55485da1Smsaitoh ixgbe_vf_reset_msg(sc, vf, msg);
607dc7f84c8Smsaitoh return;
608dc7f84c8Smsaitoh }
609dc7f84c8Smsaitoh
610dc7f84c8Smsaitoh if (!(vf->flags & IXGBE_VF_CTS)) {
611*55485da1Smsaitoh ixgbe_send_vf_nack(sc, vf, msg[0]);
612dc7f84c8Smsaitoh return;
613dc7f84c8Smsaitoh }
614dc7f84c8Smsaitoh
615dc7f84c8Smsaitoh switch (msg[0] & IXGBE_VT_MSG_MASK) {
616dc7f84c8Smsaitoh case IXGBE_VF_SET_MAC_ADDR:
617*55485da1Smsaitoh ixgbe_vf_set_mac(sc, vf, msg);
618dc7f84c8Smsaitoh break;
619dc7f84c8Smsaitoh case IXGBE_VF_SET_MULTICAST:
620*55485da1Smsaitoh ixgbe_vf_set_mc_addr(sc, vf, msg);
621dc7f84c8Smsaitoh break;
622dc7f84c8Smsaitoh case IXGBE_VF_SET_VLAN:
623*55485da1Smsaitoh ixgbe_vf_set_vlan(sc, vf, msg);
624dc7f84c8Smsaitoh break;
625dc7f84c8Smsaitoh case IXGBE_VF_SET_LPE:
626*55485da1Smsaitoh ixgbe_vf_set_lpe(sc, vf, msg);
627dc7f84c8Smsaitoh break;
628dc7f84c8Smsaitoh case IXGBE_VF_SET_MACVLAN:
629*55485da1Smsaitoh ixgbe_vf_set_macvlan(sc, vf, msg);
630dc7f84c8Smsaitoh break;
631dc7f84c8Smsaitoh case IXGBE_VF_API_NEGOTIATE:
632*55485da1Smsaitoh ixgbe_vf_api_negotiate(sc, vf, msg);
633dc7f84c8Smsaitoh break;
634dc7f84c8Smsaitoh case IXGBE_VF_GET_QUEUES:
635*55485da1Smsaitoh ixgbe_vf_get_queues(sc, vf, msg);
636dc7f84c8Smsaitoh break;
637dc7f84c8Smsaitoh default:
638*55485da1Smsaitoh ixgbe_send_vf_nack(sc, vf, msg[0]);
639dc7f84c8Smsaitoh }
640dc7f84c8Smsaitoh } /* ixgbe_process_vf_msg */
641dc7f84c8Smsaitoh
642dc7f84c8Smsaitoh
643dc7f84c8Smsaitoh /* Tasklet for handling VF -> PF mailbox messages */
644dc7f84c8Smsaitoh void
ixgbe_handle_mbx(void * context)64575695abcSmsaitoh ixgbe_handle_mbx(void *context)
646dc7f84c8Smsaitoh {
647*55485da1Smsaitoh struct ixgbe_softc *sc = context;
648dc7f84c8Smsaitoh struct ixgbe_hw *hw;
649dc7f84c8Smsaitoh struct ixgbe_vf *vf;
650dc7f84c8Smsaitoh int i;
651dc7f84c8Smsaitoh
652*55485da1Smsaitoh KASSERT(mutex_owned(&sc->core_mtx));
65364088230Smsaitoh
654*55485da1Smsaitoh hw = &sc->hw;
655dc7f84c8Smsaitoh
656*55485da1Smsaitoh for (i = 0; i < sc->num_vfs; i++) {
657*55485da1Smsaitoh vf = &sc->vfs[i];
658dc7f84c8Smsaitoh
6591e393addSmsaitoh if ((vf->flags & IXGBE_VF_ACTIVE) == 0)
6601e393addSmsaitoh continue;
6611e393addSmsaitoh
6621e393addSmsaitoh if (hw->mbx.ops[vf->pool].check_for_rst(hw, vf->pool) == 0)
663*55485da1Smsaitoh ixgbe_process_vf_reset(sc, vf);
664dc7f84c8Smsaitoh
6651e393addSmsaitoh if (hw->mbx.ops[vf->pool].check_for_msg(hw, vf->pool) == 0)
666*55485da1Smsaitoh ixgbe_process_vf_msg(sc, vf);
667dc7f84c8Smsaitoh
6681e393addSmsaitoh if (hw->mbx.ops[vf->pool].check_for_ack(hw, vf->pool) == 0)
669*55485da1Smsaitoh ixgbe_process_vf_ack(sc, vf);
670dc7f84c8Smsaitoh }
671dc7f84c8Smsaitoh } /* ixgbe_handle_mbx */
672dc7f84c8Smsaitoh
/*
 * Bus-facing entry point: configure the PF for SR-IOV operation with
 * num_vfs virtual functions.
 *
 * Chooses the VT pool mode (32 or 64 pools), reserves the last pool's
 * worth of queues for the PF itself, allocates the per-VF state array,
 * initializes the PF mailbox parameters and reinitializes the device
 * with the SRIOV feature enabled.
 *
 * Returns 0 on success or an errno (EINVAL for zero VFs, ENOSPC when
 * the request exceeds the chosen mode's capacity, ENOMEM on allocation
 * failure); on failure the softc's IOV state is reset to "no VMs".
 * The `config' nvlist is currently unused here.
 */
int
ixgbe_init_iov(device_t dev, u16 num_vfs, const nvlist_t *config)
{
	struct ixgbe_softc *sc;
	int retval = 0;

	sc = device_get_softc(dev);
	sc->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		sc->iov_mode = IXGBE_64_VM;
	else
		sc->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	sc->pool = sc->iov_mode - 1;

	if ((num_vfs > sc->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	IXGBE_CORE_LOCK(sc);

	/* M_NOWAIT: we are under the core lock and must not sleep. */
	sc->vfs = malloc(sizeof(*sc->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (sc->vfs == NULL) {
		retval = ENOMEM;
		IXGBE_CORE_UNLOCK(sc);
		goto err_init_iov;
	}

	sc->num_vfs = num_vfs;
	ixgbe_init_mbx_params_pf(&sc->hw);

	/* set the SRIOV flag now as it's needed
	 * by ixgbe_init_locked() */
	sc->feat_en |= IXGBE_FEATURE_SRIOV;
	sc->init_locked(sc);

	IXGBE_CORE_UNLOCK(sc);

	return retval;

err_init_iov:
	/* Roll the softc back to the "SR-IOV disabled" state. */
	sc->num_vfs = 0;
	sc->pool = 0;
	sc->iov_mode = IXGBE_NO_VM;

	return retval;
} /* ixgbe_init_iov */
737dc7f84c8Smsaitoh
738dc7f84c8Smsaitoh void
ixgbe_uninit_iov(device_t dev)739dc7f84c8Smsaitoh ixgbe_uninit_iov(device_t dev)
740dc7f84c8Smsaitoh {
741dc7f84c8Smsaitoh struct ixgbe_hw *hw;
742*55485da1Smsaitoh struct ixgbe_softc *sc;
743dc7f84c8Smsaitoh uint32_t pf_reg, vf_reg;
744dc7f84c8Smsaitoh
745*55485da1Smsaitoh sc = device_get_softc(dev);
746*55485da1Smsaitoh hw = &sc->hw;
747dc7f84c8Smsaitoh
748*55485da1Smsaitoh IXGBE_CORE_LOCK(sc);
749dc7f84c8Smsaitoh
750dc7f84c8Smsaitoh /* Enable rx/tx for the PF and disable it for all VFs. */
751*55485da1Smsaitoh pf_reg = IXGBE_VF_INDEX(sc->pool);
752*55485da1Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(sc->pool));
753*55485da1Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(sc->pool));
754dc7f84c8Smsaitoh
755dc7f84c8Smsaitoh if (pf_reg == 0)
756dc7f84c8Smsaitoh vf_reg = 1;
757dc7f84c8Smsaitoh else
758dc7f84c8Smsaitoh vf_reg = 0;
759dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
760dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);
761dc7f84c8Smsaitoh
762dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);
763dc7f84c8Smsaitoh
764*55485da1Smsaitoh free(sc->vfs, M_IXGBE_SRIOV);
765*55485da1Smsaitoh sc->vfs = NULL;
766*55485da1Smsaitoh sc->num_vfs = 0;
767*55485da1Smsaitoh sc->feat_en &= ~IXGBE_FEATURE_SRIOV;
768dc7f84c8Smsaitoh
769*55485da1Smsaitoh IXGBE_CORE_UNLOCK(sc);
770dc7f84c8Smsaitoh } /* ixgbe_uninit_iov */
771dc7f84c8Smsaitoh
772dc7f84c8Smsaitoh static void
ixgbe_init_vf(struct ixgbe_softc * sc,struct ixgbe_vf * vf)773*55485da1Smsaitoh ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
774dc7f84c8Smsaitoh {
775dc7f84c8Smsaitoh struct ixgbe_hw *hw;
776dc7f84c8Smsaitoh uint32_t vf_index, pfmbimr;
777dc7f84c8Smsaitoh
778*55485da1Smsaitoh IXGBE_CORE_LOCK_ASSERT(sc);
779dc7f84c8Smsaitoh
780*55485da1Smsaitoh hw = &sc->hw;
781dc7f84c8Smsaitoh
782dc7f84c8Smsaitoh if (!(vf->flags & IXGBE_VF_ACTIVE))
783dc7f84c8Smsaitoh return;
784dc7f84c8Smsaitoh
785dc7f84c8Smsaitoh vf_index = IXGBE_VF_INDEX(vf->pool);
786dc7f84c8Smsaitoh pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
787dc7f84c8Smsaitoh pfmbimr |= IXGBE_VF_BIT(vf->pool);
788dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);
789dc7f84c8Smsaitoh
790*55485da1Smsaitoh ixgbe_vf_set_default_vlan(sc, vf, vf->vlan_tag);
791dc7f84c8Smsaitoh
792dc7f84c8Smsaitoh // XXX multicast addresses
793dc7f84c8Smsaitoh
794dc7f84c8Smsaitoh if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
795*55485da1Smsaitoh ixgbe_set_rar(&sc->hw, vf->rar_index,
796dc7f84c8Smsaitoh vf->ether_addr, vf->pool, TRUE);
797dc7f84c8Smsaitoh }
798dc7f84c8Smsaitoh
799*55485da1Smsaitoh ixgbe_vf_enable_transmit(sc, vf);
800*55485da1Smsaitoh ixgbe_vf_enable_receive(sc, vf);
801dc7f84c8Smsaitoh
802*55485da1Smsaitoh ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
803dc7f84c8Smsaitoh } /* ixgbe_init_vf */
804dc7f84c8Smsaitoh
805dc7f84c8Smsaitoh void
ixgbe_initialize_iov(struct ixgbe_softc * sc)806*55485da1Smsaitoh ixgbe_initialize_iov(struct ixgbe_softc *sc)
807dc7f84c8Smsaitoh {
808*55485da1Smsaitoh struct ixgbe_hw *hw = &sc->hw;
809dc7f84c8Smsaitoh uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
810dc7f84c8Smsaitoh int i;
811dc7f84c8Smsaitoh
812*55485da1Smsaitoh if (sc->iov_mode == IXGBE_NO_VM)
813dc7f84c8Smsaitoh return;
814dc7f84c8Smsaitoh
815*55485da1Smsaitoh IXGBE_CORE_LOCK_ASSERT(sc);
816dc7f84c8Smsaitoh
817dc7f84c8Smsaitoh /* RMW appropriate registers based on IOV mode */
818dc7f84c8Smsaitoh /* Read... */
819dc7f84c8Smsaitoh mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
820dc7f84c8Smsaitoh gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
821dc7f84c8Smsaitoh gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
822dc7f84c8Smsaitoh /* Modify... */
823dc7f84c8Smsaitoh mrqc &= ~IXGBE_MRQC_MRQE_MASK;
824dc7f84c8Smsaitoh mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
825dc7f84c8Smsaitoh gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
826dc7f84c8Smsaitoh gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
827dc7f84c8Smsaitoh gpie &= ~IXGBE_GPIE_VTMODE_MASK;
828*55485da1Smsaitoh switch (sc->iov_mode) {
829dc7f84c8Smsaitoh case IXGBE_64_VM:
830dc7f84c8Smsaitoh mrqc |= IXGBE_MRQC_VMDQRSS64EN;
831dc7f84c8Smsaitoh mtqc |= IXGBE_MTQC_64VF;
832dc7f84c8Smsaitoh gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
833dc7f84c8Smsaitoh gpie |= IXGBE_GPIE_VTMODE_64;
834dc7f84c8Smsaitoh break;
835dc7f84c8Smsaitoh case IXGBE_32_VM:
836dc7f84c8Smsaitoh mrqc |= IXGBE_MRQC_VMDQRSS32EN;
837dc7f84c8Smsaitoh mtqc |= IXGBE_MTQC_32VF;
838dc7f84c8Smsaitoh gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
839dc7f84c8Smsaitoh gpie |= IXGBE_GPIE_VTMODE_32;
840dc7f84c8Smsaitoh break;
841dc7f84c8Smsaitoh default:
842*55485da1Smsaitoh panic("Unexpected SR-IOV mode %d", sc->iov_mode);
843dc7f84c8Smsaitoh }
844dc7f84c8Smsaitoh /* Write... */
845dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
846dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
847dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
848dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
849dc7f84c8Smsaitoh
850dc7f84c8Smsaitoh /* Enable rx/tx for the PF. */
851*55485da1Smsaitoh vf_reg = IXGBE_VF_INDEX(sc->pool);
852*55485da1Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(sc->pool));
853*55485da1Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(sc->pool));
854dc7f84c8Smsaitoh
855dc7f84c8Smsaitoh /* Allow VM-to-VM communication. */
856dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
857dc7f84c8Smsaitoh
858dc7f84c8Smsaitoh vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
859*55485da1Smsaitoh vt_ctl |= (sc->pool << IXGBE_VT_CTL_POOL_SHIFT);
860dc7f84c8Smsaitoh IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
861dc7f84c8Smsaitoh
862*55485da1Smsaitoh for (i = 0; i < sc->num_vfs; i++)
863*55485da1Smsaitoh ixgbe_init_vf(sc, &sc->vfs[i]);
864dc7f84c8Smsaitoh } /* ixgbe_initialize_iov */
865dc7f84c8Smsaitoh
866dc7f84c8Smsaitoh
867dc7f84c8Smsaitoh /* Check the max frame setting of all active VF's */
868dc7f84c8Smsaitoh void
ixgbe_recalculate_max_frame(struct ixgbe_softc * sc)869*55485da1Smsaitoh ixgbe_recalculate_max_frame(struct ixgbe_softc *sc)
870dc7f84c8Smsaitoh {
871dc7f84c8Smsaitoh struct ixgbe_vf *vf;
872dc7f84c8Smsaitoh
873*55485da1Smsaitoh IXGBE_CORE_LOCK_ASSERT(sc);
874dc7f84c8Smsaitoh
875*55485da1Smsaitoh for (int i = 0; i < sc->num_vfs; i++) {
876*55485da1Smsaitoh vf = &sc->vfs[i];
877dc7f84c8Smsaitoh if (vf->flags & IXGBE_VF_ACTIVE)
878*55485da1Smsaitoh ixgbe_update_max_frame(sc, vf->max_frame_size);
879dc7f84c8Smsaitoh }
880dc7f84c8Smsaitoh } /* ixgbe_recalculate_max_frame */
881dc7f84c8Smsaitoh
882dc7f84c8Smsaitoh int
ixgbe_add_vf(device_t dev,u16 vfnum,const nvlist_t * config)883dc7f84c8Smsaitoh ixgbe_add_vf(device_t dev, u16 vfnum, const nvlist_t *config)
884dc7f84c8Smsaitoh {
885*55485da1Smsaitoh struct ixgbe_softc *sc;
886dc7f84c8Smsaitoh struct ixgbe_vf *vf;
887dc7f84c8Smsaitoh const void *mac;
888dc7f84c8Smsaitoh
889*55485da1Smsaitoh sc = device_get_softc(dev);
890dc7f84c8Smsaitoh
891*55485da1Smsaitoh KASSERT(vfnum < sc->num_vfs, ("VF index %d is out of range %d",
892*55485da1Smsaitoh vfnum, sc->num_vfs));
893dc7f84c8Smsaitoh
894*55485da1Smsaitoh IXGBE_CORE_LOCK(sc);
895*55485da1Smsaitoh vf = &sc->vfs[vfnum];
896dc7f84c8Smsaitoh vf->pool= vfnum;
897dc7f84c8Smsaitoh
898dc7f84c8Smsaitoh /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
899dc7f84c8Smsaitoh vf->rar_index = vfnum + 1;
900dc7f84c8Smsaitoh vf->default_vlan = 0;
901dc7f84c8Smsaitoh vf->max_frame_size = ETHER_MAX_LEN;
902*55485da1Smsaitoh ixgbe_update_max_frame(sc, vf->max_frame_size);
903dc7f84c8Smsaitoh
904dc7f84c8Smsaitoh if (nvlist_exists_binary(config, "mac-addr")) {
905dc7f84c8Smsaitoh mac = nvlist_get_binary(config, "mac-addr", NULL);
906dc7f84c8Smsaitoh bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
907dc7f84c8Smsaitoh if (nvlist_get_bool(config, "allow-set-mac"))
908dc7f84c8Smsaitoh vf->flags |= IXGBE_VF_CAP_MAC;
909dc7f84c8Smsaitoh } else
910dc7f84c8Smsaitoh /*
911dc7f84c8Smsaitoh * If the administrator has not specified a MAC address then
912dc7f84c8Smsaitoh * we must allow the VF to choose one.
913dc7f84c8Smsaitoh */
914dc7f84c8Smsaitoh vf->flags |= IXGBE_VF_CAP_MAC;
915dc7f84c8Smsaitoh
916dc7f84c8Smsaitoh vf->flags |= IXGBE_VF_ACTIVE;
917dc7f84c8Smsaitoh
918*55485da1Smsaitoh ixgbe_init_vf(sc, vf);
919*55485da1Smsaitoh IXGBE_CORE_UNLOCK(sc);
920dc7f84c8Smsaitoh
921dc7f84c8Smsaitoh return (0);
922dc7f84c8Smsaitoh } /* ixgbe_add_vf */
923dc7f84c8Smsaitoh
924dc7f84c8Smsaitoh #else
925dc7f84c8Smsaitoh
/*
 * Stub used when SR-IOV support is not compiled in: there is no VF
 * mailbox to service, so the handler does nothing.
 */
void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_1PARAMETER(context);
} /* ixgbe_handle_mbx */
931dc7f84c8Smsaitoh
/*
 * Stub used when SR-IOV support is not compiled in: with no VF pools
 * the queue index is just "num" itself; "mode" and "vfnum" are unused.
 */
inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	UNREFERENCED_2PARAMETER(mode, vfnum);

	return num;
} /* ixgbe_vf_que_index */
939dc7f84c8Smsaitoh
940dc7f84c8Smsaitoh #endif
941