xref: /minix3/minix/net/lwip/ethif.c (revision ef8d499e2d2af900e9b2ab297171d7b088652482)
1*ef8d499eSDavid van Moolenbroek /* LWIP service - ethif.c - ethernet interfaces */
2*ef8d499eSDavid van Moolenbroek /*
3*ef8d499eSDavid van Moolenbroek  * The most important aspect of this module is to maintain a send queue for the
4*ef8d499eSDavid van Moolenbroek  * interface.  This send queue consists of packets to send.  At times, the user
5*ef8d499eSDavid van Moolenbroek  * may request a change to the driver configuration.  While configuration
6*ef8d499eSDavid van Moolenbroek  * requests would ideally be enqueued in the send queue, this has proven too
7*ef8d499eSDavid van Moolenbroek  * problematic to work in practice, especially since out-of-memory conditions
8*ef8d499eSDavid van Moolenbroek  * may prevent configuration requests from being accepted immediately in such a
9*ef8d499eSDavid van Moolenbroek  * model.  Instead, we take a simple and blunt approach: configuration requests
10*ef8d499eSDavid van Moolenbroek  * "cut in line" and thus take precedence over pending packets in the send
11*ef8d499eSDavid van Moolenbroek  * queue.  This may not always be entirely correct: for example, packets may be
12*ef8d499eSDavid van Moolenbroek  * transmitted with the old ethernet address after the network device has
13*ef8d499eSDavid van Moolenbroek  * already been reconfigured to receive from a new ethernet address.  However,
14*ef8d499eSDavid van Moolenbroek  * this should not be a real problem, and we take care explicitly of perhaps
15*ef8d499eSDavid van Moolenbroek  * the most problematic case: packets not getting checksummed due to checksum
16*ef8d499eSDavid van Moolenbroek  * offloading configuration changes.
17*ef8d499eSDavid van Moolenbroek  *
18*ef8d499eSDavid van Moolenbroek  * Even with this blunt approach, we maintain three concurrent configurations:
19*ef8d499eSDavid van Moolenbroek  * the active, the pending, and the wanted configuration.  The active one is
20*ef8d499eSDavid van Moolenbroek  * the last known active configuration at the network driver.  It used not only
 * the last known active configuration at the network driver.  It is used not only
22*ef8d499eSDavid van Moolenbroek  * active configuration to a restarted driver.  The pending configuration is
23*ef8d499eSDavid van Moolenbroek  * a partially new configuration that has been given to ndev to send to the
24*ef8d499eSDavid van Moolenbroek  * driver, but not yet acknowledged by the driver.  Finally, the wanted
25*ef8d499eSDavid van Moolenbroek  * configuration is the latest one that has yet to be given to ndev.
26*ef8d499eSDavid van Moolenbroek  *
27*ef8d499eSDavid van Moolenbroek  * Each configuration has a bitmask indicating which part of the configuration
28*ef8d499eSDavid van Moolenbroek  * has changed, in order to limit work on the driver side.  This is also the
29*ef8d499eSDavid van Moolenbroek  * reason that the pending and wanted configurations are separate: if e.g. a
30*ef8d499eSDavid van Moolenbroek  * media change is pending at the driver, and the user also requests a mode
31*ef8d499eSDavid van Moolenbroek  * change, we do not want the media change to be repeated after it has been
 * acknowledged by the driver, just to change the mode as well.  In this example
33*ef8d499eSDavid van Moolenbroek  * the pending configuration will have NDEV_SET_MEDIA set, and the wanted
34*ef8d499eSDavid van Moolenbroek  * configuration will have NDEV_SET_MODE set.  Once acknowledged, the pending
35*ef8d499eSDavid van Moolenbroek  * bitmask is cleared and the wanted bitmask is tested to see if another
36*ef8d499eSDavid van Moolenbroek  * configuration change should be given to ndev.  Technically, this could lead
37*ef8d499eSDavid van Moolenbroek  * to starvation of actual packet transmission, but we expect configuration
38*ef8d499eSDavid van Moolenbroek  * changes to be very rare, since they are always user initiated.
39*ef8d499eSDavid van Moolenbroek  *
40*ef8d499eSDavid van Moolenbroek  * It is important to note for understanding the code that for some fields
41*ef8d499eSDavid van Moolenbroek  * (mode, flags, caps), the three configurations are cascading: even though the
42*ef8d499eSDavid van Moolenbroek  * wanted configuration may not have NDEV_SET_MODE set, its mode field will
43*ef8d499eSDavid van Moolenbroek  * still contain the most recently requested mode; that is, the mode in the
44*ef8d499eSDavid van Moolenbroek  * pending configuration if that one has NDEV_SET_MODE set, or otherwise the
45*ef8d499eSDavid van Moolenbroek  * mode in the active configuration.  For that reason, we carefully merge
46*ef8d499eSDavid van Moolenbroek  * configuration requests into the next level (wanted -> pending -> active),
47*ef8d499eSDavid van Moolenbroek  * updating just the fields that have been changed by the previous level.  This
48*ef8d499eSDavid van Moolenbroek  * approach simplifies obtaining current values a lot, but is not very obvious.
49*ef8d499eSDavid van Moolenbroek  *
50*ef8d499eSDavid van Moolenbroek  * Also, we never send multiple configuration requests at once, even though
51*ef8d499eSDavid van Moolenbroek  * ndev would let us do that: we use a single array for the list of multicast
52*ef8d499eSDavid van Moolenbroek  * ethernet addresses that we send to the driver, which the driver may retrieve
53*ef8d499eSDavid van Moolenbroek  * (using a memory grant) at any time.  We necessarily recompute the multicast
54*ef8d499eSDavid van Moolenbroek  * list before sending a configuration request, and thus, sending multiple
55*ef8d499eSDavid van Moolenbroek  * requests at once may lead to the driver retrieving a corrupted list.
56*ef8d499eSDavid van Moolenbroek  */
57*ef8d499eSDavid van Moolenbroek 
58*ef8d499eSDavid van Moolenbroek #include "lwip.h"
59*ef8d499eSDavid van Moolenbroek #include "ethif.h"
60*ef8d499eSDavid van Moolenbroek 
61*ef8d499eSDavid van Moolenbroek #include "lwip/etharp.h"
62*ef8d499eSDavid van Moolenbroek #include "lwip/ethip6.h"
63*ef8d499eSDavid van Moolenbroek #include "lwip/igmp.h"
64*ef8d499eSDavid van Moolenbroek #include "lwip/mld6.h"
65*ef8d499eSDavid van Moolenbroek 
66*ef8d499eSDavid van Moolenbroek #include <net/if_media.h>
67*ef8d499eSDavid van Moolenbroek 
68*ef8d499eSDavid van Moolenbroek #define ETHIF_MAX_MTU	1500		/* maximum MTU value for ethernet */
69*ef8d499eSDavid van Moolenbroek #define ETHIF_DEF_MTU	ETHIF_MAX_MTU	/* default MTU value that we use */
70*ef8d499eSDavid van Moolenbroek 
71*ef8d499eSDavid van Moolenbroek #define ETHIF_MCAST_MAX	8	/* maximum number of multicast addresses */
72*ef8d499eSDavid van Moolenbroek 
/*
 * Per-interface state.  Each ethif object pairs a generic interface device
 * (which MUST remain the first member, so that an ethif pointer can be used
 * where an ifdev pointer is expected and vice versa) with the state needed to
 * drive one network driver: the three-stage configuration described in the
 * comment at the top of this file, the multicast address list shared with the
 * driver, and the send and receive queues.
 */
struct ethif {
	struct ifdev ethif_ifdev;	/* interface device, MUST be first */
	ndev_id_t ethif_ndev;		/* network device ID */
	unsigned int ethif_flags;	/* interface flags (ETHIFF_) */
	uint32_t ethif_caps;		/* driver capabilities (NDEV_CAPS_) */
	uint32_t ethif_media;		/* driver-reported media type (IFM_) */
	struct ndev_conf ethif_active;	/* active configuration (at driver) */
	struct ndev_conf ethif_pending; /* pending configuration (at ndev) */
	struct ndev_conf ethif_wanted;	/* desired configuration (waiting) */
	struct ndev_hwaddr ethif_mclist[ETHIF_MCAST_MAX]; /* multicast list */
	struct {			/* send queue (packet/conf refs) */
		struct pbuf *es_head;	/* first (oldest) request reference */
		struct pbuf **es_unsentp; /* ptr-ptr to first unsent request */
		struct pbuf **es_tailp;	/* ptr-ptr for adding new requests */
		unsigned int es_count;	/* buffer count, see ETHIF_PBUF_.. */
	} ethif_snd;
	struct {			/* receive queue (packets) */
		struct pbuf *er_head;	/* first (oldest) request buffer */
		struct pbuf **er_tailp;	/* ptr-ptr for adding new requests */
	} ethif_rcv;
	SIMPLEQ_ENTRY(ethif) ethif_next; /* next in free list */
} ethif_array[NR_NDEV];	/* any other value would be suboptimal */
95*ef8d499eSDavid van Moolenbroek 
96*ef8d499eSDavid van Moolenbroek #define ethif_get_name(ethif)	(ifdev_get_name(&(ethif)->ethif_ifdev))
97*ef8d499eSDavid van Moolenbroek #define ethif_get_netif(ethif)	(ifdev_get_netif(&(ethif)->ethif_ifdev))
98*ef8d499eSDavid van Moolenbroek 
99*ef8d499eSDavid van Moolenbroek #define ETHIFF_DISABLED		0x01	/* driver has disappeared */
100*ef8d499eSDavid van Moolenbroek #define ETHIFF_FIRST_CONF	0x02	/* first configuration request sent */
101*ef8d499eSDavid van Moolenbroek 
102*ef8d499eSDavid van Moolenbroek /*
103*ef8d499eSDavid van Moolenbroek  * Send queue limit settings.  Both are counted in number of pbuf objects.
104*ef8d499eSDavid van Moolenbroek  * ETHIF_PBUF_MIN is the minimum number of pbuf objects that can always be
105*ef8d499eSDavid van Moolenbroek  * enqueued on a particular interface's send queue.  It should be at least the
106*ef8d499eSDavid van Moolenbroek  * number of pbufs for one single packet after being reduced to the ndev limit,
107*ef8d499eSDavid van Moolenbroek  * so NDEV_IOV_MAX (8) is a natural fit.  The ETHIF_PBUF_MAX_n values define
108*ef8d499eSDavid van Moolenbroek  * the maximum number of pbufs that may be used by all interface send queues
109*ef8d499eSDavid van Moolenbroek  * combined, whichever of the two is smaller.  The resulting number must be set
110*ef8d499eSDavid van Moolenbroek  * fairly high, because at any time there may be a lot of active TCP sockets
111*ef8d499eSDavid van Moolenbroek  * that all generate a (multi-pbuf) packet as a result of a clock tick.  It is
112*ef8d499eSDavid van Moolenbroek  * currently a function of the size of the buffer pool, capped to a value that
113*ef8d499eSDavid van Moolenbroek  * is a function of the number of TCP sockets (assuming one packet per socket;
114*ef8d499eSDavid van Moolenbroek  * up to MSS/BUFSIZE+1 data pbufs, one header pbuf, one extra as margin).  The
115*ef8d499eSDavid van Moolenbroek  * difference between the per-interface guaranteed minimum and the global
116*ef8d499eSDavid van Moolenbroek  * maximum is what makes up a pool of "spares", which are really just tokens
117*ef8d499eSDavid van Moolenbroek  * allowing for enqueuing of that many pbufs.
118*ef8d499eSDavid van Moolenbroek  */
119*ef8d499eSDavid van Moolenbroek #define ETHIF_PBUF_MIN		(NDEV_IOV_MAX)
120*ef8d499eSDavid van Moolenbroek #define ETHIF_PBUF_MAX_1	(mempool_cur_buffers() >> 1)
121*ef8d499eSDavid van Moolenbroek #define ETHIF_PBUF_MAX_2	(NR_TCPSOCK * (TCP_MSS / MEMPOOL_BUFSIZE + 3))
122*ef8d499eSDavid van Moolenbroek 
123*ef8d499eSDavid van Moolenbroek static unsigned int ethif_spares;
124*ef8d499eSDavid van Moolenbroek 
125*ef8d499eSDavid van Moolenbroek static SIMPLEQ_HEAD(, ethif) ethif_freelist;	/* free ethif objects */
126*ef8d499eSDavid van Moolenbroek 
127*ef8d499eSDavid van Moolenbroek static const struct ifdev_ops ethif_ops;
128*ef8d499eSDavid van Moolenbroek 
129*ef8d499eSDavid van Moolenbroek #ifdef INET6
130*ef8d499eSDavid van Moolenbroek static ip6_addr_t ethif_ip6addr_allnodes_ll;
131*ef8d499eSDavid van Moolenbroek #endif /* INET6 */
132*ef8d499eSDavid van Moolenbroek 
133*ef8d499eSDavid van Moolenbroek /*
134*ef8d499eSDavid van Moolenbroek  * Initialize the ethernet interfaces module.
135*ef8d499eSDavid van Moolenbroek  */
136*ef8d499eSDavid van Moolenbroek void
ethif_init(void)137*ef8d499eSDavid van Moolenbroek ethif_init(void)
138*ef8d499eSDavid van Moolenbroek {
139*ef8d499eSDavid van Moolenbroek 	unsigned int slot;
140*ef8d499eSDavid van Moolenbroek 
141*ef8d499eSDavid van Moolenbroek 	/* Initialize the list of free ethif objects. */
142*ef8d499eSDavid van Moolenbroek 	SIMPLEQ_INIT(&ethif_freelist);
143*ef8d499eSDavid van Moolenbroek 
144*ef8d499eSDavid van Moolenbroek 	for (slot = 0; slot < __arraycount(ethif_array); slot++)
145*ef8d499eSDavid van Moolenbroek 		SIMPLEQ_INSERT_TAIL(&ethif_freelist, &ethif_array[slot],
146*ef8d499eSDavid van Moolenbroek 		    ethif_next);
147*ef8d499eSDavid van Moolenbroek 
148*ef8d499eSDavid van Moolenbroek 	/* Initialize the number of in-use spare tokens. */
149*ef8d499eSDavid van Moolenbroek 	ethif_spares = 0;
150*ef8d499eSDavid van Moolenbroek 
151*ef8d499eSDavid van Moolenbroek #ifdef INET6
152*ef8d499eSDavid van Moolenbroek 	/* Preinitialize the link-local all-nodes IPv6 multicast address. */
153*ef8d499eSDavid van Moolenbroek 	ip6_addr_set_allnodes_linklocal(&ethif_ip6addr_allnodes_ll);
154*ef8d499eSDavid van Moolenbroek #endif /* INET6 */
155*ef8d499eSDavid van Moolenbroek }
156*ef8d499eSDavid van Moolenbroek 
157*ef8d499eSDavid van Moolenbroek /*
158*ef8d499eSDavid van Moolenbroek  * As the result of some event, the NetBSD-style interface flags for this
159*ef8d499eSDavid van Moolenbroek  * interface may have changed.  Recompute and update the flags as appropriate.
160*ef8d499eSDavid van Moolenbroek  */
161*ef8d499eSDavid van Moolenbroek static void
ethif_update_ifflags(struct ethif * ethif)162*ef8d499eSDavid van Moolenbroek ethif_update_ifflags(struct ethif * ethif)
163*ef8d499eSDavid van Moolenbroek {
164*ef8d499eSDavid van Moolenbroek 	unsigned int ifflags;
165*ef8d499eSDavid van Moolenbroek 
166*ef8d499eSDavid van Moolenbroek 	ifflags = ifdev_get_ifflags(&ethif->ethif_ifdev);
167*ef8d499eSDavid van Moolenbroek 
168*ef8d499eSDavid van Moolenbroek 	/* These are the flags that we might update here. */
169*ef8d499eSDavid van Moolenbroek 	ifflags &= ~(IFF_RUNNING | IFF_ALLMULTI);
170*ef8d499eSDavid van Moolenbroek 
171*ef8d499eSDavid van Moolenbroek 	/*
172*ef8d499eSDavid van Moolenbroek 	 * For us, the RUNNING flag indicates that -as far as we know- the
173*ef8d499eSDavid van Moolenbroek 	 * network device is fully operational and has its I/O engines running.
174*ef8d499eSDavid van Moolenbroek 	 * This is a reflection of the current state, not of any intention, so
175*ef8d499eSDavid van Moolenbroek 	 * we look at the active configuration here.  We use the same approach
176*ef8d499eSDavid van Moolenbroek 	 * for one other receive state flags here (ALLMULTI).
177*ef8d499eSDavid van Moolenbroek 	 */
178*ef8d499eSDavid van Moolenbroek 	if ((ethif->ethif_flags &
179*ef8d499eSDavid van Moolenbroek 	    (ETHIFF_DISABLED | ETHIFF_FIRST_CONF)) == 0 &&
180*ef8d499eSDavid van Moolenbroek 	     ethif->ethif_active.nconf_mode != NDEV_MODE_DOWN) {
181*ef8d499eSDavid van Moolenbroek 		ifflags |= IFF_RUNNING;
182*ef8d499eSDavid van Moolenbroek 
183*ef8d499eSDavid van Moolenbroek 		if (ethif->ethif_active.nconf_mode & NDEV_MODE_MCAST_ALL)
184*ef8d499eSDavid van Moolenbroek 			ifflags |= IFF_ALLMULTI;
185*ef8d499eSDavid van Moolenbroek 	}
186*ef8d499eSDavid van Moolenbroek 
187*ef8d499eSDavid van Moolenbroek 	ifdev_update_ifflags(&ethif->ethif_ifdev, ifflags);
188*ef8d499eSDavid van Moolenbroek }
189*ef8d499eSDavid van Moolenbroek 
190*ef8d499eSDavid van Moolenbroek /*
191*ef8d499eSDavid van Moolenbroek  * Add a multicast hardware receive address into the set of hardware addresses
192*ef8d499eSDavid van Moolenbroek  * in the given configuration, if the given address is not already in the
193*ef8d499eSDavid van Moolenbroek  * configuration's set.  Adjust the configuration's mode as needed.  Return
194*ef8d499eSDavid van Moolenbroek  * TRUE If the address was added, and FALSE if the address could not be added
195*ef8d499eSDavid van Moolenbroek  * due to a full list (of 'max' elements), in which case the mode is changed
196*ef8d499eSDavid van Moolenbroek  * from receiving from listed multicast addresses to receiving from all
197*ef8d499eSDavid van Moolenbroek  * multicast addresses.
198*ef8d499eSDavid van Moolenbroek  */
199*ef8d499eSDavid van Moolenbroek static int
ethif_add_mcast(struct ndev_conf * nconf,unsigned int max,struct ndev_hwaddr * hwaddr)200*ef8d499eSDavid van Moolenbroek ethif_add_mcast(struct ndev_conf * nconf, unsigned int max,
201*ef8d499eSDavid van Moolenbroek 	struct ndev_hwaddr * hwaddr)
202*ef8d499eSDavid van Moolenbroek {
203*ef8d499eSDavid van Moolenbroek 	unsigned int slot;
204*ef8d499eSDavid van Moolenbroek 
205*ef8d499eSDavid van Moolenbroek 	/*
206*ef8d499eSDavid van Moolenbroek 	 * See if the hardware address is already in the list we produced so
207*ef8d499eSDavid van Moolenbroek 	 * far.  This makes the multicast list generation O(n^2) but we do not
208*ef8d499eSDavid van Moolenbroek 	 * expect many entries nor is the list size large anyway.
209*ef8d499eSDavid van Moolenbroek 	 */
210*ef8d499eSDavid van Moolenbroek 	for (slot = 0; slot < nconf->nconf_mccount; slot++)
211*ef8d499eSDavid van Moolenbroek 		if (!memcmp(&nconf->nconf_mclist[slot], hwaddr,
212*ef8d499eSDavid van Moolenbroek 		    sizeof(*hwaddr)))
213*ef8d499eSDavid van Moolenbroek 			return TRUE;
214*ef8d499eSDavid van Moolenbroek 
215*ef8d499eSDavid van Moolenbroek 	if (nconf->nconf_mccount < max) {
216*ef8d499eSDavid van Moolenbroek 		memcpy(&nconf->nconf_mclist[slot], hwaddr, sizeof(*hwaddr));
217*ef8d499eSDavid van Moolenbroek 		nconf->nconf_mccount++;
218*ef8d499eSDavid van Moolenbroek 
219*ef8d499eSDavid van Moolenbroek 		nconf->nconf_mode |= NDEV_MODE_MCAST_LIST;
220*ef8d499eSDavid van Moolenbroek 
221*ef8d499eSDavid van Moolenbroek 		return TRUE;
222*ef8d499eSDavid van Moolenbroek 	} else {
223*ef8d499eSDavid van Moolenbroek 		nconf->nconf_mode &= ~NDEV_MODE_MCAST_LIST;
224*ef8d499eSDavid van Moolenbroek 		nconf->nconf_mode |= NDEV_MODE_MCAST_ALL;
225*ef8d499eSDavid van Moolenbroek 
226*ef8d499eSDavid van Moolenbroek 		return FALSE;
227*ef8d499eSDavid van Moolenbroek 	}
228*ef8d499eSDavid van Moolenbroek }
229*ef8d499eSDavid van Moolenbroek 
/*
 * Add the ethernet hardware address derived from the given IPv4 multicast
 * address, to the list of multicast addresses.
 */
static int
ethif_add_mcast_v4(struct ndev_conf * nconf, unsigned int max,
	const ip4_addr_t * ip4addr)
{
	struct ndev_hwaddr hwaddr;

	/* 01:00:5e:xx:xx:xx with the lower 23 bits of the IPv4 address. */
	hwaddr.nhwa_addr[0] = LL_IP4_MULTICAST_ADDR_0;
	hwaddr.nhwa_addr[1] = LL_IP4_MULTICAST_ADDR_1;
	hwaddr.nhwa_addr[2] = LL_IP4_MULTICAST_ADDR_2;
	hwaddr.nhwa_addr[3] = (ip4_addr_get_u32(ip4addr) >> 16) & 0x7f;
	hwaddr.nhwa_addr[4] = (ip4_addr_get_u32(ip4addr) >>  8) & 0xff;
	hwaddr.nhwa_addr[5] = (ip4_addr_get_u32(ip4addr) >>  0) & 0xff;

	return ethif_add_mcast(nconf, max, &hwaddr);
}
250*ef8d499eSDavid van Moolenbroek 
251*ef8d499eSDavid van Moolenbroek /*
252*ef8d499eSDavid van Moolenbroek  * Add the ethernet hardware address derived from the given IPv6 multicast
253*ef8d499eSDavid van Moolenbroek  * address, to the list of multicast addresses.
254*ef8d499eSDavid van Moolenbroek  */
255*ef8d499eSDavid van Moolenbroek static int
ethif_add_mcast_v6(struct ndev_conf * nconf,unsigned int max,const ip6_addr_t * ip6addr)256*ef8d499eSDavid van Moolenbroek ethif_add_mcast_v6(struct ndev_conf * nconf, unsigned int max,
257*ef8d499eSDavid van Moolenbroek 	const ip6_addr_t * ip6addr)
258*ef8d499eSDavid van Moolenbroek {
259*ef8d499eSDavid van Moolenbroek 	struct ndev_hwaddr hwaddr;
260*ef8d499eSDavid van Moolenbroek 
261*ef8d499eSDavid van Moolenbroek 	/* 33:33:xx:xx:xx:xx with the lower 32 bits of the IPv6 address. */
262*ef8d499eSDavid van Moolenbroek 	hwaddr.nhwa_addr[0] = LL_IP6_MULTICAST_ADDR_0;
263*ef8d499eSDavid van Moolenbroek 	hwaddr.nhwa_addr[1] = LL_IP6_MULTICAST_ADDR_1;
264*ef8d499eSDavid van Moolenbroek 	memcpy(&hwaddr.nhwa_addr[2], &ip6addr->addr[3], sizeof(uint32_t));
265*ef8d499eSDavid van Moolenbroek 
266*ef8d499eSDavid van Moolenbroek 	return ethif_add_mcast(nconf, max, &hwaddr);
267*ef8d499eSDavid van Moolenbroek }
268*ef8d499eSDavid van Moolenbroek 
269*ef8d499eSDavid van Moolenbroek /*
270*ef8d499eSDavid van Moolenbroek  * Set up the multicast mode for a configuration that is to be sent to a
271*ef8d499eSDavid van Moolenbroek  * network driver, generating a multicast receive address list for the driver
272*ef8d499eSDavid van Moolenbroek  * as applicable.
273*ef8d499eSDavid van Moolenbroek  */
274*ef8d499eSDavid van Moolenbroek static void
ethif_gen_mcast(struct ethif * ethif,struct ndev_conf * nconf)275*ef8d499eSDavid van Moolenbroek ethif_gen_mcast(struct ethif * ethif, struct ndev_conf * nconf)
276*ef8d499eSDavid van Moolenbroek {
277*ef8d499eSDavid van Moolenbroek 	struct igmp_group *group4;
278*ef8d499eSDavid van Moolenbroek 	struct mld_group *group6;
279*ef8d499eSDavid van Moolenbroek 	unsigned int max;
280*ef8d499eSDavid van Moolenbroek 
281*ef8d499eSDavid van Moolenbroek 	/* Make sure that multicast is supported at all for this interface. */
282*ef8d499eSDavid van Moolenbroek 	if (!(ethif->ethif_caps & NDEV_CAP_MCAST))
283*ef8d499eSDavid van Moolenbroek 		return;
284*ef8d499eSDavid van Moolenbroek 
285*ef8d499eSDavid van Moolenbroek 	/* Make sure the mode is being (re)configured to be up. */
286*ef8d499eSDavid van Moolenbroek 	if (!(nconf->nconf_set & NDEV_SET_MODE) ||
287*ef8d499eSDavid van Moolenbroek 	    nconf->nconf_mode == NDEV_MODE_DOWN)
288*ef8d499eSDavid van Moolenbroek 		return;
289*ef8d499eSDavid van Moolenbroek 
290*ef8d499eSDavid van Moolenbroek 	/* Recompute the desired multicast flags. */
291*ef8d499eSDavid van Moolenbroek 	nconf->nconf_mode &= ~(NDEV_MODE_MCAST_LIST | NDEV_MODE_MCAST_ALL);
292*ef8d499eSDavid van Moolenbroek 
293*ef8d499eSDavid van Moolenbroek 	/* If promiscuous mode is enabled, receive all multicast packets. */
294*ef8d499eSDavid van Moolenbroek 	if (nconf->nconf_mode & NDEV_MODE_PROMISC) {
295*ef8d499eSDavid van Moolenbroek 		nconf->nconf_mode |= NDEV_MODE_MCAST_ALL;
296*ef8d499eSDavid van Moolenbroek 
297*ef8d499eSDavid van Moolenbroek 		return;
298*ef8d499eSDavid van Moolenbroek 	}
299*ef8d499eSDavid van Moolenbroek 
300*ef8d499eSDavid van Moolenbroek 	/*
301*ef8d499eSDavid van Moolenbroek 	 * Map all IGMP/MLD6 multicast addresses to ethernet addresses, merging
302*ef8d499eSDavid van Moolenbroek 	 * any duplicates to save slots.  We have to add the MLD6 all-nodes
303*ef8d499eSDavid van Moolenbroek 	 * multicast address ourselves, which also means the list is never
304*ef8d499eSDavid van Moolenbroek 	 * empty unless compiling with USE_INET6=no.  If the list is too small
305*ef8d499eSDavid van Moolenbroek 	 * for all addresses, opt to receive all multicast packets instead.
306*ef8d499eSDavid van Moolenbroek 	 */
307*ef8d499eSDavid van Moolenbroek 	nconf->nconf_mclist = ethif->ethif_mclist;
308*ef8d499eSDavid van Moolenbroek 	nconf->nconf_mccount = 0;
309*ef8d499eSDavid van Moolenbroek 	max = __arraycount(ethif->ethif_mclist);
310*ef8d499eSDavid van Moolenbroek 
311*ef8d499eSDavid van Moolenbroek 	for (group4 = netif_igmp_data(ethif_get_netif(ethif)); group4 != NULL;
312*ef8d499eSDavid van Moolenbroek 	    group4 = group4->next)
313*ef8d499eSDavid van Moolenbroek 		if (!ethif_add_mcast_v4(nconf, max, &group4->group_address))
314*ef8d499eSDavid van Moolenbroek 			return;
315*ef8d499eSDavid van Moolenbroek 
316*ef8d499eSDavid van Moolenbroek #ifdef INET6
317*ef8d499eSDavid van Moolenbroek 	if (!ethif_add_mcast_v6(nconf, max, &ethif_ip6addr_allnodes_ll))
318*ef8d499eSDavid van Moolenbroek 		return;
319*ef8d499eSDavid van Moolenbroek #endif /* INET6 */
320*ef8d499eSDavid van Moolenbroek 
321*ef8d499eSDavid van Moolenbroek 	for (group6 = netif_mld6_data(ethif_get_netif(ethif)); group6 != NULL;
322*ef8d499eSDavid van Moolenbroek 	    group6 = group6->next)
323*ef8d499eSDavid van Moolenbroek 		if (!ethif_add_mcast_v6(nconf, max, &group6->group_address))
324*ef8d499eSDavid van Moolenbroek 			return;
325*ef8d499eSDavid van Moolenbroek }
326*ef8d499eSDavid van Moolenbroek 
327*ef8d499eSDavid van Moolenbroek /*
328*ef8d499eSDavid van Moolenbroek  * Merge a source configuration into a destination configuration, copying any
329*ef8d499eSDavid van Moolenbroek  * fields intended to be set from the source into the destination and clearing
330*ef8d499eSDavid van Moolenbroek  * the "set" mask in the source, without changing the source fields, so that
331*ef8d499eSDavid van Moolenbroek  * the source will reflect the destination's contents.
332*ef8d499eSDavid van Moolenbroek  */
333*ef8d499eSDavid van Moolenbroek static void
ethif_merge_conf(struct ndev_conf * dconf,struct ndev_conf * sconf)334*ef8d499eSDavid van Moolenbroek ethif_merge_conf(struct ndev_conf * dconf, struct ndev_conf * sconf)
335*ef8d499eSDavid van Moolenbroek {
336*ef8d499eSDavid van Moolenbroek 
337*ef8d499eSDavid van Moolenbroek 	dconf->nconf_set |= sconf->nconf_set;
338*ef8d499eSDavid van Moolenbroek 
339*ef8d499eSDavid van Moolenbroek 	if (sconf->nconf_set & NDEV_SET_MODE)
340*ef8d499eSDavid van Moolenbroek 		dconf->nconf_mode = sconf->nconf_mode;
341*ef8d499eSDavid van Moolenbroek 	if (sconf->nconf_set & NDEV_SET_CAPS)
342*ef8d499eSDavid van Moolenbroek 		dconf->nconf_caps = sconf->nconf_caps;
343*ef8d499eSDavid van Moolenbroek 	if (sconf->nconf_set & NDEV_SET_FLAGS)
344*ef8d499eSDavid van Moolenbroek 		dconf->nconf_flags = sconf->nconf_flags;
345*ef8d499eSDavid van Moolenbroek 	if (sconf->nconf_set & NDEV_SET_MEDIA)
346*ef8d499eSDavid van Moolenbroek 		dconf->nconf_media = sconf->nconf_media;
347*ef8d499eSDavid van Moolenbroek 	if (sconf->nconf_set & NDEV_SET_HWADDR)
348*ef8d499eSDavid van Moolenbroek 		memcpy(&dconf->nconf_hwaddr, &sconf->nconf_hwaddr,
349*ef8d499eSDavid van Moolenbroek 		    sizeof(dconf->nconf_hwaddr));
350*ef8d499eSDavid van Moolenbroek 
351*ef8d499eSDavid van Moolenbroek 	sconf->nconf_set = 0;
352*ef8d499eSDavid van Moolenbroek }
353*ef8d499eSDavid van Moolenbroek 
354*ef8d499eSDavid van Moolenbroek /*
355*ef8d499eSDavid van Moolenbroek  * Return TRUE if we can and should try to pass a configuration request to the
356*ef8d499eSDavid van Moolenbroek  * ndev layer on this interface, or FALSE otherwise.
357*ef8d499eSDavid van Moolenbroek  */
358*ef8d499eSDavid van Moolenbroek static int
ethif_can_conf(struct ethif * ethif)359*ef8d499eSDavid van Moolenbroek ethif_can_conf(struct ethif * ethif)
360*ef8d499eSDavid van Moolenbroek {
361*ef8d499eSDavid van Moolenbroek 
362*ef8d499eSDavid van Moolenbroek 	/* Is there a configuration change waiting?  The common case is no. */
363*ef8d499eSDavid van Moolenbroek 	if (ethif->ethif_wanted.nconf_set == 0)
364*ef8d499eSDavid van Moolenbroek 		return FALSE;
365*ef8d499eSDavid van Moolenbroek 
366*ef8d499eSDavid van Moolenbroek 	/*
367*ef8d499eSDavid van Moolenbroek 	 * Is there a configuration change pending already?  Then wait for it
368*ef8d499eSDavid van Moolenbroek 	 * to be acknowledged first.
369*ef8d499eSDavid van Moolenbroek 	 */
370*ef8d499eSDavid van Moolenbroek 	if (ethif->ethif_pending.nconf_set != 0)
371*ef8d499eSDavid van Moolenbroek 		return FALSE;
372*ef8d499eSDavid van Moolenbroek 
373*ef8d499eSDavid van Moolenbroek 	/* Make sure the interface is in the appropriate state. */
374*ef8d499eSDavid van Moolenbroek 	if (ethif->ethif_flags & ETHIFF_DISABLED)
375*ef8d499eSDavid van Moolenbroek 		return FALSE;
376*ef8d499eSDavid van Moolenbroek 
377*ef8d499eSDavid van Moolenbroek 	/* First let all current packet send requests finish. */
378*ef8d499eSDavid van Moolenbroek 	return (ethif->ethif_snd.es_unsentp == &ethif->ethif_snd.es_head);
379*ef8d499eSDavid van Moolenbroek }
380*ef8d499eSDavid van Moolenbroek 
381*ef8d499eSDavid van Moolenbroek /*
382*ef8d499eSDavid van Moolenbroek  * Return TRUE if we can and should try to pass the next unsent packet send
383*ef8d499eSDavid van Moolenbroek  * request to the ndev layer on this interface, or FALSE otherwise.
384*ef8d499eSDavid van Moolenbroek  */
385*ef8d499eSDavid van Moolenbroek static int
ethif_can_send(struct ethif * ethif)386*ef8d499eSDavid van Moolenbroek ethif_can_send(struct ethif * ethif)
387*ef8d499eSDavid van Moolenbroek {
388*ef8d499eSDavid van Moolenbroek 
389*ef8d499eSDavid van Moolenbroek 	/* Is there anything to hand to ndev at all?  The common case is no. */
390*ef8d499eSDavid van Moolenbroek 	if (*ethif->ethif_snd.es_unsentp == NULL)
391*ef8d499eSDavid van Moolenbroek 		return FALSE;
392*ef8d499eSDavid van Moolenbroek 
393*ef8d499eSDavid van Moolenbroek 	/*
394*ef8d499eSDavid van Moolenbroek 	 * Is there a configuration change pending?  Then we cannot send
395*ef8d499eSDavid van Moolenbroek 	 * packets yet.  Always let all configuration changes through first.
396*ef8d499eSDavid van Moolenbroek 	 */
397*ef8d499eSDavid van Moolenbroek 	if (ethif->ethif_pending.nconf_set != 0 ||
398*ef8d499eSDavid van Moolenbroek 	    ethif->ethif_wanted.nconf_set != 0)
399*ef8d499eSDavid van Moolenbroek 		return FALSE;
400*ef8d499eSDavid van Moolenbroek 
401*ef8d499eSDavid van Moolenbroek 	/* Make sure the interface is in the appropriate state. */
402*ef8d499eSDavid van Moolenbroek 	if ((ethif->ethif_flags & (ETHIFF_DISABLED | ETHIFF_FIRST_CONF)) != 0)
403*ef8d499eSDavid van Moolenbroek 		return FALSE;
404*ef8d499eSDavid van Moolenbroek 
405*ef8d499eSDavid van Moolenbroek 	return TRUE;
406*ef8d499eSDavid van Moolenbroek }
407*ef8d499eSDavid van Moolenbroek 
408*ef8d499eSDavid van Moolenbroek /*
409*ef8d499eSDavid van Moolenbroek  * Return TRUE if we can and should try to receive packets on this interface
410*ef8d499eSDavid van Moolenbroek  * and are ready to accept received packets, or FALSE otherwise.
411*ef8d499eSDavid van Moolenbroek  */
412*ef8d499eSDavid van Moolenbroek static int
ethif_can_recv(struct ethif * ethif)413*ef8d499eSDavid van Moolenbroek ethif_can_recv(struct ethif * ethif)
414*ef8d499eSDavid van Moolenbroek {
415*ef8d499eSDavid van Moolenbroek 
416*ef8d499eSDavid van Moolenbroek 	if ((ethif->ethif_flags & (ETHIFF_DISABLED | ETHIFF_FIRST_CONF)) != 0)
417*ef8d499eSDavid van Moolenbroek 		return FALSE;
418*ef8d499eSDavid van Moolenbroek 
419*ef8d499eSDavid van Moolenbroek 	/*
420*ef8d499eSDavid van Moolenbroek 	 * We do not check the link status here.  There is no reason not to
421*ef8d499eSDavid van Moolenbroek 	 * spawn receive requests, or accept received packets, while the link
422*ef8d499eSDavid van Moolenbroek 	 * is reported to be down.
423*ef8d499eSDavid van Moolenbroek 	 */
424*ef8d499eSDavid van Moolenbroek 	return ifdev_is_up(&ethif->ethif_ifdev);
425*ef8d499eSDavid van Moolenbroek }
426*ef8d499eSDavid van Moolenbroek 
/*
 * Polling function, invoked after each message loop iteration.  Check whether
 * any configuration change or packets can be sent to the driver, and whether
 * any new packet receive requests can be enqueued at the driver.
 */
static void
ethif_poll(struct ifdev * ifdev)
{
	struct ethif *ethif = (struct ethif *)ifdev;
	struct pbuf *pbuf, *pref;

	/*
	 * If a configuration request is desired, see if we can send it to the
	 * driver now.  Otherwise, attempt to send any packets if possible.
	 * In both cases, a failure of the ndev call indicates that we should
	 * try again later.  Note that configuration requests take precedence
	 * over pending packets, as described at the top of this file.
	 */
	if (ethif_can_conf(ethif)) {
		/* Recompute the multicast settings just before sending. */
		ethif_gen_mcast(ethif, &ethif->ethif_wanted);

		/*
		 * On success, move the wanted configuration into the pending
		 * slot.  Otherwise, try again on the next poll iteration.
		 */
		if (ndev_conf(ethif->ethif_ndev, &ethif->ethif_wanted) == OK)
			ethif_merge_conf(&ethif->ethif_pending,
			    &ethif->ethif_wanted);
	} else {
		while (ethif_can_send(ethif)) {
			pref = *ethif->ethif_snd.es_unsentp;

			/*
			 * A PBUF_REF entry on the send queue points to the
			 * actual packet chain through its payload field; any
			 * other entry is itself the (copied) packet.  See
			 * ethif_output() for how entries are enqueued.
			 */
			if (pref->type == PBUF_REF)
				pbuf = (struct pbuf *)pref->payload;
			else
				pbuf = pref;

			/*
			 * On success, advance the unsent pointer past this
			 * queue entry; on failure, retry on the next poll.
			 */
			if (ndev_send(ethif->ethif_ndev, pbuf) == OK)
				ethif->ethif_snd.es_unsentp =
				    pchain_end(pref);
			else
				break;
		}
	}

	/*
	 * Attempt to create additional receive requests for the driver, if
	 * applicable.  We currently do not set a limit on the maximum number
	 * of concurrently pending receive requests here, because the maximum
	 * in ndev is already quite low.  That may have to be changed one day.
	 */
	while (ethif_can_recv(ethif) && ndev_can_recv(ethif->ethif_ndev)) {
		/*
		 * Allocate a buffer for the network device driver to copy the
		 * received packet into.  Allocation may fail if no buffers are
		 * available at this time; in that case simply try again later.
		 * We add room for a VLAN tag even though we do not support
		 * such tags just yet.
		 */
		if ((pbuf = pchain_alloc(PBUF_RAW, ETH_PAD_LEN + ETH_HDR_LEN +
		    ETHIF_MAX_MTU + NDEV_ETH_PACKET_TAG)) == NULL)
			break;

		/*
		 * Effectively throw away two bytes in order to align TCP/IP
		 * header fields to 32 bits.  See the short discussion in
		 * lwipopts.h as to why we are not using lwIP's ETH_PAD_SIZE.
		 */
		util_pbuf_header(pbuf, -ETH_PAD_LEN);

		/*
		 * Send the request to the driver.  This may still fail due to
		 * grant allocation failure, in which case we try again later.
		 */
		if (ndev_recv(ethif->ethif_ndev, pbuf) != OK) {
			pbuf_free(pbuf);

			break;
		}

		/*
		 * Hold on to the packet buffer until the receive request
		 * completes or is aborted, or the driver disappears.
		 */
		*ethif->ethif_rcv.er_tailp = pbuf;
		ethif->ethif_rcv.er_tailp = pchain_end(pbuf);
	}
}
514*ef8d499eSDavid van Moolenbroek 
515*ef8d499eSDavid van Moolenbroek /*
516*ef8d499eSDavid van Moolenbroek  * Complete the link-layer header of the packet by filling in a source address.
517*ef8d499eSDavid van Moolenbroek  * This is relevant for BPF-generated packets only, and thus we can safely
518*ef8d499eSDavid van Moolenbroek  * modify the given pbuf.
519*ef8d499eSDavid van Moolenbroek  */
520*ef8d499eSDavid van Moolenbroek static void
ethif_hdrcmplt(struct ifdev * ifdev,struct pbuf * pbuf)521*ef8d499eSDavid van Moolenbroek ethif_hdrcmplt(struct ifdev * ifdev, struct pbuf * pbuf)
522*ef8d499eSDavid van Moolenbroek {
523*ef8d499eSDavid van Moolenbroek 	struct netif *netif;
524*ef8d499eSDavid van Moolenbroek 
525*ef8d499eSDavid van Moolenbroek 	/* Make sure there is an ethernet packet header at all. */
526*ef8d499eSDavid van Moolenbroek 	if (pbuf->len < ETH_HDR_LEN)
527*ef8d499eSDavid van Moolenbroek 		return;
528*ef8d499eSDavid van Moolenbroek 
529*ef8d499eSDavid van Moolenbroek 	netif = ifdev_get_netif(ifdev);
530*ef8d499eSDavid van Moolenbroek 
531*ef8d499eSDavid van Moolenbroek 	/*
532*ef8d499eSDavid van Moolenbroek 	 * Insert the source ethernet address into the packet.  The source
533*ef8d499eSDavid van Moolenbroek 	 * address is located right after the destination address at the start
534*ef8d499eSDavid van Moolenbroek 	 * of the packet.
535*ef8d499eSDavid van Moolenbroek 	 */
536*ef8d499eSDavid van Moolenbroek 	memcpy((uint8_t *)pbuf->payload + netif->hwaddr_len, netif->hwaddr,
537*ef8d499eSDavid van Moolenbroek 	    netif->hwaddr_len);
538*ef8d499eSDavid van Moolenbroek }
539*ef8d499eSDavid van Moolenbroek 
540*ef8d499eSDavid van Moolenbroek /*
541*ef8d499eSDavid van Moolenbroek  * Return TRUE if the given additional number of spare tokens may be used, or
542*ef8d499eSDavid van Moolenbroek  * FALSE if the limit has been reached.  Each spare token represents one
543*ef8d499eSDavid van Moolenbroek  * enqueued pbuf.  The limit must be such that we do not impede normal traffic
544*ef8d499eSDavid van Moolenbroek  * but also do not spend the entire buffer pool on enqueued packets.
545*ef8d499eSDavid van Moolenbroek  */
546*ef8d499eSDavid van Moolenbroek static int
ethif_can_spare(unsigned int spares)547*ef8d499eSDavid van Moolenbroek ethif_can_spare(unsigned int spares)
548*ef8d499eSDavid van Moolenbroek {
549*ef8d499eSDavid van Moolenbroek 	unsigned int max;
550*ef8d499eSDavid van Moolenbroek 
551*ef8d499eSDavid van Moolenbroek 	/*
552*ef8d499eSDavid van Moolenbroek 	 * Use the configured maximum, which depends on the current size of the
553*ef8d499eSDavid van Moolenbroek 	 * buffer pool.
554*ef8d499eSDavid van Moolenbroek 	 */
555*ef8d499eSDavid van Moolenbroek 	max = ETHIF_PBUF_MAX_1;
556*ef8d499eSDavid van Moolenbroek 
557*ef8d499eSDavid van Moolenbroek 	/*
558*ef8d499eSDavid van Moolenbroek 	 * However, limit the total to a value based on the maximum number of
559*ef8d499eSDavid van Moolenbroek 	 * TCP packets that can, in the worst case, be expected to queue up at
560*ef8d499eSDavid van Moolenbroek 	 * any single moment.
561*ef8d499eSDavid van Moolenbroek 	 */
562*ef8d499eSDavid van Moolenbroek 	if (max > ETHIF_PBUF_MAX_2)
563*ef8d499eSDavid van Moolenbroek 		max = ETHIF_PBUF_MAX_2;
564*ef8d499eSDavid van Moolenbroek 
565*ef8d499eSDavid van Moolenbroek 	return (spares + ethif_spares <= max - ETHIF_PBUF_MIN * NR_NDEV);
566*ef8d499eSDavid van Moolenbroek }
567*ef8d499eSDavid van Moolenbroek 
/*
 * Process a packet as output on an ethernet interface.  The packet may be
 * enqueued on the interface's send queue (referenced or copied as needed), to
 * be transmitted to the driver later from ethif_poll().  Return ERR_OK on
 * success, or ERR_MEM if the packet is oversized, a required copy or
 * reference buffer could not be allocated, or the queue limits were hit.
 */
static err_t
ethif_output(struct ifdev * ifdev, struct pbuf * pbuf, struct netif * netif)
{
	struct ethif *ethif = (struct ethif *)ifdev;
	struct pbuf *pref, *pcopy;
	size_t padding;
	unsigned int count, spares;

	/* Packets must never be sent on behalf of another interface. */
	assert(netif == NULL);

	/*
	 * The caller already rejects packets while the interface or link is
	 * down.  We do want to keep enqueuing packets while the driver is
	 * restarting, so do not check ETHIFF_DISABLED or ETHIFF_FIRST_CONF.
	 */

	/*
	 * Reject oversized packets immediately.  This should not happen.
	 * Undersized packets are padded below.
	 */
	if (pbuf->tot_len > NDEV_ETH_PACKET_MAX) {
		printf("LWIP: attempt to send oversized ethernet packet "
		    "(size %u)\n", pbuf->tot_len);
		util_stacktrace();

		return ERR_MEM;
	}

	/*
	 * The original lwIP idea for processing output packets is that we make
	 * a copy of the packet here, so that lwIP is free to do whatever it
	 * wants with the original packet (e.g., keep on the TCP retransmission
	 * queue).  More recently, lwIP has made progress towards allowing the
	 * packet to be referenced only, decreasing the reference count only
	 * once the packet has been actually sent.  For many embedded systems,
	 * that change now allows zero-copy transmission with direct DMA from
	 * the provided packet buffer.  We are not so lucky: we have to make an
	 * additional inter-process copy anyway.  We do however use the same
	 * referencing system to avoid having to make yet another copy of the
	 * packet here.
	 *
	 * There was previously a check on (pbuf->ref > 1) here, to ensure that
	 * we would never enqueue packets that are retransmitted while we were
	 * still in the process of sending the initial copy.  Now that for ARP
	 * and NDP queuing, packets are referenced rather than copied (lwIP
	 * patch #9272), we can no longer perform that check: packets may
	 * legitimately have a reference count of 2 at this point.  The second
	 * reference will be dropped by the caller immediately after we return.
	 */

	/*
	 * There are two cases in which we need to make a copy of the packet
	 * after all:
	 *
	 * 1) in the case that the packet needs to be padded in order to reach
	 *    the minimum ethernet packet size (for drivers' convenience);
	 * 2) in the (much more exceptional) case that the given pbuf chain
	 *    exceeds the maximum vector size for network driver requests.
	 */
	if (NDEV_ETH_PACKET_MIN > pbuf->tot_len)
		padding = NDEV_ETH_PACKET_MIN - pbuf->tot_len;
	else
		padding = 0;

	/* Number of buffers in the given chain, one I/O vector entry each. */
	count = pbuf_clen(pbuf);

	if (padding != 0 || count > NDEV_IOV_MAX) {
		pcopy = pchain_alloc(PBUF_RAW, pbuf->tot_len + padding);
		if (pcopy == NULL) {
			ifdev_output_drop(ifdev);

			return ERR_MEM;
		}

		/* The copy is large enough by construction; see above. */
		if (pbuf_copy(pcopy, pbuf) != ERR_OK)
			panic("unexpected pbuf copy failure");

		if (padding > 0) {
			/*
			 * This restriction can be lifted if needed, but it
			 * involves hairy pbuf traversal and our standard pool
			 * size should be way in excess of the minimum packet
			 * size.
			 */
			assert(pcopy->len == pbuf->tot_len + padding);

			/* Zero out the padding bytes after the packet data. */
			memset((char *)pcopy->payload + pbuf->tot_len, 0,
			    padding);
		}

		count = pbuf_clen(pcopy);
		assert(count <= NDEV_IOV_MAX);

		pbuf = pcopy;
	} else
		pcopy = NULL;

	/*
	 * Restrict the size of the send queue, so that it will not exhaust
	 * the buffer pool.  Each interface may enqueue up to ETHIF_PBUF_MIN
	 * buffers freely; beyond that, buffers count as "spares" drawn from a
	 * shared, globally limited pool (see ethif_can_spare()).
	 */
	if (ethif->ethif_snd.es_count >= ETHIF_PBUF_MIN)
		spares = count;
	else if (ethif->ethif_snd.es_count + count > ETHIF_PBUF_MIN)
		spares = ethif->ethif_snd.es_count + count - ETHIF_PBUF_MIN;
	else
		spares = 0;

	if (spares > 0 && !ethif_can_spare(spares)) {
		if (pcopy != NULL)
			pbuf_free(pcopy);

		ifdev_output_drop(ifdev);

		return ERR_MEM;
	}

	/*
	 * A side effect of the referencing approach is that we cannot touch
	 * the last pbuf's "next" pointer.  Thus, we need another way of
	 * linking together the buffers on the send queue.  We use a linked
	 * list of PBUF_REF-type buffers for this instead.  However, do this
	 * only when we have not made a copy of the original pbuf, because then
	 * we might as well use the copy instead.
	 */
	if (pcopy == NULL) {
		if ((pref = pbuf_alloc(PBUF_RAW, 0, PBUF_REF)) == NULL) {
			ifdev_output_drop(ifdev);

			return ERR_MEM;
		}

		/* Keep the referenced chain alive while it is enqueued. */
		pbuf_ref(pbuf);

		/*
		 * Repurpose the PBUF_REF's fields: payload points to the
		 * actual packet chain (see ethif_poll()), tot_len is cleared,
		 * and len stores the number of buffers in that chain.
		 */
		pref->payload = pbuf;
		pref->tot_len = 0;
		pref->len = count;
	} else
		pref = pcopy;

	/* If the send queue was empty so far, set the IFF_OACTIVE flag. */
	if (ethif->ethif_snd.es_head == NULL)
		ifdev_update_ifflags(&ethif->ethif_ifdev,
		    ifdev_get_ifflags(&ethif->ethif_ifdev) | IFF_OACTIVE);

	/*
	 * Enqueue the packet on the send queue.  It will be sent from the
	 * polling function as soon as possible.  TODO: see if sending it from
	 * here makes any performance difference at all.
	 */
	*ethif->ethif_snd.es_tailp = pref;
	ethif->ethif_snd.es_tailp = pchain_end(pref);

	ethif->ethif_snd.es_count += count;
	ethif_spares += spares;

	return ERR_OK;
}
730*ef8d499eSDavid van Moolenbroek 
731*ef8d499eSDavid van Moolenbroek /*
732*ef8d499eSDavid van Moolenbroek  * Transmit an ethernet packet on an ethernet interface, as requested by lwIP.
733*ef8d499eSDavid van Moolenbroek  */
734*ef8d499eSDavid van Moolenbroek static err_t
ethif_linkoutput(struct netif * netif,struct pbuf * pbuf)735*ef8d499eSDavid van Moolenbroek ethif_linkoutput(struct netif * netif, struct pbuf * pbuf)
736*ef8d499eSDavid van Moolenbroek {
737*ef8d499eSDavid van Moolenbroek 	struct ifdev *ifdev = netif_get_ifdev(netif);
738*ef8d499eSDavid van Moolenbroek 
739*ef8d499eSDavid van Moolenbroek 	/*
740*ef8d499eSDavid van Moolenbroek 	 * Let ifdev make the callback to our output function, so that it can
741*ef8d499eSDavid van Moolenbroek 	 * pass the packet to BPF devices and generically update statistics.
742*ef8d499eSDavid van Moolenbroek 	 */
743*ef8d499eSDavid van Moolenbroek 	return ifdev_output(ifdev, pbuf, NULL /*netif*/, TRUE /*to_bpf*/,
744*ef8d499eSDavid van Moolenbroek 	    TRUE /*hdrcmplt*/);
745*ef8d499eSDavid van Moolenbroek }
746*ef8d499eSDavid van Moolenbroek 
747*ef8d499eSDavid van Moolenbroek /*
748*ef8d499eSDavid van Moolenbroek  * The multicast address list has changed.  See to it that the change will make
749*ef8d499eSDavid van Moolenbroek  * it to the network driver at some point.
750*ef8d499eSDavid van Moolenbroek  */
751*ef8d499eSDavid van Moolenbroek static err_t
ethif_set_mcast(struct ethif * ethif)752*ef8d499eSDavid van Moolenbroek ethif_set_mcast(struct ethif * ethif)
753*ef8d499eSDavid van Moolenbroek {
754*ef8d499eSDavid van Moolenbroek 
755*ef8d499eSDavid van Moolenbroek 	/*
756*ef8d499eSDavid van Moolenbroek 	 * Simply generate a mode change request, unless the interface is down.
757*ef8d499eSDavid van Moolenbroek 	 * Once the mode change request is about to be sent to the driver, we
758*ef8d499eSDavid van Moolenbroek 	 * will recompute the multicast settings.
759*ef8d499eSDavid van Moolenbroek 	 */
760*ef8d499eSDavid van Moolenbroek 	if (ifdev_is_up(&ethif->ethif_ifdev))
761*ef8d499eSDavid van Moolenbroek 		ethif->ethif_wanted.nconf_set |= NDEV_SET_MODE;
762*ef8d499eSDavid van Moolenbroek 
763*ef8d499eSDavid van Moolenbroek 	return ERR_OK;
764*ef8d499eSDavid van Moolenbroek }
765*ef8d499eSDavid van Moolenbroek 
766*ef8d499eSDavid van Moolenbroek /*
767*ef8d499eSDavid van Moolenbroek  * An IPv4 multicast address has been added to or removed from the list of IPv4
768*ef8d499eSDavid van Moolenbroek  * multicast addresses.
769*ef8d499eSDavid van Moolenbroek  */
770*ef8d499eSDavid van Moolenbroek static err_t
ethif_set_mcast_v4(struct netif * netif,const ip4_addr_t * group __unused,enum netif_mac_filter_action action __unused)771*ef8d499eSDavid van Moolenbroek ethif_set_mcast_v4(struct netif * netif, const ip4_addr_t * group __unused,
772*ef8d499eSDavid van Moolenbroek 	enum netif_mac_filter_action action __unused)
773*ef8d499eSDavid van Moolenbroek {
774*ef8d499eSDavid van Moolenbroek 
775*ef8d499eSDavid van Moolenbroek 	return ethif_set_mcast((struct ethif *)netif_get_ifdev(netif));
776*ef8d499eSDavid van Moolenbroek }
777*ef8d499eSDavid van Moolenbroek 
778*ef8d499eSDavid van Moolenbroek /*
779*ef8d499eSDavid van Moolenbroek  * An IPv6 multicast address has been added to or removed from the list of IPv6
780*ef8d499eSDavid van Moolenbroek  * multicast addresses.
781*ef8d499eSDavid van Moolenbroek  */
782*ef8d499eSDavid van Moolenbroek static err_t
ethif_set_mcast_v6(struct netif * netif,const ip6_addr_t * group __unused,enum netif_mac_filter_action action __unused)783*ef8d499eSDavid van Moolenbroek ethif_set_mcast_v6(struct netif * netif, const ip6_addr_t * group __unused,
784*ef8d499eSDavid van Moolenbroek 	enum netif_mac_filter_action action __unused)
785*ef8d499eSDavid van Moolenbroek {
786*ef8d499eSDavid van Moolenbroek 
787*ef8d499eSDavid van Moolenbroek 	return ethif_set_mcast((struct ethif *)netif_get_ifdev(netif));
788*ef8d499eSDavid van Moolenbroek }
789*ef8d499eSDavid van Moolenbroek 
790*ef8d499eSDavid van Moolenbroek /*
791*ef8d499eSDavid van Moolenbroek  * Initialization function for an ethernet-type netif interface, called from
792*ef8d499eSDavid van Moolenbroek  * lwIP at interface creation time.
793*ef8d499eSDavid van Moolenbroek  */
794*ef8d499eSDavid van Moolenbroek static err_t
ethif_init_netif(struct ifdev * ifdev,struct netif * netif)795*ef8d499eSDavid van Moolenbroek ethif_init_netif(struct ifdev * ifdev, struct netif * netif)
796*ef8d499eSDavid van Moolenbroek {
797*ef8d499eSDavid van Moolenbroek 	struct ethif *ethif = (struct ethif *)ifdev;
798*ef8d499eSDavid van Moolenbroek 
799*ef8d499eSDavid van Moolenbroek 	/*
800*ef8d499eSDavid van Moolenbroek 	 * Fill in a dummy name.  Since it is only two characters, do not
801*ef8d499eSDavid van Moolenbroek 	 * bother trying to reuse part of the given name.  If this name is ever
802*ef8d499eSDavid van Moolenbroek 	 * actually used anywhere, the dummy should suffice for debugging.
803*ef8d499eSDavid van Moolenbroek 	 */
804*ef8d499eSDavid van Moolenbroek 	netif->name[0] = 'e';
805*ef8d499eSDavid van Moolenbroek 	netif->name[1] = 'n';
806*ef8d499eSDavid van Moolenbroek 
807*ef8d499eSDavid van Moolenbroek 	netif->linkoutput = ethif_linkoutput;
808*ef8d499eSDavid van Moolenbroek 
809*ef8d499eSDavid van Moolenbroek 	memset(netif->hwaddr, 0, sizeof(netif->hwaddr));
810*ef8d499eSDavid van Moolenbroek 
811*ef8d499eSDavid van Moolenbroek 	/*
812*ef8d499eSDavid van Moolenbroek 	 * Set the netif flags, partially based on the capabilities reported by
813*ef8d499eSDavid van Moolenbroek 	 * the network device driver.  The reason that we do this now is that
814*ef8d499eSDavid van Moolenbroek 	 * lwIP tests for some of these flags and starts appropriate submodules
815*ef8d499eSDavid van Moolenbroek 	 * (e.g., IGMP) right after returning from this function.  If we set
816*ef8d499eSDavid van Moolenbroek 	 * the flags later, we also have to take over management of those
817*ef8d499eSDavid van Moolenbroek 	 * submodules, which is something we'd rather avoid.  For this reason
818*ef8d499eSDavid van Moolenbroek 	 * in particular, we also do not support capability mask changes after
819*ef8d499eSDavid van Moolenbroek 	 * driver restarts - see ethif_enable().
820*ef8d499eSDavid van Moolenbroek 	 */
821*ef8d499eSDavid van Moolenbroek 	netif->flags = NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET;
822*ef8d499eSDavid van Moolenbroek 
823*ef8d499eSDavid van Moolenbroek 	if (ethif->ethif_caps & NDEV_CAP_BCAST)
824*ef8d499eSDavid van Moolenbroek 		netif->flags |= NETIF_FLAG_BROADCAST;
825*ef8d499eSDavid van Moolenbroek 
826*ef8d499eSDavid van Moolenbroek 	if (ethif->ethif_caps & NDEV_CAP_MCAST) {
827*ef8d499eSDavid van Moolenbroek 		/* The IGMP code adds the all-stations multicast entry. */
828*ef8d499eSDavid van Moolenbroek 		netif->igmp_mac_filter = ethif_set_mcast_v4;
829*ef8d499eSDavid van Moolenbroek 
830*ef8d499eSDavid van Moolenbroek 		netif->flags |= NETIF_FLAG_IGMP;
831*ef8d499eSDavid van Moolenbroek 
832*ef8d499eSDavid van Moolenbroek 		/* For MLD6 we have to add the all-nodes entry ourselves. */
833*ef8d499eSDavid van Moolenbroek 		netif->mld_mac_filter = ethif_set_mcast_v6;
834*ef8d499eSDavid van Moolenbroek 
835*ef8d499eSDavid van Moolenbroek 		netif->flags |= NETIF_FLAG_MLD6;
836*ef8d499eSDavid van Moolenbroek 	}
837*ef8d499eSDavid van Moolenbroek 
838*ef8d499eSDavid van Moolenbroek 	return ERR_OK;
839*ef8d499eSDavid van Moolenbroek }
840*ef8d499eSDavid van Moolenbroek 
841*ef8d499eSDavid van Moolenbroek /*
842*ef8d499eSDavid van Moolenbroek  * The ndev layer reports that a new network device driver has appeared, with
843*ef8d499eSDavid van Moolenbroek  * the given ndev identifier, a driver-given name, and a certain set of
844*ef8d499eSDavid van Moolenbroek  * capabilities.  Create a new ethernet interface object for it.  On success,
845*ef8d499eSDavid van Moolenbroek  * return a pointer to the object (for later callbacks from ndev).  In that
846*ef8d499eSDavid van Moolenbroek  * case, the ndev layer will always immediately call ethif_enable() afterwards.
847*ef8d499eSDavid van Moolenbroek  * On failure, return NULL, in which case ndev will forget about the driver.
848*ef8d499eSDavid van Moolenbroek  */
849*ef8d499eSDavid van Moolenbroek struct ethif *
ethif_add(ndev_id_t id,const char * name,uint32_t caps)850*ef8d499eSDavid van Moolenbroek ethif_add(ndev_id_t id, const char * name, uint32_t caps)
851*ef8d499eSDavid van Moolenbroek {
852*ef8d499eSDavid van Moolenbroek 	struct ethif *ethif;
853*ef8d499eSDavid van Moolenbroek 	unsigned int ifflags;
854*ef8d499eSDavid van Moolenbroek 	int r;
855*ef8d499eSDavid van Moolenbroek 
856*ef8d499eSDavid van Moolenbroek 	/*
857*ef8d499eSDavid van Moolenbroek 	 * First make sure that the interface name is valid, unique, and not
858*ef8d499eSDavid van Moolenbroek 	 * reserved for virtual interface types.
859*ef8d499eSDavid van Moolenbroek 	 */
860*ef8d499eSDavid van Moolenbroek 	if ((r = ifdev_check_name(name, NULL /*vtype_slot*/)) != OK) {
861*ef8d499eSDavid van Moolenbroek 		/*
862*ef8d499eSDavid van Moolenbroek 		 * There is some risk in printing bad stuff, but this may help
863*ef8d499eSDavid van Moolenbroek 		 * in preventing serious driver writer frustration..
864*ef8d499eSDavid van Moolenbroek 		 */
865*ef8d499eSDavid van Moolenbroek 		printf("LWIP: invalid driver name '%s' (%d)\n", name, r);
866*ef8d499eSDavid van Moolenbroek 
867*ef8d499eSDavid van Moolenbroek 		return NULL;
868*ef8d499eSDavid van Moolenbroek 	}
869*ef8d499eSDavid van Moolenbroek 
870*ef8d499eSDavid van Moolenbroek 	/* Then see if there is a free ethernet interface object available. */
871*ef8d499eSDavid van Moolenbroek 	if (SIMPLEQ_EMPTY(&ethif_freelist)) {
872*ef8d499eSDavid van Moolenbroek 		printf("LWIP: out of slots for driver name '%s'\n", name);
873*ef8d499eSDavid van Moolenbroek 
874*ef8d499eSDavid van Moolenbroek 		return NULL;
875*ef8d499eSDavid van Moolenbroek 	}
876*ef8d499eSDavid van Moolenbroek 
877*ef8d499eSDavid van Moolenbroek 	/*
878*ef8d499eSDavid van Moolenbroek 	 * All good; set up the interface.  First initialize the object, since
879*ef8d499eSDavid van Moolenbroek 	 * adding the interface to lwIP might spawn some activity right away.
880*ef8d499eSDavid van Moolenbroek 	 */
881*ef8d499eSDavid van Moolenbroek 	ethif = SIMPLEQ_FIRST(&ethif_freelist);
882*ef8d499eSDavid van Moolenbroek 	SIMPLEQ_REMOVE_HEAD(&ethif_freelist, ethif_next);
883*ef8d499eSDavid van Moolenbroek 
884*ef8d499eSDavid van Moolenbroek 	/* Initialize the ethif structure. */
885*ef8d499eSDavid van Moolenbroek 	memset(ethif, 0, sizeof(*ethif));
886*ef8d499eSDavid van Moolenbroek 	ethif->ethif_ndev = id;
887*ef8d499eSDavid van Moolenbroek 	ethif->ethif_flags = ETHIFF_DISABLED;
888*ef8d499eSDavid van Moolenbroek 	ethif->ethif_caps = caps;
889*ef8d499eSDavid van Moolenbroek 
890*ef8d499eSDavid van Moolenbroek 	ethif->ethif_snd.es_head = NULL;
891*ef8d499eSDavid van Moolenbroek 	ethif->ethif_snd.es_unsentp = &ethif->ethif_snd.es_head;
892*ef8d499eSDavid van Moolenbroek 	ethif->ethif_snd.es_tailp = &ethif->ethif_snd.es_head;
893*ef8d499eSDavid van Moolenbroek 	ethif->ethif_snd.es_count = 0;
894*ef8d499eSDavid van Moolenbroek 
895*ef8d499eSDavid van Moolenbroek 	ethif->ethif_rcv.er_head = NULL;
896*ef8d499eSDavid van Moolenbroek 	ethif->ethif_rcv.er_tailp = &ethif->ethif_rcv.er_head;
897*ef8d499eSDavid van Moolenbroek 
898*ef8d499eSDavid van Moolenbroek 	/*
899*ef8d499eSDavid van Moolenbroek 	 * Set all the three configurations to the same initial values.  Since
900*ef8d499eSDavid van Moolenbroek 	 * any change to the configuration will go through all three, this
901*ef8d499eSDavid van Moolenbroek 	 * allows us to obtain various parts of the status (in particular, the
902*ef8d499eSDavid van Moolenbroek 	 * mode, flags, enabled capabilities, and media type selection) from
903*ef8d499eSDavid van Moolenbroek 	 * any of the three without having to consult the others.  Note that
904*ef8d499eSDavid van Moolenbroek 	 * the hardware address is set to a indeterminate initial value, as it
905*ef8d499eSDavid van Moolenbroek 	 * is left to the network driver unless specifically overridden.
906*ef8d499eSDavid van Moolenbroek 	 */
907*ef8d499eSDavid van Moolenbroek 	ethif->ethif_active.nconf_set = 0;
908*ef8d499eSDavid van Moolenbroek 	ethif->ethif_active.nconf_mode = NDEV_MODE_DOWN;
909*ef8d499eSDavid van Moolenbroek 	ethif->ethif_active.nconf_flags = 0;
910*ef8d499eSDavid van Moolenbroek 	ethif->ethif_active.nconf_caps = 0;
911*ef8d499eSDavid van Moolenbroek 	ethif->ethif_active.nconf_media =
912*ef8d499eSDavid van Moolenbroek 	    IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0);
913*ef8d499eSDavid van Moolenbroek 	memcpy(&ethif->ethif_pending, &ethif->ethif_active,
914*ef8d499eSDavid van Moolenbroek 	    sizeof(ethif->ethif_pending));
915*ef8d499eSDavid van Moolenbroek 	memcpy(&ethif->ethif_wanted, &ethif->ethif_pending,
916*ef8d499eSDavid van Moolenbroek 	    sizeof(ethif->ethif_wanted));
917*ef8d499eSDavid van Moolenbroek 
918*ef8d499eSDavid van Moolenbroek 	/*
919*ef8d499eSDavid van Moolenbroek 	 * Compute the initial NetBSD-style interface flags.  The IFF_SIMPLEX
920*ef8d499eSDavid van Moolenbroek 	 * interface flag is always enabled because we do not support network
921*ef8d499eSDavid van Moolenbroek 	 * drivers that are receiving their own packets.  In particular, lwIP
922*ef8d499eSDavid van Moolenbroek 	 * currently does not deal well with receiving back its own multicast
923*ef8d499eSDavid van Moolenbroek 	 * packets, which leads to IPv6 DAD failures.  The other two flags
924*ef8d499eSDavid van Moolenbroek 	 * (IFF_BROADCAST, IFF_MULTICAST) denote capabilities, not enabled
925*ef8d499eSDavid van Moolenbroek 	 * receipt modes.
926*ef8d499eSDavid van Moolenbroek 	 */
927*ef8d499eSDavid van Moolenbroek 	ifflags = IFF_SIMPLEX;
928*ef8d499eSDavid van Moolenbroek 	if (caps & NDEV_CAP_BCAST)
929*ef8d499eSDavid van Moolenbroek 		ifflags |= IFF_BROADCAST;
930*ef8d499eSDavid van Moolenbroek 	if (caps & NDEV_CAP_MCAST)
931*ef8d499eSDavid van Moolenbroek 		ifflags |= IFF_MULTICAST;
932*ef8d499eSDavid van Moolenbroek 
933*ef8d499eSDavid van Moolenbroek 	/* Finally, add the interface to ifdev and lwIP.  This cannot fail. */
934*ef8d499eSDavid van Moolenbroek 	ifdev_add(&ethif->ethif_ifdev, name, ifflags, IFT_ETHER, ETH_HDR_LEN,
935*ef8d499eSDavid van Moolenbroek 	    ETHARP_HWADDR_LEN, DLT_EN10MB, ETHIF_DEF_MTU,
936*ef8d499eSDavid van Moolenbroek 	    ND6_IFF_PERFORMNUD | ND6_IFF_AUTO_LINKLOCAL, &ethif_ops);
937*ef8d499eSDavid van Moolenbroek 
938*ef8d499eSDavid van Moolenbroek 	return ethif;
939*ef8d499eSDavid van Moolenbroek }
940*ef8d499eSDavid van Moolenbroek 
941*ef8d499eSDavid van Moolenbroek /*
942*ef8d499eSDavid van Moolenbroek  * The link status and/or media type of an ethernet interface has changed.
943*ef8d499eSDavid van Moolenbroek  */
944*ef8d499eSDavid van Moolenbroek static void
ethif_set_status(struct ethif * ethif,uint32_t link,uint32_t media)945*ef8d499eSDavid van Moolenbroek ethif_set_status(struct ethif * ethif, uint32_t link, uint32_t media)
946*ef8d499eSDavid van Moolenbroek {
947*ef8d499eSDavid van Moolenbroek 	unsigned int iflink;
948*ef8d499eSDavid van Moolenbroek 
949*ef8d499eSDavid van Moolenbroek 	/* We save the media type locally for now. */
950*ef8d499eSDavid van Moolenbroek 	ethif->ethif_media = media;
951*ef8d499eSDavid van Moolenbroek 
952*ef8d499eSDavid van Moolenbroek 	/* Let the ifdev module handle the details of the link change. */
953*ef8d499eSDavid van Moolenbroek 	switch (link) {
954*ef8d499eSDavid van Moolenbroek 	case NDEV_LINK_UP:	iflink = LINK_STATE_UP;		break;
955*ef8d499eSDavid van Moolenbroek 	case NDEV_LINK_DOWN:	iflink = LINK_STATE_DOWN;	break;
956*ef8d499eSDavid van Moolenbroek 	default:		iflink = LINK_STATE_UNKNOWN;	break;
957*ef8d499eSDavid van Moolenbroek 	}
958*ef8d499eSDavid van Moolenbroek 
959*ef8d499eSDavid van Moolenbroek 	ifdev_update_link(&ethif->ethif_ifdev, iflink);
960*ef8d499eSDavid van Moolenbroek }
961*ef8d499eSDavid van Moolenbroek 
/*
 * The ndev layer reports that a previously added or disabled network device
 * driver has been (re)enabled.  Start by initializing the driver.  Return TRUE
 * if the interface could indeed be enabled, or FALSE if it should be forgotten
 * altogether after all.
 *
 * 'name' is the name reported by the driver, or NULL if no name was reported,
 * in which case the given hardware address is treated as the factory address
 * below (presumably NULL occurs only on the driver's very first
 * initialization reply -- TODO: confirm against the ndev layer).  'hwaddr'
 * and 'hwaddr_len' give the driver's current hardware address, 'caps' its
 * capability mask, and 'link'/'media' its current link status and media type.
 */
int
ethif_enable(struct ethif * ethif, const char * name,
	const struct ndev_hwaddr * hwaddr, uint8_t hwaddr_len, uint32_t caps,
	uint32_t link, uint32_t media)
{
	int r;

	assert(ethif->ethif_flags & ETHIFF_DISABLED);

	/*
	 * One disadvantage of keeping service labels and ethernet driver names
	 * disjunct is that the ethernet driver may mess with its name between
	 * restarts.  Ultimately we may end up renaming our ethernet drivers
	 * such that their labels match their names, in which case we no longer
	 * need the drivers themselves to produce a name, and we can retire
	 * this check.
	 */
	if (name != NULL && strcmp(ethif_get_name(ethif), name)) {
		printf("LWIP: driver '%s' restarted with name '%s'\n",
		    ethif_get_name(ethif), name);

		return FALSE;
	}

	/*
	 * The hardware address length is just a sanity check for now.  After
	 * the initialization reply, we assume the same length is used for all
	 * addresses, which is also the maximum, namely 48 bits (six bytes).
	 */
	if (hwaddr_len != ETHARP_HWADDR_LEN) {
		printf("LWIP: driver '%s' reports hwaddr length %u\n",
		    ethif_get_name(ethif), hwaddr_len);

		return FALSE;
	}

	/*
	 * If the driver has changed its available capabilities as a result of
	 * a restart, we have a problem: we may already have configured the
	 * interface's netif object to make use of of some of those
	 * capabilities.  TODO: we can deal with some cases (e.g., disappearing
	 * checksum offloading capabilities) with some effort, and with other
	 * cases (e.g., disappearing multicast support) with a LOT more effort.
	 */
	if (ethif->ethif_caps != caps) {
		printf("LWIP: driver '%s' changed capabilities\n",
		    ethif_get_name(ethif));

		return FALSE;
	}

	/*
	 * Set the hardware address on the interface, unless a request is
	 * currently pending to change it, in which case the new address has
	 * been set already and we do not want to revert that change.  If not,
	 * we always set the address, because it may have changed as part of a
	 * driver restart and we do not want to get out of sync with it, nor
	 * can we necessarily change it back.
	 */
	if (!(ethif->ethif_active.nconf_set & NDEV_SET_HWADDR) &&
	    !(ethif->ethif_pending.nconf_set & NDEV_SET_HWADDR))
		ifdev_update_hwaddr(&ethif->ethif_ifdev, hwaddr->nhwa_addr,
		    (name == NULL) /*is_factory*/);

	/*
	 * At this point, only one more thing can fail: it is possible that we
	 * do not manage to send the first configuration request due to memory
	 * shortage.  This is extremely unlikely to happen, so send the conf
	 * request first and forget the entire driver if it fails.
	 */
	/*
	 * Always generate a new multicast list before sending a configuration
	 * request, and at no other time (since there may be a grant for it).
	 */
	ethif_gen_mcast(ethif, &ethif->ethif_active);

	if ((r = ndev_conf(ethif->ethif_ndev, &ethif->ethif_active)) != OK) {
		printf("LWIP: sending first configuration to '%s' failed "
		    "(%d)\n", ethif_get_name(ethif), r);

		return FALSE;
	}

	/* Apply the reported link status and media type right away. */
	ethif_set_status(ethif, link, media);

	/*
	 * Mark the interface as operational again.  ETHIFF_FIRST_CONF denotes
	 * that the configuration request sent above -- a replay of the active
	 * configuration -- is still outstanding.
	 */
	ethif->ethif_flags &= ~ETHIFF_DISABLED;
	ethif->ethif_flags |= ETHIFF_FIRST_CONF;

	return TRUE;
}
1058*ef8d499eSDavid van Moolenbroek 
1059*ef8d499eSDavid van Moolenbroek /*
1060*ef8d499eSDavid van Moolenbroek  * The configuration change stored in the "pending" slot of the given ethif
1061*ef8d499eSDavid van Moolenbroek  * object has been acknowledged by the network device driver (or the driver has
1062*ef8d499eSDavid van Moolenbroek  * died, see ethif_disable()).  Apply changes to the "active" slot of the given
1063*ef8d499eSDavid van Moolenbroek  * ethif object, as well as previously delayed changes to lwIP through netif.
1064*ef8d499eSDavid van Moolenbroek  */
1065*ef8d499eSDavid van Moolenbroek static void
ethif_post_conf(struct ethif * ethif)1066*ef8d499eSDavid van Moolenbroek ethif_post_conf(struct ethif * ethif)
1067*ef8d499eSDavid van Moolenbroek {
1068*ef8d499eSDavid van Moolenbroek 	struct ndev_conf *nconf;
1069*ef8d499eSDavid van Moolenbroek 	unsigned int flags;
1070*ef8d499eSDavid van Moolenbroek 
1071*ef8d499eSDavid van Moolenbroek 	nconf = &ethif->ethif_pending;
1072*ef8d499eSDavid van Moolenbroek 
1073*ef8d499eSDavid van Moolenbroek 	/*
1074*ef8d499eSDavid van Moolenbroek 	 * Now that the driver configuration has changed, we know that the
1075*ef8d499eSDavid van Moolenbroek 	 * new checksum settings will be applied to all sent and received
1076*ef8d499eSDavid van Moolenbroek 	 * packets, and we can disable checksumming flags in netif as desired.
1077*ef8d499eSDavid van Moolenbroek 	 * Enabling checksumming flags has already been done earlier on.
1078*ef8d499eSDavid van Moolenbroek 	 */
1079*ef8d499eSDavid van Moolenbroek 	if (nconf->nconf_set & NDEV_SET_CAPS) {
1080*ef8d499eSDavid van Moolenbroek 		flags = ethif_get_netif(ethif)->chksum_flags;
1081*ef8d499eSDavid van Moolenbroek 
1082*ef8d499eSDavid van Moolenbroek 		if (nconf->nconf_caps & NDEV_CAP_CS_IP4_TX)
1083*ef8d499eSDavid van Moolenbroek 			flags &= ~NETIF_CHECKSUM_GEN_IP;
1084*ef8d499eSDavid van Moolenbroek 		if (nconf->nconf_caps & NDEV_CAP_CS_IP4_RX)
1085*ef8d499eSDavid van Moolenbroek 			flags &= ~NETIF_CHECKSUM_CHECK_IP;
1086*ef8d499eSDavid van Moolenbroek 		if (nconf->nconf_caps & NDEV_CAP_CS_UDP_TX)
1087*ef8d499eSDavid van Moolenbroek 			flags &= ~NETIF_CHECKSUM_GEN_UDP;
1088*ef8d499eSDavid van Moolenbroek 		if (nconf->nconf_caps & NDEV_CAP_CS_UDP_RX)
1089*ef8d499eSDavid van Moolenbroek 			flags &= ~NETIF_CHECKSUM_CHECK_UDP;
1090*ef8d499eSDavid van Moolenbroek 		if (nconf->nconf_caps & NDEV_CAP_CS_TCP_TX)
1091*ef8d499eSDavid van Moolenbroek 			flags &= ~NETIF_CHECKSUM_GEN_TCP;
1092*ef8d499eSDavid van Moolenbroek 		if (nconf->nconf_caps & NDEV_CAP_CS_TCP_RX)
1093*ef8d499eSDavid van Moolenbroek 			flags &= ~NETIF_CHECKSUM_CHECK_TCP;
1094*ef8d499eSDavid van Moolenbroek 
1095*ef8d499eSDavid van Moolenbroek 		NETIF_SET_CHECKSUM_CTRL(ethif_get_netif(ethif), flags);
1096*ef8d499eSDavid van Moolenbroek 	}
1097*ef8d499eSDavid van Moolenbroek 
1098*ef8d499eSDavid van Moolenbroek 	/*
1099*ef8d499eSDavid van Moolenbroek 	 * Merge any individual parts of the now acknowledged configuration
1100*ef8d499eSDavid van Moolenbroek 	 * changes into the active configuration.  The result is that we are
1101*ef8d499eSDavid van Moolenbroek 	 * able to reapply these changes at any time should the network driver
1102*ef8d499eSDavid van Moolenbroek 	 * be restarted.  In addition, by only setting bits for fields that
1103*ef8d499eSDavid van Moolenbroek 	 * have actually changed, we can later tell whether the user wanted the
1104*ef8d499eSDavid van Moolenbroek 	 * change or ethif should just take over what the driver reports after
1105*ef8d499eSDavid van Moolenbroek 	 * a restart; this is important for HW-address and media settings.
1106*ef8d499eSDavid van Moolenbroek 	 */
1107*ef8d499eSDavid van Moolenbroek 	ethif_merge_conf(&ethif->ethif_active, &ethif->ethif_pending);
1108*ef8d499eSDavid van Moolenbroek }
1109*ef8d499eSDavid van Moolenbroek 
1110*ef8d499eSDavid van Moolenbroek /*
1111*ef8d499eSDavid van Moolenbroek  * All receive requests have been canceled at the ndev layer, because the
1112*ef8d499eSDavid van Moolenbroek  * network device driver has been restarted or shut down.  Clear the receive
1113*ef8d499eSDavid van Moolenbroek  * queue, freeing any packets in it.
1114*ef8d499eSDavid van Moolenbroek  */
1115*ef8d499eSDavid van Moolenbroek static void
ethif_drain(struct ethif * ethif)1116*ef8d499eSDavid van Moolenbroek ethif_drain(struct ethif * ethif)
1117*ef8d499eSDavid van Moolenbroek {
1118*ef8d499eSDavid van Moolenbroek 	struct pbuf *pbuf, **pnext;
1119*ef8d499eSDavid van Moolenbroek 
1120*ef8d499eSDavid van Moolenbroek 	while ((pbuf = ethif->ethif_rcv.er_head) != NULL) {
1121*ef8d499eSDavid van Moolenbroek 		pnext = pchain_end(pbuf);
1122*ef8d499eSDavid van Moolenbroek 
1123*ef8d499eSDavid van Moolenbroek 		if ((ethif->ethif_rcv.er_head = *pnext) == NULL)
1124*ef8d499eSDavid van Moolenbroek 			ethif->ethif_rcv.er_tailp = &ethif->ethif_rcv.er_head;
1125*ef8d499eSDavid van Moolenbroek 
1126*ef8d499eSDavid van Moolenbroek 		*pnext = NULL;
1127*ef8d499eSDavid van Moolenbroek 		pbuf_free(pbuf);
1128*ef8d499eSDavid van Moolenbroek 	}
1129*ef8d499eSDavid van Moolenbroek }
1130*ef8d499eSDavid van Moolenbroek 
1131*ef8d499eSDavid van Moolenbroek /*
1132*ef8d499eSDavid van Moolenbroek  * The network device driver has stopped working (i.e., crashed), but has not
1133*ef8d499eSDavid van Moolenbroek  * been shut down completely, and is expect to come back later.
1134*ef8d499eSDavid van Moolenbroek  */
1135*ef8d499eSDavid van Moolenbroek void
ethif_disable(struct ethif * ethif)1136*ef8d499eSDavid van Moolenbroek ethif_disable(struct ethif * ethif)
1137*ef8d499eSDavid van Moolenbroek {
1138*ef8d499eSDavid van Moolenbroek 
1139*ef8d499eSDavid van Moolenbroek 	/*
1140*ef8d499eSDavid van Moolenbroek 	 * We assume, optimistically, that a new instance of the driver will be
1141*ef8d499eSDavid van Moolenbroek 	 * brought up soon after which we can continue operating as before.  As
1142*ef8d499eSDavid van Moolenbroek 	 * such, we do not want to change most of the user-visible state until
1143*ef8d499eSDavid van Moolenbroek 	 * we know for sure that our optimism was in vain.  In particular, we
1144*ef8d499eSDavid van Moolenbroek 	 * do *not* want to change the following parts of the state here:
1145*ef8d499eSDavid van Moolenbroek 	 *
1146*ef8d499eSDavid van Moolenbroek 	 *   - the contents of the send queue;
1147*ef8d499eSDavid van Moolenbroek 	 *   - the state of the interface (up or down);
1148*ef8d499eSDavid van Moolenbroek 	 *   - the state and media type of the physical link.
1149*ef8d499eSDavid van Moolenbroek 	 *
1150*ef8d499eSDavid van Moolenbroek 	 * The main user-visible indication of the crash will be that the
1151*ef8d499eSDavid van Moolenbroek 	 * interface does not have the IFF_RUNNING flag set.
1152*ef8d499eSDavid van Moolenbroek 	 */
1153*ef8d499eSDavid van Moolenbroek 
1154*ef8d499eSDavid van Moolenbroek 	/*
1155*ef8d499eSDavid van Moolenbroek 	 * If a configuration request was pending, it will be lost now.  Highly
1156*ef8d499eSDavid van Moolenbroek 	 * unintuitively, make the requested configuration the *active* one,
1157*ef8d499eSDavid van Moolenbroek 	 * just as though the request completed successfully.  This works,
1158*ef8d499eSDavid van Moolenbroek 	 * because once the driver comes back, the active configuration will be
1159*ef8d499eSDavid van Moolenbroek 	 * replayed as initial configuration.  Therefore, by pretending that
1160*ef8d499eSDavid van Moolenbroek 	 * the current request went through, we ensure that it too will be sent
1161*ef8d499eSDavid van Moolenbroek 	 * to the new instance--before anything else is allowed to happen.
1162*ef8d499eSDavid van Moolenbroek 	 */
1163*ef8d499eSDavid van Moolenbroek 	if (ethif->ethif_pending.nconf_set != 0)
1164*ef8d499eSDavid van Moolenbroek 		ethif_post_conf(ethif);
1165*ef8d499eSDavid van Moolenbroek 
1166*ef8d499eSDavid van Moolenbroek 	/*
1167*ef8d499eSDavid van Moolenbroek 	 * Any packet send requests have been lost, too, and likewise forgotten
1168*ef8d499eSDavid van Moolenbroek 	 * by ndev.  Thus, we need to forget that we sent any packets, so that
1169*ef8d499eSDavid van Moolenbroek 	 * they will be resent after the driver comes back up.  That *may*
1170*ef8d499eSDavid van Moolenbroek 	 * cause packet duplication, but that is preferable over packet loss.
1171*ef8d499eSDavid van Moolenbroek 	 */
1172*ef8d499eSDavid van Moolenbroek 	ethif->ethif_snd.es_unsentp = &ethif->ethif_snd.es_head;
1173*ef8d499eSDavid van Moolenbroek 
1174*ef8d499eSDavid van Moolenbroek 	/*
1175*ef8d499eSDavid van Moolenbroek 	 * We fully restart the receive queue, because all receive requests
1176*ef8d499eSDavid van Moolenbroek 	 * have been forgotten by ndev as well now and it is easier to simply
1177*ef8d499eSDavid van Moolenbroek 	 * reconstruct the receive queue in its entirety later on.
1178*ef8d499eSDavid van Moolenbroek 	 */
1179*ef8d499eSDavid van Moolenbroek 	ethif_drain(ethif);
1180*ef8d499eSDavid van Moolenbroek 
1181*ef8d499eSDavid van Moolenbroek 	/* Make sure we do not attempt to initiate new requests for now. */
1182*ef8d499eSDavid van Moolenbroek 	ethif->ethif_flags &= ~ETHIFF_FIRST_CONF;
1183*ef8d499eSDavid van Moolenbroek 	ethif->ethif_flags |= ETHIFF_DISABLED;
1184*ef8d499eSDavid van Moolenbroek }
1185*ef8d499eSDavid van Moolenbroek 
/*
 * Dequeue and discard the packet at the head of the send queue.  This both
 * unlinks the head entry and releases the pbuf(s) backing it, and updates the
 * queue's buffer count and the global spare-buffer accounting accordingly.
 */
static void
ethif_dequeue_send(struct ethif * ethif)
{
	struct pbuf *pref, *pbuf, **pnext;
	unsigned int count, spares;

	/*
	 * The send queue is a linked list of reference buffers, each of which
	 * links to the actual packet.  Dequeue the first reference buffer.
	 */
	pref = ethif->ethif_snd.es_head;
	assert(pref != NULL);

	pnext = pchain_end(pref);

	/* If the unsent pointer was at this entry's link, rewind it. */
	if (ethif->ethif_snd.es_unsentp == pnext)
		ethif->ethif_snd.es_unsentp = &ethif->ethif_snd.es_head;

	if ((ethif->ethif_snd.es_head = *pnext) == NULL)
		ethif->ethif_snd.es_tailp = &ethif->ethif_snd.es_head;

	/* Do this before possibly calling pbuf_clen() below.. */
	*pnext = NULL;

	/*
	 * If we never made a copy of the original packet, we now have it
	 * pointed to by a reference buffer.  If so, decrease the reference
	 * count of the actual packet, thereby freeing it if lwIP itself was
	 * already done with.  Otherwise, the copy of the packet is the
	 * reference buffer itself.  In both cases we need to free that buffer.
	 */
	if (pref->type == PBUF_REF) {
		pbuf = (struct pbuf *)pref->payload;

		pbuf_free(pbuf);

		/*
		 * For PBUF_REF entries, 'len' holds the buffer count of the
		 * referenced packet, as set at enqueue time -- NOTE(review):
		 * verify against the enqueue code elsewhere in this file.
		 */
		count = pref->len;
	} else
		count = pbuf_clen(pref);

	assert(count > 0);
	assert(ethif->ethif_snd.es_count >= count);
	ethif->ethif_snd.es_count -= count;

	/*
	 * Return the freed buffers' contribution to the global spares pool:
	 * if the queue still holds at least ETHIF_PBUF_MIN buffers, all
	 * 'count' dequeued buffers counted as spares; if this dequeue made
	 * the queue drop below the ETHIF_PBUF_MIN mark, only the portion
	 * above that mark did; otherwise none did.
	 */
	if (ethif->ethif_snd.es_count >= ETHIF_PBUF_MIN)
		spares = count;
	else if (ethif->ethif_snd.es_count + count > ETHIF_PBUF_MIN)
		spares = ethif->ethif_snd.es_count + count - ETHIF_PBUF_MIN;
	else
		spares = 0;

	assert(ethif_spares >= spares);
	ethif_spares -= spares;

	/* Free the reference buffer as well. */
	pbuf_free(pref);

	/* If the send queue is now empty, clear the IFF_OACTIVE flag. */
	if (ethif->ethif_snd.es_head == NULL)
		ifdev_update_ifflags(&ethif->ethif_ifdev,
		    ifdev_get_ifflags(&ethif->ethif_ifdev) & ~IFF_OACTIVE);
}
1251*ef8d499eSDavid van Moolenbroek 
1252*ef8d499eSDavid van Moolenbroek /*
1253*ef8d499eSDavid van Moolenbroek  * The ndev layer reports that a network device driver has been permanently
1254*ef8d499eSDavid van Moolenbroek  * shut down.  Remove the corresponding ethernet interface from the system.
1255*ef8d499eSDavid van Moolenbroek  */
1256*ef8d499eSDavid van Moolenbroek void
ethif_remove(struct ethif * ethif)1257*ef8d499eSDavid van Moolenbroek ethif_remove(struct ethif * ethif)
1258*ef8d499eSDavid van Moolenbroek {
1259*ef8d499eSDavid van Moolenbroek 	int r;
1260*ef8d499eSDavid van Moolenbroek 
1261*ef8d499eSDavid van Moolenbroek 	/* Clear the send and receive queues. */
1262*ef8d499eSDavid van Moolenbroek 	while (ethif->ethif_snd.es_head != NULL)
1263*ef8d499eSDavid van Moolenbroek 		ethif_dequeue_send(ethif);
1264*ef8d499eSDavid van Moolenbroek 
1265*ef8d499eSDavid van Moolenbroek 	ethif_drain(ethif);
1266*ef8d499eSDavid van Moolenbroek 
1267*ef8d499eSDavid van Moolenbroek 	/* Let the ifdev module deal with most other removal aspects. */
1268*ef8d499eSDavid van Moolenbroek 	if ((r = ifdev_remove(&ethif->ethif_ifdev)) != OK)
1269*ef8d499eSDavid van Moolenbroek 		panic("unable to remove ethernet interface: %d", r);
1270*ef8d499eSDavid van Moolenbroek 
1271*ef8d499eSDavid van Moolenbroek 	/* Finally, readd the ethif object to the free list. */
1272*ef8d499eSDavid van Moolenbroek 	SIMPLEQ_INSERT_HEAD(&ethif_freelist, ethif, ethif_next);
1273*ef8d499eSDavid van Moolenbroek }
1274*ef8d499eSDavid van Moolenbroek 
1275*ef8d499eSDavid van Moolenbroek /*
1276*ef8d499eSDavid van Moolenbroek  * The ndev layer reports that the (oldest) pending configuration request has
1277*ef8d499eSDavid van Moolenbroek  * completed with the given result.
1278*ef8d499eSDavid van Moolenbroek  */
1279*ef8d499eSDavid van Moolenbroek void
ethif_configured(struct ethif * ethif,int32_t result)1280*ef8d499eSDavid van Moolenbroek ethif_configured(struct ethif * ethif, int32_t result)
1281*ef8d499eSDavid van Moolenbroek {
1282*ef8d499eSDavid van Moolenbroek 
1283*ef8d499eSDavid van Moolenbroek 	/*
1284*ef8d499eSDavid van Moolenbroek 	 * The driver is not supposed to return failure in response to a
1285*ef8d499eSDavid van Moolenbroek 	 * configure result.  If it does, we have no proper way to recover, as
1286*ef8d499eSDavid van Moolenbroek 	 * we may already have applied part of the new configuration to netif.
1287*ef8d499eSDavid van Moolenbroek 	 * For now, just report failure and then pretend success.
1288*ef8d499eSDavid van Moolenbroek 	 */
1289*ef8d499eSDavid van Moolenbroek 	if (result < 0) {
1290*ef8d499eSDavid van Moolenbroek 		printf("LWIP: driver '%s' replied with conf result %d\n",
1291*ef8d499eSDavid van Moolenbroek 		    ethif_get_name(ethif), result);
1292*ef8d499eSDavid van Moolenbroek 
1293*ef8d499eSDavid van Moolenbroek 		result = 0;
1294*ef8d499eSDavid van Moolenbroek 	}
1295*ef8d499eSDavid van Moolenbroek 
1296*ef8d499eSDavid van Moolenbroek 	if (ethif->ethif_flags & ETHIFF_FIRST_CONF)
1297*ef8d499eSDavid van Moolenbroek 		ethif->ethif_flags &= ~ETHIFF_FIRST_CONF;
1298*ef8d499eSDavid van Moolenbroek 	else
1299*ef8d499eSDavid van Moolenbroek 		ethif_post_conf(ethif);
1300*ef8d499eSDavid van Moolenbroek 
1301*ef8d499eSDavid van Moolenbroek 	/*
1302*ef8d499eSDavid van Moolenbroek 	 * For now, the result is simply a boolean value indicating whether the
1303*ef8d499eSDavid van Moolenbroek 	 * driver is using the all-multicast receive mode instead of the
1304*ef8d499eSDavid van Moolenbroek 	 * multicast-list receive mode.  We can turn it into a bitmap later.
1305*ef8d499eSDavid van Moolenbroek 	 */
1306*ef8d499eSDavid van Moolenbroek 	if (result != 0) {
1307*ef8d499eSDavid van Moolenbroek 		ethif->ethif_active.nconf_mode &= ~NDEV_MODE_MCAST_LIST;
1308*ef8d499eSDavid van Moolenbroek 		ethif->ethif_active.nconf_mode |= NDEV_MODE_MCAST_ALL;
1309*ef8d499eSDavid van Moolenbroek 	}
1310*ef8d499eSDavid van Moolenbroek 
1311*ef8d499eSDavid van Moolenbroek 	/* The interface flags may have changed now, so update them. */
1312*ef8d499eSDavid van Moolenbroek 	ethif_update_ifflags(ethif);
1313*ef8d499eSDavid van Moolenbroek 
1314*ef8d499eSDavid van Moolenbroek 	/* Regular operation will resume from the polling function. */
1315*ef8d499eSDavid van Moolenbroek }
1316*ef8d499eSDavid van Moolenbroek 
1317*ef8d499eSDavid van Moolenbroek /*
1318*ef8d499eSDavid van Moolenbroek  * The ndev layer reports that the first packet on the send queue has been
1319*ef8d499eSDavid van Moolenbroek  * successfully transmitted with 'result' set to OK, or dropped if 'result' is
1320*ef8d499eSDavid van Moolenbroek  * negative.  The latter may happen if the interface was taken down while there
1321*ef8d499eSDavid van Moolenbroek  * were still packets in transit.
1322*ef8d499eSDavid van Moolenbroek  */
1323*ef8d499eSDavid van Moolenbroek void
ethif_sent(struct ethif * ethif,int32_t result)1324*ef8d499eSDavid van Moolenbroek ethif_sent(struct ethif * ethif, int32_t result)
1325*ef8d499eSDavid van Moolenbroek {
1326*ef8d499eSDavid van Moolenbroek 
1327*ef8d499eSDavid van Moolenbroek 	ethif_dequeue_send(ethif);
1328*ef8d499eSDavid van Moolenbroek 
1329*ef8d499eSDavid van Moolenbroek 	if (result < 0)
1330*ef8d499eSDavid van Moolenbroek 		ifdev_output_drop(&ethif->ethif_ifdev);
1331*ef8d499eSDavid van Moolenbroek 
1332*ef8d499eSDavid van Moolenbroek 	/* More requests may be sent from the polling function now. */
1333*ef8d499eSDavid van Moolenbroek }
1334*ef8d499eSDavid van Moolenbroek 
/*
 * The ndev layer reports that the first buffer on the receive queue has been
 * filled with a packet of 'result' bytes, or if 'result' is negative, the
 * receive request has been aborted.  Deliver the packet to the upper layers
 * if possible, or free it otherwise.
 */
void
ethif_received(struct ethif * ethif, int32_t result)
{
	struct pbuf *pbuf, *pwalk, **pnext;
	size_t left;

	/*
	 * Start by removing the first buffer chain off the receive queue.  The
	 * ndev layer guarantees that there ever was a receive request at all.
	 */
	if ((pbuf = ethif->ethif_rcv.er_head) == NULL)
		panic("driver received packet but queue empty");

	pnext = pchain_end(pbuf);

	if ((ethif->ethif_rcv.er_head = *pnext) == NULL)
		ethif->ethif_rcv.er_tailp = &ethif->ethif_rcv.er_head;
	*pnext = NULL;

	/* Decide if we can and should deliver a packet to the layers above. */
	if (result <= 0 || !ethif_can_recv(ethif)) {
		pbuf_free(pbuf);

		return;
	}

	/* A driver must never return more bytes than the buffers can hold. */
	if (result > pbuf->tot_len) {
		/* Cast to ssize_t so that %zd matches the argument type. */
		printf("LWIP: driver '%s' returned bad packet size (%zd)\n",
		    ethif_get_name(ethif), (ssize_t)result);

		pbuf_free(pbuf);

		return;
	}

	/*
	 * The packet often does not use all of the buffers, or at least not
	 * all of the last buffer.  Adjust lengths for the buffers that contain
	 * part of the packet, and free the remaining (unused) buffers, if any.
	 */
	left = (size_t)result;

	/*
	 * For each buffer in the chain, set tot_len to the number of packet
	 * bytes from that buffer onward, and clamp len to what the buffer
	 * actually contains.  The loop ends at the buffer holding the last
	 * packet byte; it is guaranteed to terminate because 'left' is at
	 * most the original tot_len (checked above).
	 */
	for (pwalk = pbuf; ; pwalk = pwalk->next) {
		pwalk->tot_len = left;
		if (pwalk->len > left)
			pwalk->len = left;
		left -= pwalk->len;
		if (left == 0)
			break;
	}

	/* Free any wholly unused buffers beyond the end of the packet. */
	if (pwalk->next != NULL) {
		pbuf_free(pwalk->next);

		pwalk->next = NULL;
	}

	/*
	 * Finally, hand off the packet to the layers above.  We go through
	 * ifdev so that it can pass the packet to BPF devices and update
	 * statistics and all that.
	 */
	ifdev_input(&ethif->ethif_ifdev, pbuf, NULL /*netif*/,
	    TRUE /*to_bpf*/);
}
1405*ef8d499eSDavid van Moolenbroek 
1406*ef8d499eSDavid van Moolenbroek /*
1407*ef8d499eSDavid van Moolenbroek  * The ndev layer reports a network driver status update.  If anything has
1408*ef8d499eSDavid van Moolenbroek  * changed since the last status, we may have to take action.  The given
1409*ef8d499eSDavid van Moolenbroek  * statistics counters are relative to the previous status report.
1410*ef8d499eSDavid van Moolenbroek  */
1411*ef8d499eSDavid van Moolenbroek void
ethif_status(struct ethif * ethif,uint32_t link,uint32_t media,uint32_t oerror,uint32_t coll,uint32_t ierror,uint32_t iqdrop)1412*ef8d499eSDavid van Moolenbroek ethif_status(struct ethif * ethif, uint32_t link, uint32_t media,
1413*ef8d499eSDavid van Moolenbroek 	uint32_t oerror, uint32_t coll, uint32_t ierror, uint32_t iqdrop)
1414*ef8d499eSDavid van Moolenbroek {
1415*ef8d499eSDavid van Moolenbroek 	struct if_data *ifdata;
1416*ef8d499eSDavid van Moolenbroek 
1417*ef8d499eSDavid van Moolenbroek 	ethif_set_status(ethif, link, media);
1418*ef8d499eSDavid van Moolenbroek 
1419*ef8d499eSDavid van Moolenbroek 	ifdata = ifdev_get_ifdata(&ethif->ethif_ifdev);
1420*ef8d499eSDavid van Moolenbroek 	ifdata->ifi_oerrors += oerror;
1421*ef8d499eSDavid van Moolenbroek 	ifdata->ifi_collisions += coll;
1422*ef8d499eSDavid van Moolenbroek 	ifdata->ifi_ierrors += ierror;
1423*ef8d499eSDavid van Moolenbroek 	ifdata->ifi_iqdrops += iqdrop;
1424*ef8d499eSDavid van Moolenbroek }
1425*ef8d499eSDavid van Moolenbroek 
1426*ef8d499eSDavid van Moolenbroek /*
1427*ef8d499eSDavid van Moolenbroek  * Set NetBSD-style interface flags (IFF_) for an ethernet interface.
1428*ef8d499eSDavid van Moolenbroek  */
1429*ef8d499eSDavid van Moolenbroek static int
ethif_set_ifflags(struct ifdev * ifdev,unsigned int ifflags)1430*ef8d499eSDavid van Moolenbroek ethif_set_ifflags(struct ifdev * ifdev, unsigned int ifflags)
1431*ef8d499eSDavid van Moolenbroek {
1432*ef8d499eSDavid van Moolenbroek 	struct ethif *ethif = (struct ethif *)ifdev;
1433*ef8d499eSDavid van Moolenbroek 	uint32_t mode, flags;
1434*ef8d499eSDavid van Moolenbroek 
1435*ef8d499eSDavid van Moolenbroek 	/*
1436*ef8d499eSDavid van Moolenbroek 	 * We do not support IFF_NOARP at this time, because lwIP does not: the
1437*ef8d499eSDavid van Moolenbroek 	 * idea of IFF_NOARP is that only static ARP entries are used, but lwIP
1438*ef8d499eSDavid van Moolenbroek 	 * does not support separating static from dynamic ARP operation.  The
1439*ef8d499eSDavid van Moolenbroek 	 * flag does not appear to be particularly widely used anyway.
1440*ef8d499eSDavid van Moolenbroek 	 */
1441*ef8d499eSDavid van Moolenbroek 	if ((ifflags & ~(IFF_UP | IFF_DEBUG | IFF_LINK0 | IFF_LINK1 |
1442*ef8d499eSDavid van Moolenbroek 	    IFF_LINK2)) != 0)
1443*ef8d499eSDavid van Moolenbroek 		return EINVAL;
1444*ef8d499eSDavid van Moolenbroek 
1445*ef8d499eSDavid van Moolenbroek 	mode = ethif->ethif_wanted.nconf_mode;
1446*ef8d499eSDavid van Moolenbroek 	if ((ifflags & IFF_UP) && mode == NDEV_MODE_DOWN) {
1447*ef8d499eSDavid van Moolenbroek 		mode = NDEV_MODE_UP;
1448*ef8d499eSDavid van Moolenbroek 
1449*ef8d499eSDavid van Moolenbroek 		/* Always enable broadcast receipt when supported. */
1450*ef8d499eSDavid van Moolenbroek 		if (ethif->ethif_caps & NDEV_CAP_BCAST)
1451*ef8d499eSDavid van Moolenbroek 			mode |= NDEV_MODE_BCAST;
1452*ef8d499eSDavid van Moolenbroek 
1453*ef8d499eSDavid van Moolenbroek 		if (ifdev_is_promisc(ifdev))
1454*ef8d499eSDavid van Moolenbroek 			mode |= NDEV_MODE_PROMISC;
1455*ef8d499eSDavid van Moolenbroek 
1456*ef8d499eSDavid van Moolenbroek 		/*
1457*ef8d499eSDavid van Moolenbroek 		 * The multicast flags will be set right before we send the
1458*ef8d499eSDavid van Moolenbroek 		 * request to the driver.
1459*ef8d499eSDavid van Moolenbroek 		 */
1460*ef8d499eSDavid van Moolenbroek 	} else if (!(ifflags & IFF_UP) && mode != NDEV_MODE_DOWN)
1461*ef8d499eSDavid van Moolenbroek 		ethif->ethif_wanted.nconf_mode = NDEV_MODE_DOWN;
1462*ef8d499eSDavid van Moolenbroek 
1463*ef8d499eSDavid van Moolenbroek 	if (mode != ethif->ethif_wanted.nconf_mode) {
1464*ef8d499eSDavid van Moolenbroek 		ethif->ethif_wanted.nconf_mode = mode;
1465*ef8d499eSDavid van Moolenbroek 		ethif->ethif_wanted.nconf_set |= NDEV_SET_MODE;
1466*ef8d499eSDavid van Moolenbroek 	}
1467*ef8d499eSDavid van Moolenbroek 
1468*ef8d499eSDavid van Moolenbroek 	/*
1469*ef8d499eSDavid van Moolenbroek 	 * Some of the interface flags (UP, DEBUG, PROMISC, LINK[0-2]) are a
1470*ef8d499eSDavid van Moolenbroek 	 * reflection of the intended state as set by userland before, so that
1471*ef8d499eSDavid van Moolenbroek 	 * a userland utility will never not see the flag it just set (or the
1472*ef8d499eSDavid van Moolenbroek 	 * other way around).  These flags therefore do not necessarily reflect
1473*ef8d499eSDavid van Moolenbroek 	 * what is actually going on at that moment.  We cannot have both.
1474*ef8d499eSDavid van Moolenbroek 	 */
1475*ef8d499eSDavid van Moolenbroek 	flags = 0;
1476*ef8d499eSDavid van Moolenbroek 	if (ifflags & IFF_DEBUG)
1477*ef8d499eSDavid van Moolenbroek 		flags |= NDEV_FLAG_DEBUG;
1478*ef8d499eSDavid van Moolenbroek 	if (ifflags & IFF_LINK0)
1479*ef8d499eSDavid van Moolenbroek 		flags |= NDEV_FLAG_LINK0;
1480*ef8d499eSDavid van Moolenbroek 	if (ifflags & IFF_LINK1)
1481*ef8d499eSDavid van Moolenbroek 		flags |= NDEV_FLAG_LINK1;
1482*ef8d499eSDavid van Moolenbroek 	if (ifflags & IFF_LINK2)
1483*ef8d499eSDavid van Moolenbroek 		flags |= NDEV_FLAG_LINK2;
1484*ef8d499eSDavid van Moolenbroek 
1485*ef8d499eSDavid van Moolenbroek 	if (flags != ethif->ethif_wanted.nconf_flags) {
1486*ef8d499eSDavid van Moolenbroek 		ethif->ethif_wanted.nconf_flags = flags;
1487*ef8d499eSDavid van Moolenbroek 		ethif->ethif_wanted.nconf_set |= NDEV_SET_FLAGS;
1488*ef8d499eSDavid van Moolenbroek 	}
1489*ef8d499eSDavid van Moolenbroek 
1490*ef8d499eSDavid van Moolenbroek 	/* The changes will be picked up from the polling function. */
1491*ef8d499eSDavid van Moolenbroek 	return OK;
1492*ef8d499eSDavid van Moolenbroek }
1493*ef8d499eSDavid van Moolenbroek 
1494*ef8d499eSDavid van Moolenbroek /*
1495*ef8d499eSDavid van Moolenbroek  * Convert a bitmask of ndev-layer capabilities (NDEV_CAP_) to NetBSD-style
1496*ef8d499eSDavid van Moolenbroek  * interface capabilities (IFCAP_).
1497*ef8d499eSDavid van Moolenbroek  */
1498*ef8d499eSDavid van Moolenbroek static uint64_t
ethif_cap_to_ifcap(uint32_t caps)1499*ef8d499eSDavid van Moolenbroek ethif_cap_to_ifcap(uint32_t caps)
1500*ef8d499eSDavid van Moolenbroek {
1501*ef8d499eSDavid van Moolenbroek 	uint64_t ifcap;
1502*ef8d499eSDavid van Moolenbroek 
1503*ef8d499eSDavid van Moolenbroek 	ifcap = 0;
1504*ef8d499eSDavid van Moolenbroek 	if (caps & NDEV_CAP_CS_IP4_TX)
1505*ef8d499eSDavid van Moolenbroek 		ifcap |= IFCAP_CSUM_IPv4_Tx;
1506*ef8d499eSDavid van Moolenbroek 	if (caps & NDEV_CAP_CS_IP4_RX)
1507*ef8d499eSDavid van Moolenbroek 		ifcap |= IFCAP_CSUM_IPv4_Rx;
1508*ef8d499eSDavid van Moolenbroek 	if (caps & NDEV_CAP_CS_UDP_TX)
1509*ef8d499eSDavid van Moolenbroek 		ifcap |= IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv6_Tx;
1510*ef8d499eSDavid van Moolenbroek 	if (caps & NDEV_CAP_CS_UDP_RX)
1511*ef8d499eSDavid van Moolenbroek 		ifcap |= IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv6_Rx;
1512*ef8d499eSDavid van Moolenbroek 	if (caps & NDEV_CAP_CS_TCP_TX)
1513*ef8d499eSDavid van Moolenbroek 		ifcap |= IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv6_Tx;
1514*ef8d499eSDavid van Moolenbroek 	if (caps & NDEV_CAP_CS_TCP_RX)
1515*ef8d499eSDavid van Moolenbroek 		ifcap |= IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv6_Rx;
1516*ef8d499eSDavid van Moolenbroek 
1517*ef8d499eSDavid van Moolenbroek 	return ifcap;
1518*ef8d499eSDavid van Moolenbroek }
1519*ef8d499eSDavid van Moolenbroek 
1520*ef8d499eSDavid van Moolenbroek /*
1521*ef8d499eSDavid van Moolenbroek  * Retrieve potential and enabled NetBSD-style interface capabilities (IFCAP_).
1522*ef8d499eSDavid van Moolenbroek  */
1523*ef8d499eSDavid van Moolenbroek static void
ethif_get_ifcap(struct ifdev * ifdev,uint64_t * ifcap,uint64_t * ifena)1524*ef8d499eSDavid van Moolenbroek ethif_get_ifcap(struct ifdev * ifdev, uint64_t * ifcap, uint64_t * ifena)
1525*ef8d499eSDavid van Moolenbroek {
1526*ef8d499eSDavid van Moolenbroek 	struct ethif *ethif = (struct ethif *)ifdev;
1527*ef8d499eSDavid van Moolenbroek 
1528*ef8d499eSDavid van Moolenbroek 	*ifcap = ethif_cap_to_ifcap(ethif->ethif_caps);
1529*ef8d499eSDavid van Moolenbroek 	*ifena = ethif_cap_to_ifcap(ethif->ethif_wanted.nconf_caps);
1530*ef8d499eSDavid van Moolenbroek }
1531*ef8d499eSDavid van Moolenbroek 
1532*ef8d499eSDavid van Moolenbroek /*
1533*ef8d499eSDavid van Moolenbroek  * Set NetBSD-style enabled interface capabilities (IFCAP_).
1534*ef8d499eSDavid van Moolenbroek  */
1535*ef8d499eSDavid van Moolenbroek static int
ethif_set_ifcap(struct ifdev * ifdev,uint64_t ifcap)1536*ef8d499eSDavid van Moolenbroek ethif_set_ifcap(struct ifdev * ifdev, uint64_t ifcap)
1537*ef8d499eSDavid van Moolenbroek {
1538*ef8d499eSDavid van Moolenbroek 	struct ethif *ethif = (struct ethif *)ifdev;
1539*ef8d499eSDavid van Moolenbroek 	unsigned int flags;
1540*ef8d499eSDavid van Moolenbroek 	uint32_t caps;
1541*ef8d499eSDavid van Moolenbroek 
1542*ef8d499eSDavid van Moolenbroek 	if (ifcap & ~(IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1543*ef8d499eSDavid van Moolenbroek 	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv6_Tx |
1544*ef8d499eSDavid van Moolenbroek 	    IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv6_Rx |
1545*ef8d499eSDavid van Moolenbroek 	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv6_Tx |
1546*ef8d499eSDavid van Moolenbroek 	    IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv6_Rx))
1547*ef8d499eSDavid van Moolenbroek 		return EINVAL;
1548*ef8d499eSDavid van Moolenbroek 
1549*ef8d499eSDavid van Moolenbroek 	/*
1550*ef8d499eSDavid van Moolenbroek 	 * Some IPv4/IPv6 flags need to be set together in order to be picked
1551*ef8d499eSDavid van Moolenbroek 	 * up.  Unfortunately, that is all we can do given that lwIP does not
1552*ef8d499eSDavid van Moolenbroek 	 * distinguish IPv4/IPv6 when it comes to TCP/UDP checksum flags.
1553*ef8d499eSDavid van Moolenbroek 	 */
1554*ef8d499eSDavid van Moolenbroek 	caps = 0;
1555*ef8d499eSDavid van Moolenbroek 	if (ifcap & IFCAP_CSUM_IPv4_Tx)
1556*ef8d499eSDavid van Moolenbroek 		caps |= NDEV_CAP_CS_IP4_TX;
1557*ef8d499eSDavid van Moolenbroek 	if (ifcap & IFCAP_CSUM_IPv4_Rx)
1558*ef8d499eSDavid van Moolenbroek 		caps |= NDEV_CAP_CS_IP4_RX;
1559*ef8d499eSDavid van Moolenbroek 	if ((ifcap & (IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv6_Tx)) ==
1560*ef8d499eSDavid van Moolenbroek 	    (IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv6_Tx))
1561*ef8d499eSDavid van Moolenbroek 		caps |= NDEV_CAP_CS_UDP_TX;
1562*ef8d499eSDavid van Moolenbroek 	if ((ifcap & (IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv6_Rx)) ==
1563*ef8d499eSDavid van Moolenbroek 	    (IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv6_Rx))
1564*ef8d499eSDavid van Moolenbroek 		caps |= NDEV_CAP_CS_UDP_RX;
1565*ef8d499eSDavid van Moolenbroek 	if ((ifcap & (IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv6_Tx)) ==
1566*ef8d499eSDavid van Moolenbroek 	    (IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv6_Tx))
1567*ef8d499eSDavid van Moolenbroek 		caps |= NDEV_CAP_CS_TCP_TX;
1568*ef8d499eSDavid van Moolenbroek 	if ((ifcap & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv6_Rx)) ==
1569*ef8d499eSDavid van Moolenbroek 	    (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv6_Rx))
1570*ef8d499eSDavid van Moolenbroek 		caps |= NDEV_CAP_CS_TCP_RX;
1571*ef8d499eSDavid van Moolenbroek 
1572*ef8d499eSDavid van Moolenbroek 	/*
1573*ef8d499eSDavid van Moolenbroek 	 * When changing checksumming capabilities, we have to make sure that
1574*ef8d499eSDavid van Moolenbroek 	 * we only ever checksum too much and never too little.  This means
1575*ef8d499eSDavid van Moolenbroek 	 * that we enable any checksum options in netif here, and disable any
1576*ef8d499eSDavid van Moolenbroek 	 * checksum options in netif only after driver configuration.
1577*ef8d499eSDavid van Moolenbroek 	 *
1578*ef8d499eSDavid van Moolenbroek 	 * Note that we have to draw the line somewhere with this kind of
1579*ef8d499eSDavid van Moolenbroek 	 * self-protection, and that line is short of TCP retransmission: we
1580*ef8d499eSDavid van Moolenbroek 	 * see it as lwIP's job to compute checksums for retransmitted TCP
1581*ef8d499eSDavid van Moolenbroek 	 * packets if they were saved across checksum changes.  Even though
1582*ef8d499eSDavid van Moolenbroek 	 * lwIP may not care, there is little we can do about that anyway.
1583*ef8d499eSDavid van Moolenbroek 	 */
1584*ef8d499eSDavid van Moolenbroek 	if (ethif->ethif_wanted.nconf_caps != caps) {
1585*ef8d499eSDavid van Moolenbroek 		flags = ethif_get_netif(ethif)->chksum_flags;
1586*ef8d499eSDavid van Moolenbroek 
1587*ef8d499eSDavid van Moolenbroek 		if (!(caps & NDEV_CAP_CS_IP4_TX))
1588*ef8d499eSDavid van Moolenbroek 			flags |= NETIF_CHECKSUM_GEN_IP;
1589*ef8d499eSDavid van Moolenbroek 		if (!(caps & NDEV_CAP_CS_IP4_RX))
1590*ef8d499eSDavid van Moolenbroek 			flags |= NETIF_CHECKSUM_CHECK_IP;
1591*ef8d499eSDavid van Moolenbroek 		if (!(caps & NDEV_CAP_CS_UDP_TX))
1592*ef8d499eSDavid van Moolenbroek 			flags |= NETIF_CHECKSUM_GEN_UDP;
1593*ef8d499eSDavid van Moolenbroek 		if (!(caps & NDEV_CAP_CS_UDP_RX))
1594*ef8d499eSDavid van Moolenbroek 			flags |= NETIF_CHECKSUM_CHECK_UDP;
1595*ef8d499eSDavid van Moolenbroek 		if (!(caps & NDEV_CAP_CS_TCP_TX))
1596*ef8d499eSDavid van Moolenbroek 			flags |= NETIF_CHECKSUM_GEN_TCP;
1597*ef8d499eSDavid van Moolenbroek 		if (!(caps & NDEV_CAP_CS_TCP_RX))
1598*ef8d499eSDavid van Moolenbroek 			flags |= NETIF_CHECKSUM_CHECK_TCP;
1599*ef8d499eSDavid van Moolenbroek 
1600*ef8d499eSDavid van Moolenbroek 		NETIF_SET_CHECKSUM_CTRL(ethif_get_netif(ethif), flags);
1601*ef8d499eSDavid van Moolenbroek 
1602*ef8d499eSDavid van Moolenbroek 		ethif->ethif_wanted.nconf_caps = caps;
1603*ef8d499eSDavid van Moolenbroek 		ethif->ethif_wanted.nconf_set |= NDEV_SET_CAPS;
1604*ef8d499eSDavid van Moolenbroek 	}
1605*ef8d499eSDavid van Moolenbroek 
1606*ef8d499eSDavid van Moolenbroek 	/* The changes will be picked up from the polling function. */
1607*ef8d499eSDavid van Moolenbroek 	return OK;
1608*ef8d499eSDavid van Moolenbroek }
1609*ef8d499eSDavid van Moolenbroek 
1610*ef8d499eSDavid van Moolenbroek /*
1611*ef8d499eSDavid van Moolenbroek  * Retrieve NetBSD-style interface media type (IFM_).  Return both the current
1612*ef8d499eSDavid van Moolenbroek  * media type selection and the driver-reported active media type.
1613*ef8d499eSDavid van Moolenbroek  */
1614*ef8d499eSDavid van Moolenbroek static void
ethif_get_ifmedia(struct ifdev * ifdev,int * ifcurrent,int * ifactive)1615*ef8d499eSDavid van Moolenbroek ethif_get_ifmedia(struct ifdev * ifdev, int * ifcurrent, int * ifactive)
1616*ef8d499eSDavid van Moolenbroek {
1617*ef8d499eSDavid van Moolenbroek 	struct ethif *ethif = (struct ethif *)ifdev;
1618*ef8d499eSDavid van Moolenbroek 
1619*ef8d499eSDavid van Moolenbroek 	/*
1620*ef8d499eSDavid van Moolenbroek 	 * For the current select, report back whatever the user gave us, even
1621*ef8d499eSDavid van Moolenbroek 	 * if it has not reached the driver at all yet.
1622*ef8d499eSDavid van Moolenbroek 	 */
1623*ef8d499eSDavid van Moolenbroek 	*ifcurrent = (int)ethif->ethif_wanted.nconf_media;
1624*ef8d499eSDavid van Moolenbroek 	*ifactive = (int)ethif->ethif_media;
1625*ef8d499eSDavid van Moolenbroek }
1626*ef8d499eSDavid van Moolenbroek 
1627*ef8d499eSDavid van Moolenbroek /*
1628*ef8d499eSDavid van Moolenbroek  * Set current NetBSD-style interface media type (IFM_).
1629*ef8d499eSDavid van Moolenbroek  */
1630*ef8d499eSDavid van Moolenbroek static int
ethif_set_ifmedia(struct ifdev * ifdev,int ifmedia)1631*ef8d499eSDavid van Moolenbroek ethif_set_ifmedia(struct ifdev * ifdev, int ifmedia)
1632*ef8d499eSDavid van Moolenbroek {
1633*ef8d499eSDavid van Moolenbroek 	struct ethif *ethif = (struct ethif *)ifdev;
1634*ef8d499eSDavid van Moolenbroek 
1635*ef8d499eSDavid van Moolenbroek 	/*
1636*ef8d499eSDavid van Moolenbroek 	 * We currently completely lack the infrastructure to suspend the
1637*ef8d499eSDavid van Moolenbroek 	 * current IOCTL call until the driver replies (or disappears).
1638*ef8d499eSDavid van Moolenbroek 	 * Therefore we have no choice but to return success here, even if the
1639*ef8d499eSDavid van Moolenbroek 	 * driver cannot accept the change.  The driver does notify us of media
1640*ef8d499eSDavid van Moolenbroek 	 * changes, so the user may observe the new active media type later.
1641*ef8d499eSDavid van Moolenbroek 	 * Also note that the new media type may not be the requested type,
1642*ef8d499eSDavid van Moolenbroek 	 * which is why we do not perform any checks against the wanted or
1643*ef8d499eSDavid van Moolenbroek 	 * active media types.
1644*ef8d499eSDavid van Moolenbroek 	 */
1645*ef8d499eSDavid van Moolenbroek 	ethif->ethif_wanted.nconf_media = (uint32_t)ifmedia;
1646*ef8d499eSDavid van Moolenbroek 	ethif->ethif_wanted.nconf_set |= NDEV_SET_MEDIA;
1647*ef8d499eSDavid van Moolenbroek 
1648*ef8d499eSDavid van Moolenbroek 	/* The change will be picked up from the polling function. */
1649*ef8d499eSDavid van Moolenbroek 	return OK;
1650*ef8d499eSDavid van Moolenbroek }
1651*ef8d499eSDavid van Moolenbroek 
1652*ef8d499eSDavid van Moolenbroek /*
1653*ef8d499eSDavid van Moolenbroek  * Enable or disable promiscuous mode on the interface.
1654*ef8d499eSDavid van Moolenbroek  */
1655*ef8d499eSDavid van Moolenbroek static void
ethif_set_promisc(struct ifdev * ifdev,int promisc)1656*ef8d499eSDavid van Moolenbroek ethif_set_promisc(struct ifdev * ifdev, int promisc)
1657*ef8d499eSDavid van Moolenbroek {
1658*ef8d499eSDavid van Moolenbroek 	struct ethif *ethif = (struct ethif *)ifdev;
1659*ef8d499eSDavid van Moolenbroek 
1660*ef8d499eSDavid van Moolenbroek 	if (ethif->ethif_wanted.nconf_mode != NDEV_MODE_DOWN) {
1661*ef8d499eSDavid van Moolenbroek 		if (promisc)
1662*ef8d499eSDavid van Moolenbroek 			ethif->ethif_wanted.nconf_mode |= NDEV_MODE_PROMISC;
1663*ef8d499eSDavid van Moolenbroek 		else
1664*ef8d499eSDavid van Moolenbroek 			ethif->ethif_wanted.nconf_mode &= ~NDEV_MODE_PROMISC;
1665*ef8d499eSDavid van Moolenbroek 		ethif->ethif_wanted.nconf_set |= NDEV_SET_MODE;
1666*ef8d499eSDavid van Moolenbroek 	}
1667*ef8d499eSDavid van Moolenbroek 
1668*ef8d499eSDavid van Moolenbroek 	/* The change will be picked up from the polling function. */
1669*ef8d499eSDavid van Moolenbroek }
1670*ef8d499eSDavid van Moolenbroek 
1671*ef8d499eSDavid van Moolenbroek /*
1672*ef8d499eSDavid van Moolenbroek  * Set the hardware address on the interface.
1673*ef8d499eSDavid van Moolenbroek  */
1674*ef8d499eSDavid van Moolenbroek static int
ethif_set_hwaddr(struct ifdev * ifdev,const uint8_t * hwaddr)1675*ef8d499eSDavid van Moolenbroek ethif_set_hwaddr(struct ifdev * ifdev, const uint8_t * hwaddr)
1676*ef8d499eSDavid van Moolenbroek {
1677*ef8d499eSDavid van Moolenbroek 	struct ethif *ethif = (struct ethif *)ifdev;
1678*ef8d499eSDavid van Moolenbroek 
1679*ef8d499eSDavid van Moolenbroek 	if (!(ethif->ethif_caps & NDEV_CAP_HWADDR))
1680*ef8d499eSDavid van Moolenbroek 		return EINVAL;
1681*ef8d499eSDavid van Moolenbroek 
1682*ef8d499eSDavid van Moolenbroek 	memcpy(&ethif->ethif_wanted.nconf_hwaddr.nhwa_addr, hwaddr,
1683*ef8d499eSDavid van Moolenbroek 	    ETHARP_HWADDR_LEN);
1684*ef8d499eSDavid van Moolenbroek 	ethif->ethif_wanted.nconf_set |= NDEV_SET_HWADDR;
1685*ef8d499eSDavid van Moolenbroek 
1686*ef8d499eSDavid van Moolenbroek 	/* The change will be picked up from the polling function. */
1687*ef8d499eSDavid van Moolenbroek 	return OK;
1688*ef8d499eSDavid van Moolenbroek }
1689*ef8d499eSDavid van Moolenbroek 
1690*ef8d499eSDavid van Moolenbroek /*
1691*ef8d499eSDavid van Moolenbroek  * Set the Maximum Transmission Unit for this interface.  Return TRUE if the
1692*ef8d499eSDavid van Moolenbroek  * new value is acceptable, in which case the caller will do the rest.  Return
1693*ef8d499eSDavid van Moolenbroek  * FALSE otherwise.
1694*ef8d499eSDavid van Moolenbroek  */
1695*ef8d499eSDavid van Moolenbroek static int
ethif_set_mtu(struct ifdev * ifdev __unused,unsigned int mtu)1696*ef8d499eSDavid van Moolenbroek ethif_set_mtu(struct ifdev * ifdev __unused, unsigned int mtu)
1697*ef8d499eSDavid van Moolenbroek {
1698*ef8d499eSDavid van Moolenbroek 
1699*ef8d499eSDavid van Moolenbroek 	return (mtu <= ETHIF_MAX_MTU);
1700*ef8d499eSDavid van Moolenbroek }
1701*ef8d499eSDavid van Moolenbroek 
1702*ef8d499eSDavid van Moolenbroek static const struct ifdev_ops ethif_ops = {
1703*ef8d499eSDavid van Moolenbroek 	.iop_init = ethif_init_netif,
1704*ef8d499eSDavid van Moolenbroek 	.iop_input = netif_input,
1705*ef8d499eSDavid van Moolenbroek 	.iop_output = ethif_output,
1706*ef8d499eSDavid van Moolenbroek 	.iop_output_v4 = etharp_output,
1707*ef8d499eSDavid van Moolenbroek 	.iop_output_v6 = ethip6_output,
1708*ef8d499eSDavid van Moolenbroek 	.iop_hdrcmplt = ethif_hdrcmplt,
1709*ef8d499eSDavid van Moolenbroek 	.iop_poll = ethif_poll,
1710*ef8d499eSDavid van Moolenbroek 	.iop_set_ifflags = ethif_set_ifflags,
1711*ef8d499eSDavid van Moolenbroek 	.iop_get_ifcap = ethif_get_ifcap,
1712*ef8d499eSDavid van Moolenbroek 	.iop_set_ifcap = ethif_set_ifcap,
1713*ef8d499eSDavid van Moolenbroek 	.iop_get_ifmedia = ethif_get_ifmedia,
1714*ef8d499eSDavid van Moolenbroek 	.iop_set_ifmedia = ethif_set_ifmedia,
1715*ef8d499eSDavid van Moolenbroek 	.iop_set_promisc = ethif_set_promisc,
1716*ef8d499eSDavid van Moolenbroek 	.iop_set_hwaddr = ethif_set_hwaddr,
1717*ef8d499eSDavid van Moolenbroek 	.iop_set_mtu = ethif_set_mtu,
1718*ef8d499eSDavid van Moolenbroek };
1719