xref: /netbsd-src/sys/dev/pci/ixgbe/ixgbe_netbsd.c (revision 181254a7b1bdde6873432bffef2d2decc4b5c22f)
1 /* $NetBSD: ixgbe_netbsd.c,v 1.15 2020/06/25 07:53:02 msaitoh Exp $ */
2 /*
3  * Copyright (c) 2011 The NetBSD Foundation, Inc.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to The NetBSD Foundation
7  * by Coyote Point Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 #include <sys/param.h>
31 
32 #include <sys/atomic.h>
33 #include <sys/bus.h>
34 #include <sys/condvar.h>
35 #include <sys/cpu.h>
36 #include <sys/kmem.h>
37 #include <sys/mbuf.h>
38 #include <sys/mutex.h>
39 #include <sys/queue.h>
40 #include <sys/workqueue.h>
41 #include <dev/pci/pcivar.h>
42 
43 #include "ixgbe.h"
44 
45 void
46 ixgbe_dma_tag_destroy(ixgbe_dma_tag_t *dt)
47 {
48 	kmem_free(dt, sizeof(*dt));
49 }
50 
51 int
52 ixgbe_dma_tag_create(bus_dma_tag_t dmat, bus_size_t alignment,
53     bus_size_t boundary, bus_size_t maxsize, int nsegments,
54     bus_size_t maxsegsize, int flags, ixgbe_dma_tag_t **dtp)
55 {
56 	ixgbe_dma_tag_t *dt;
57 
58 	*dtp = NULL;
59 
60 	dt = kmem_zalloc(sizeof(*dt), KM_SLEEP);
61 	dt->dt_dmat = dmat;
62 	dt->dt_alignment = alignment;
63 	dt->dt_boundary = boundary;
64 	dt->dt_maxsize = maxsize;
65 	dt->dt_nsegments = nsegments;
66 	dt->dt_maxsegsize = maxsegsize;
67 	dt->dt_flags = flags;
68 	*dtp = dt;
69 
70 	return 0;
71 }
72 
73 void
74 ixgbe_dmamap_destroy(ixgbe_dma_tag_t *dt, bus_dmamap_t dmam)
75 {
76 	bus_dmamap_destroy(dt->dt_dmat, dmam);
77 }
78 
79 void
80 ixgbe_dmamap_sync(ixgbe_dma_tag_t *dt, bus_dmamap_t dmam, int ops)
81 {
82 	bus_dmamap_sync(dt->dt_dmat, dmam, 0, dt->dt_maxsize, ops);
83 }
84 
85 void
86 ixgbe_dmamap_unload(ixgbe_dma_tag_t *dt, bus_dmamap_t dmam)
87 {
88 	bus_dmamap_unload(dt->dt_dmat, dmam);
89 }
90 
91 int
92 ixgbe_dmamap_create(ixgbe_dma_tag_t *dt, int flags, bus_dmamap_t *dmamp)
93 {
94 	return bus_dmamap_create(dt->dt_dmat, dt->dt_maxsize, dt->dt_nsegments,
95 	    dt->dt_maxsegsize, dt->dt_boundary, flags, dmamp);
96 }
97 
98 static void
99 ixgbe_putext(ixgbe_extmem_t *em)
100 {
101 	ixgbe_extmem_head_t *eh = em->em_head;
102 
103 	mutex_enter(&eh->eh_mtx);
104 
105 	TAILQ_INSERT_HEAD(&eh->eh_freelist, em, em_link);
106 
107 	mutex_exit(&eh->eh_mtx);
108 
109 	return;
110 }
111 
112 static ixgbe_extmem_t *
113 ixgbe_getext(ixgbe_extmem_head_t *eh, size_t size)
114 {
115 	ixgbe_extmem_t *em;
116 
117 	mutex_enter(&eh->eh_mtx);
118 
119 	TAILQ_FOREACH(em, &eh->eh_freelist, em_link) {
120 		if (em->em_size >= size)
121 			break;
122 	}
123 
124 	if (em != NULL)
125 		TAILQ_REMOVE(&eh->eh_freelist, em, em_link);
126 
127 	mutex_exit(&eh->eh_mtx);
128 
129 	return em;
130 }
131 
132 static ixgbe_extmem_t *
133 ixgbe_newext(ixgbe_extmem_head_t *eh, bus_dma_tag_t dmat, size_t size)
134 {
135 	ixgbe_extmem_t *em;
136 	int nseg, rc;
137 
138 	em = kmem_zalloc(sizeof(*em), KM_SLEEP);
139 
140 	rc = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, &em->em_seg, 1, &nseg,
141 	    BUS_DMA_WAITOK);
142 
143 	if (rc != 0)
144 		goto post_zalloc_err;
145 
146 	rc = bus_dmamem_map(dmat, &em->em_seg, 1, size, &em->em_vaddr,
147 	    BUS_DMA_WAITOK);
148 
149 	if (rc != 0)
150 		goto post_dmamem_err;
151 
152 	em->em_dmat = dmat;
153 	em->em_size = size;
154 	em->em_head = eh;
155 
156 	return em;
157 post_dmamem_err:
158 	bus_dmamem_free(dmat, &em->em_seg, 1);
159 post_zalloc_err:
160 	kmem_free(em, sizeof(*em));
161 	return NULL;
162 }
163 
164 static void
165 ixgbe_jcl_freeall(struct adapter *adapter, struct rx_ring *rxr)
166 {
167 	ixgbe_extmem_head_t *eh = &rxr->jcl_head;
168 	ixgbe_extmem_t *em;
169 	bus_dma_tag_t dmat = rxr->ptag->dt_dmat;
170 
171 	while ((em = ixgbe_getext(eh, 0)) != NULL) {
172 		KASSERT(em->em_vaddr != NULL);
173 		bus_dmamem_unmap(dmat, em->em_vaddr, em->em_size);
174 		bus_dmamem_free(dmat, &em->em_seg, 1);
175 		memset(em, 0, sizeof(*em));
176 		kmem_free(em, sizeof(*em));
177 	}
178 }
179 
/*
 * (Re)populate rxr's jumbo-cluster free list with nbuf buffers of
 * `size' bytes, allocated from `dmat'.  Safe to call repeatedly: it is
 * a no-op unless the mbuf size or RX descriptor count changed since
 * the previous call.  On the first call it also initializes the free
 * list and its IPL_NET mutex.
 */
void
ixgbe_jcl_reinit(struct adapter *adapter, bus_dma_tag_t dmat,
    struct rx_ring *rxr, int nbuf, size_t size)
{
	ixgbe_extmem_head_t *eh = &rxr->jcl_head;
	ixgbe_extmem_t *em;
	int i;

	/* One-time setup of the per-ring free list and its lock. */
	if (!eh->eh_initialized) {
		TAILQ_INIT(&eh->eh_freelist);
		mutex_init(&eh->eh_mtx, MUTEX_DEFAULT, IPL_NET);
		eh->eh_initialized = true;
	}

	/*
	 *  Check previous parameters. If it's not required to reinit, just
	 * return.
	 *
	 *  Note that the num_rx_desc is currently fixed value. It's never
	 * changed after device is attached.
	 */
	if ((rxr->last_rx_mbuf_sz == rxr->mbuf_sz)
	    && (rxr->last_num_rx_desc == adapter->num_rx_desc))
		return;

	/* Free all dmamem */
	ixgbe_jcl_freeall(adapter, rxr);

	/*
	 * Allocation is best-effort: on failure we keep whatever was
	 * allocated so far and only report the shortfall.
	 */
	for (i = 0; i < nbuf; i++) {
		if ((em = ixgbe_newext(eh, dmat, size)) == NULL) {
			device_printf(adapter->dev,
			    "%s: only %d of %d jumbo buffers allocated\n",
			    __func__, i, nbuf);
			break;
		}
		ixgbe_putext(em);
	}

	/* Keep current parameters */
	/*
	 * NOTE(review): the guard above compares rxr->mbuf_sz, yet
	 * adapter->rx_mbuf_sz is what gets saved here -- presumably the
	 * caller keeps the two equal; confirm against the attach/init path.
	 */
	rxr->last_rx_mbuf_sz = adapter->rx_mbuf_sz;
	rxr->last_num_rx_desc = adapter->num_rx_desc;
}
222 
223 void
224 ixgbe_jcl_destroy(struct adapter *adapter, struct rx_ring *rxr)
225 {
226 	ixgbe_extmem_head_t *eh = &rxr->jcl_head;
227 
228 	if (eh->eh_initialized) {
229 		/* Free all dmamem */
230 		ixgbe_jcl_freeall(adapter, rxr);
231 
232 		mutex_destroy(&eh->eh_mtx);
233 		eh->eh_initialized = false;
234 	}
235 }
236 
237 
238 static void
239 ixgbe_jcl_free(struct mbuf *m, void *buf, size_t size, void *arg)
240 {
241 	ixgbe_extmem_t *em = arg;
242 
243 	KASSERT(em->em_size == size);
244 
245 	ixgbe_putext(em);
246 	/* this is an abstraction violation, but it does not lead to a
247 	 * double-free
248 	 */
249 	if (__predict_true(m != NULL)) {
250 		KASSERT(m->m_type != MT_FREE);
251 		m->m_type = MT_FREE;
252 		pool_cache_put(mb_cache, m);
253 	}
254 }
255 
256 /* XXX need to wait for the system to finish with each jumbo mbuf and
257  * free it before detaching the driver from the device.
258  */
259 struct mbuf *
260 ixgbe_getjcl(ixgbe_extmem_head_t *eh, int nowait /* M_DONTWAIT */,
261     int type /* MT_DATA */, int flags /* M_PKTHDR */, size_t size)
262 {
263 	ixgbe_extmem_t *em;
264 	struct mbuf *m;
265 
266 	if ((flags & M_PKTHDR) != 0)
267 		m = m_gethdr(nowait, type);
268 	else
269 		m = m_get(nowait, type);
270 
271 	if (m == NULL)
272 		return NULL;
273 
274 	em = ixgbe_getext(eh, size);
275 	if (em == NULL) {
276 		m_freem(m);
277 		return NULL;
278 	}
279 
280 	MEXTADD(m, em->em_vaddr, em->em_size, M_DEVBUF, &ixgbe_jcl_free, em);
281 
282 	if ((m->m_flags & M_EXT) == 0) {
283 		ixgbe_putext(em);
284 		m_freem(m);
285 		return NULL;
286 	}
287 
288 	return m;
289 }
290 
291 void
292 ixgbe_pci_enable_busmaster(pci_chipset_tag_t pc, pcitag_t tag)
293 {
294 	pcireg_t	pci_cmd_word;
295 
296 	pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
297 	if (!(pci_cmd_word & PCI_COMMAND_MASTER_ENABLE)) {
298 		pci_cmd_word |= PCI_COMMAND_MASTER_ENABLE;
299 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
300 	}
301 }
302 
303 u_int
304 atomic_load_acq_uint(volatile u_int *p)
305 {
306 	return atomic_load_acquire(p);
307 }
308 
309 void
310 ixgbe_delay(unsigned int us)
311 {
312 
313 	if (__predict_false(cold))
314 		delay(us);
315 	else if ((us / 1000) >= hztoms(1)) {
316 		/*
317 		 * Wait at least two clock ticks so we know the time has
318 		 * passed.
319 		 */
320 		kpause("ixgdly", false, mstohz(us / 1000) + 1, NULL);
321 	} else
322 		delay(us);
323 }
324