/*	$NetBSD: mvxpbm.c,v 1.4 2024/02/08 20:51:24 andvar Exp $	*/
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mvxpbm.c,v 1.4 2024/02/08 20:51:24 andvar Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>

#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>

#include "mvxpbmvar.h"

#ifdef DEBUG
#define STATIC /* nothing */
#define DPRINTF(fmt, ...) \
	do { \
		if (mvxpbm_debug >= 1) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTFN(level, fmt, ...) \
	do { \
		if (mvxpbm_debug >= (level)) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTDEV(dev, level, fmt, ...) \
	do { \
		if (mvxpbm_debug >= (level)) { \
			device_printf((dev), \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTSC(sc, level, fmt, ...) \
	do { \
		device_t dev = (sc)->sc_dev; \
		if (mvxpbm_debug >= (level)) { \
			device_printf(dev, \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#else
#define STATIC static
#define DPRINTF(fmt, ...)
#define DPRINTFN(level, fmt, ...)
#define DPRINTDEV(dev, level, fmt, ...)
#define DPRINTSC(sc, level, fmt, ...)
#endif

/* autoconf(9) */
STATIC int mvxpbm_match(device_t, cfdata_t, void *);
STATIC void mvxpbm_attach(device_t, device_t, void *);
STATIC int mvxpbm_evcnt_attach(struct mvxpbm_softc *);
CFATTACH_DECL_NEW(mvxpbm_mbus, sizeof(struct mvxpbm_softc),
    mvxpbm_match, mvxpbm_attach, NULL, NULL);

/* DMA buffers */
STATIC int mvxpbm_alloc_buffer(struct mvxpbm_softc *);

/* mbuf subroutines */
STATIC void mvxpbm_free_mbuf(struct mbuf *, void *, size_t, void *);

/* singleton device instance */
static struct mvxpbm_softc sc_emul;
static struct mvxpbm_softc *sc0;
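/*
 * sc_emul backs the software-emulated instance handed out by
 * mvxpbm_device() when no mvxpbm(4) device has attached; sc0 always
 * points at the single active instance, emulated or not.
 */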

/* debug level */
#ifdef DEBUG
static int mvxpbm_debug = 0;
#endif
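/*
 * With DEBUG compiled in, raising mvxpbm_debug at run time (for example
 * by patching the variable from ddb(4)) enables progressively more
 * verbose DPRINTF/DPRINTFN/DPRINTSC output.
 */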

/*
 * autoconf(9)
 */
STATIC int
mvxpbm_match(device_t parent, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_unit > MVXPBM_UNIT_MAX)
		return 0;
	if (sc0 != NULL)
		return 0;
	if (mva->mva_offset != MVA_OFFSET_DEFAULT) {
		/* Hardware BM is not supported yet. */
		return 0;
	}

	return 1;
}

STATIC void
mvxpbm_attach(device_t parent, device_t self, void *aux)
{
	struct marvell_attach_args *mva = aux;
	struct mvxpbm_softc *sc = device_private(self);

	aprint_naive("\n");
	aprint_normal(": Marvell ARMADA Buffer Manager\n");
	memset(sc, 0, sizeof(*sc));
	sc->sc_dev = self;
	sc->sc_iot = mva->mva_iot;
	sc->sc_dmat = mva->mva_dmat;

	if (mva->mva_offset == MVA_OFFSET_DEFAULT) {
		aprint_normal_dev(sc->sc_dev, "Software emulation.\n");
		sc->sc_emul = 1;
	}

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
	LIST_INIT(&sc->sc_free);
	LIST_INIT(&sc->sc_inuse);

	/* DMA buffers */
	if (mvxpbm_alloc_buffer(sc) != 0)
		return;

	/* event counters */
	mvxpbm_evcnt_attach(sc);

	sc0 = sc;
}

STATIC int
mvxpbm_evcnt_attach(struct mvxpbm_softc *sc)
{
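	/* No event counters are registered yet; this is a stub. */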
	return 0;
}

/*
 * DMA buffers
 */
STATIC int
mvxpbm_alloc_buffer(struct mvxpbm_softc *sc)
{
	bus_dma_segment_t segs;
	char *kva, *ptr, *ptr_next, *ptr_data;
	char *bm_buf_end;
	uint32_t align, pad;
	int nsegs;
	int error;

	/*
	 * Set the default buffer sizes.  These will be adjusted below to
	 * satisfy the alignment restrictions.
	 */
	sc->sc_chunk_count = 0;
	sc->sc_chunk_size = MVXPBM_PACKET_SIZE;
	sc->sc_chunk_header_size = sizeof(struct mvxpbm_chunk);
	sc->sc_chunk_packet_offset = 64;

	/*
	 * Adjust bm_chunk_size, bm_chunk_header_size and bm_slotsize
	 * to satisfy the alignment restrictions.
	 *
	 *  <---------------- bm_slotsize [oct.] ------------------->
	 *                             <---- bm_chunk_size[oct.] --->
	 *  <--- header_size[oct] ----><- MVXPBM_PACKET_SIZE[oct.] ->
	 *  +-----------------+--------+---------+-----------------+--+
	 *  |  bm_chunk hdr   |pad     |pkt_off  |  packet data    |  |
	 *  +-----------------+--------+---------+-----------------+--+
	 *  ^                          ^         ^                 ^
	 *  |                          |         |                 |
	 *  ptr                        ptr_data  DMA here          ptr_next
	 *
	 * Restrictions:
	 *  - the total buffer size must be a multiple of MVXPBM_BUF_ALIGN
	 *  - ptr must be aligned to MVXPBM_CHUNK_ALIGN
	 *  - ptr_data must be aligned to MVXPBM_DATA_ALIGN
	 *  - bm_chunk_size must be a multiple of 8 bytes
	 */
	/* start the calculation from 0x0000.0000 */
	ptr = (char *)0;

	/* align the start of the packet data */
	ptr_data = ptr + sc->sc_chunk_header_size;
	align = (unsigned long)ptr_data & MVXPBM_DATA_MASK;
	if (align != 0) {
		pad = MVXPBM_DATA_ALIGN - align;
		sc->sc_chunk_header_size += pad;
		DPRINTSC(sc, 1, "added padding to BM header, %u bytes\n", pad);
	}

	/* align the size of the packet data */
	ptr_data = ptr + sc->sc_chunk_header_size;
	ptr_next = ptr_data + MVXPBM_PACKET_SIZE;
	align = (unsigned long)ptr_next & MVXPBM_CHUNK_MASK;
	if (align != 0) {
		pad = MVXPBM_CHUNK_ALIGN - align;
		ptr_next += pad;
		DPRINTSC(sc, 1, "added padding to BM pktbuf, %u bytes\n", pad);
	}
	sc->sc_slotsize = ptr_next - ptr;
	sc->sc_chunk_size = ptr_next - ptr_data;
	KASSERT((sc->sc_chunk_size % MVXPBM_DATA_UNIT) == 0);

	/* align the total buffer size to the Mbus window boundary */
	sc->sc_buf_size = sc->sc_slotsize * MVXPBM_NUM_SLOTS;
	align = (unsigned long)sc->sc_buf_size & MVXPBM_BUF_MASK;
	if (align != 0) {
		pad = MVXPBM_BUF_ALIGN - align;
		sc->sc_buf_size += pad;
		DPRINTSC(sc, 1,
		    "expand buffer to fit page boundary, %u bytes\n", pad);
	}

	/*
	 * Get the aligned buffer from the busdma(9) framework.
	 */
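	/*
	 * Standard bus_dma(9) four-step sequence: allocate physically
	 * contiguous memory (one segment), map it into kernel virtual
	 * address space, create a DMA map, and load the map so the
	 * device-visible bus address becomes available.
	 */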
	if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_buf_size, MVXPBM_BUF_ALIGN, 0,
	    &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't alloc BM buffers\n");
		return ENOBUFS;
	}
	if (bus_dmamem_map(sc->sc_dmat, &segs, nsegs, sc->sc_buf_size,
	    (void **)&kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't map dma buffers (%zu bytes)\n", sc->sc_buf_size);
		error = ENOBUFS;
		goto fail1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sc->sc_buf_size, 1, sc->sc_buf_size,
	    0, BUS_DMA_NOWAIT, &sc->sc_buf_map)) {
		aprint_error_dev(sc->sc_dev, "can't create dma map\n");
		error = ENOBUFS;
		goto fail2;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_buf_map,
	    kva, sc->sc_buf_size, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load dma map\n");
		error = ENOBUFS;
		goto fail3;
	}
	sc->sc_buf = (void *)kva;
	sc->sc_buf_pa = segs.ds_addr;
	bm_buf_end = (void *)(kva + sc->sc_buf_size);
	DPRINTSC(sc, 1, "memory pool at %p\n", sc->sc_buf);

	/* slice the buffer */
	mvxpbm_lock(sc);
	for (ptr = sc->sc_buf; ptr + sc->sc_slotsize <= bm_buf_end;
	    ptr += sc->sc_slotsize) {
		struct mvxpbm_chunk *chunk;

		/* initialize chunk */
		ptr_data = ptr + sc->sc_chunk_header_size;
		chunk = (struct mvxpbm_chunk *)ptr;
		chunk->m = NULL;
		chunk->sc = sc;
		chunk->off = (ptr - sc->sc_buf);
		chunk->pa = (paddr_t)(sc->sc_buf_pa + chunk->off);
		chunk->buf_off = (ptr_data - sc->sc_buf);
		chunk->buf_pa = (paddr_t)(sc->sc_buf_pa + chunk->buf_off);
		chunk->buf_va = (vaddr_t)(sc->sc_buf + chunk->buf_off);
		chunk->buf_size = sc->sc_chunk_size;

		/* add to free list (for software management) */
		LIST_INSERT_HEAD(&sc->sc_free, chunk, link);
		mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
		sc->sc_chunk_count++;

		DPRINTSC(sc, 9, "new chunk %p\n", (void *)chunk->buf_va);
	}
	mvxpbm_unlock(sc);
	return 0;

fail3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_buf_map);
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, sc->sc_buf_size);
fail1:
	bus_dmamem_free(sc->sc_dmat, &segs, nsegs);

	return error;
}

/*
 * mbuf subroutines
 */
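/*
 * MEXTADD() in mvxpbm_init_mbuf_hdr() registers mvxpbm_free_mbuf() as
 * the external-storage free routine, so it runs when the last reference
 * to the mbuf is dropped: the header goes back to the mbuf pool cache
 * and the chunk returns to the BM free list.
 */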
STATIC void
mvxpbm_free_mbuf(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct mvxpbm_chunk *chunk = (struct mvxpbm_chunk *)arg;
	int s;

	KASSERT(m != NULL);
	KASSERT(arg != NULL);

	DPRINTFN(3, "free packet %p\n", m);

	chunk->m = NULL;
	s = splvm();
	pool_cache_put(mb_cache, m);
	splx(s);
	mvxpbm_free_chunk(chunk);
}

/*
 * Exported APIs
 */

/* get the mvxpbm device context */
struct mvxpbm_softc *
mvxpbm_device(struct marvell_attach_args *mva)
{
	struct mvxpbm_softc *sc;

	if (sc0 != NULL)
		return sc0;
	if (mva == NULL)
		return NULL;

	/* allocate the software emulation context */
	sc = &sc_emul;
	memset(sc, 0, sizeof(*sc));
	sc->sc_emul = 1;
	sc->sc_iot = mva->mva_iot;
	sc->sc_dmat = mva->mva_dmat;

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
	LIST_INIT(&sc->sc_free);
	LIST_INIT(&sc->sc_inuse);

	if (mvxpbm_alloc_buffer(sc) != 0)
		return NULL;
	mvxpbm_evcnt_attach(sc);
	sc0 = sc;
	return sc0;
}

/* allocate a new memory chunk */
struct mvxpbm_chunk *
mvxpbm_alloc(struct mvxpbm_softc *sc)
{
	struct mvxpbm_chunk *chunk;

	mvxpbm_lock(sc);

	chunk = LIST_FIRST(&sc->sc_free);
	if (chunk == NULL) {
		mvxpbm_unlock(sc);
		return NULL;
	}

	LIST_REMOVE(chunk, link);
	LIST_INSERT_HEAD(&sc->sc_inuse, chunk, link);

	mvxpbm_unlock(sc);
	return chunk;
}

/* free a memory chunk */
void
mvxpbm_free_chunk(struct mvxpbm_chunk *chunk)
{
	struct mvxpbm_softc *sc = chunk->sc;

	KASSERT(chunk->m == NULL);
	DPRINTFN(3, "bm chunk free\n");

	mvxpbm_lock(sc);

	LIST_REMOVE(chunk, link);
	LIST_INSERT_HEAD(&sc->sc_free, chunk, link);

	mvxpbm_unlock(sc);
}

/* prepare an mbuf header after Rx */
int
mvxpbm_init_mbuf_hdr(struct mvxpbm_chunk *chunk)
{
	struct mvxpbm_softc *sc = chunk->sc;

	KASSERT(chunk->m == NULL);

	/* add a new mbuf header */
	MGETHDR(chunk->m, M_DONTWAIT, MT_DATA);
	if (chunk->m == NULL) {
		aprint_error_dev(sc->sc_dev, "cannot get mbuf\n");
		return ENOBUFS;
	}
	MEXTADD(chunk->m, chunk->buf_va, chunk->buf_size, 0,
	    mvxpbm_free_mbuf, chunk);
	chunk->m->m_flags |= M_EXT_RW;
	chunk->m->m_len = chunk->m->m_pkthdr.len = chunk->buf_size;
	if (sc->sc_chunk_packet_offset)
		m_adj(chunk->m, sc->sc_chunk_packet_offset);

	return 0;
}
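
/*
 * Typical Rx usage by a consumer driver, sketched here for reference
 * (the descriptor field "bufptr", the length "len" and the surrounding
 * names are illustrative, not taken from this file):
 *
 *	struct mvxpbm_softc *bm = mvxpbm_device(mva);
 *	struct mvxpbm_chunk *chunk = mvxpbm_alloc(bm);
 *	if (chunk == NULL)
 *		return ENOBUFS;
 *	desc->bufptr = chunk->buf_pa;	// hand the buffer to the NIC
 *	... wait for the Rx descriptor to complete ...
 *	mvxpbm_dmamap_sync(chunk, len, BUS_DMASYNC_POSTREAD);
 *	if (mvxpbm_init_mbuf_hdr(chunk) == 0)
 *		if_percpuq_enqueue(ifp->if_percpuq, chunk->m);
 */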

/* sync DMA segments */
void
mvxpbm_dmamap_sync(struct mvxpbm_chunk *chunk, size_t size, int ops)
{
	struct mvxpbm_softc *sc = chunk->sc;

	KASSERT(size <= chunk->buf_size);
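	/* a size of 0 (as passed via BM_SYNC_ALL above) syncs the whole chunk */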
	if (size == 0)
		size = chunk->buf_size;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_buf_map, chunk->buf_off, size, ops);
}

/* lock */
void
mvxpbm_lock(struct mvxpbm_softc *sc)
{
	mutex_enter(&sc->sc_mtx);
}

void
mvxpbm_unlock(struct mvxpbm_softc *sc)
{
	mutex_exit(&sc->sc_mtx);
}

/* get params */
const char *
mvxpbm_xname(struct mvxpbm_softc *sc)
{
	if (sc->sc_emul) {
		return "software_bm";
	}
	return device_xname(sc->sc_dev);
}

size_t
mvxpbm_chunk_size(struct mvxpbm_softc *sc)
{
	return sc->sc_chunk_size;
}

uint32_t
mvxpbm_chunk_count(struct mvxpbm_softc *sc)
{
	return sc->sc_chunk_count;
}

off_t
mvxpbm_packet_offset(struct mvxpbm_softc *sc)
{
	return sc->sc_chunk_packet_offset;
}

paddr_t
mvxpbm_buf_pbase(struct mvxpbm_softc *sc)
{
	return sc->sc_buf_pa;
}

size_t
mvxpbm_buf_size(struct mvxpbm_softc *sc)
{
	return sc->sc_buf_size;
}