/*	$NetBSD: usb_mem.c,v 1.81 2021/05/27 10:44:29 jmcneill Exp $	*/

/*
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Lennart Augustsson (lennart@augustsson.net) at
 * Carlstedt Research & Technology.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * USB DMA memory allocation.
 * We need to allocate many small (mostly 8-byte, some larger) memory
 * blocks that can be used for DMA.  Using the bus_dma routines
 * directly for each block would incur large overheads in space and time.
 */
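/*
 * Instead, small requests are carved out of shared USB_MEM_BLOCK-sized
 * blocks as fixed USB_MEM_SMALL-sized fragments, and large or strongly
 * aligned requests get a block of their own.  Freed blocks and fragments
 * are cached on free lists for reuse rather than handed back to bus_dma.
 *
 * A host controller driver typically uses these interfaces roughly as
 * follows (a sketch only; "struct my_qtd", MY_ALIGN and the qtd_next
 * member are placeholders, and error handling is omitted):
 *
 *	usb_dma_t dma;
 *
 *	if (usb_allocmem(bus, sizeof(struct my_qtd), MY_ALIGN,
 *	    USBMALLOC_COHERENT, &dma) == 0) {
 *		struct my_qtd *qtd = KERNADDR(&dma, 0);
 *
 *		qtd->qtd_next = htole32(usb_dmaaddr(&dma, 0));
 *		usb_syncmem(&dma, 0, sizeof(*qtd), BUS_DMASYNC_PREWRITE);
 *		...
 *		usb_freemem(bus, &dma);
 *	}
 */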

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: usb_mem.c,v 1.81 2021/05/27 10:44:29 jmcneill Exp $");

#ifdef _KERNEL_OPT
#include "opt_usb.h"
#endif

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>		/* for usbdivar.h */
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/once.h>
#include <sys/queue.h>
#include <sys/systm.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>	/* just for usb_dma_t */
#include <dev/usb/usbhist.h>
#include <dev/usb/usb_mem.h>

#define	DPRINTF(FMT,A,B,C,D)	USBHIST_LOG(usbdebug,FMT,A,B,C,D)
#define	DPRINTFN(N,FMT,A,B,C,D)	USBHIST_LOGN(usbdebug,N,FMT,A,B,C,D)

#define USB_MEM_SMALL roundup(64, CACHE_LINE_SIZE)
#define USB_MEM_CHUNKS 64
#define USB_MEM_BLOCK (USB_MEM_SMALL * USB_MEM_CHUNKS)
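/*
 * For example, with a 64-byte cache line this works out to 64-byte
 * fragments and 4 KiB blocks; larger cache lines scale both up
 * proportionally.
 */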

/* This struct is overlaid on free fragments. */
struct usb_frag_dma {
	usb_dma_block_t		*ufd_block;
	u_int			ufd_offs;
	LIST_ENTRY(usb_frag_dma) ufd_next;
};
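/*
 * The bookkeeping for a free fragment lives in the fragment's own
 * memory, so the fragment free list needs no extra allocations; once a
 * fragment is handed out again the caller's data simply overwrites it.
 * (With USB_FRAG_DMA_WORKAROUND the bookkeeping is instead kept in a
 * separate, skipped slot; see usb_allocmem() below.)
 */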

Static int	usb_block_allocmem(bus_dma_tag_t, size_t, size_t,
		    u_int, usb_dma_block_t **);
Static void	usb_block_freemem(usb_dma_block_t *);

LIST_HEAD(usb_dma_block_qh, usb_dma_block);
Static struct usb_dma_block_qh usb_blk_freelist =
	LIST_HEAD_INITIALIZER(usb_blk_freelist);
kmutex_t usb_blk_lock;

#ifdef DEBUG
Static struct usb_dma_block_qh usb_blk_fraglist =
	LIST_HEAD_INITIALIZER(usb_blk_fraglist);
Static struct usb_dma_block_qh usb_blk_fulllist =
	LIST_HEAD_INITIALIZER(usb_blk_fulllist);
#endif
Static u_int usb_blk_nfree = 0;
/* XXX should have different free list for different tags (for speed) */
Static LIST_HEAD(, usb_frag_dma) usb_frag_freelist =
	LIST_HEAD_INITIALIZER(usb_frag_freelist);
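/*
 * All of the lists above are protected by usb_blk_lock, which is set up
 * lazily from usb_allocmem() via RUN_ONCE()/usb_mem_init().
 */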

Static int usb_mem_init(void);

Static int
usb_mem_init(void)
{

	mutex_init(&usb_blk_lock, MUTEX_DEFAULT, IPL_NONE);
	return 0;
}

Static int
usb_block_allocmem(bus_dma_tag_t tag, size_t size, size_t align,
    u_int flags, usb_dma_block_t **dmap)
{
	usb_dma_block_t *b;
	int error;

	USBHIST_FUNC();
	USBHIST_CALLARGS(usbdebug, "size=%ju align=%ju flags=%#jx", size, align, flags, 0);

	ASSERT_SLEEPABLE();
	KASSERT(size != 0);
	KASSERT(mutex_owned(&usb_blk_lock));

#ifdef USB_FRAG_DMA_WORKAROUND
	flags |= USBMALLOC_ZERO;
#endif

	bool multiseg = (flags & USBMALLOC_MULTISEG) != 0;
	bool coherent = (flags & USBMALLOC_COHERENT) != 0;
	bool zero = (flags & USBMALLOC_ZERO) != 0;
	u_int dmaflags = coherent ? USB_DMA_COHERENT : 0;

	/* First check the free list. */
	LIST_FOREACH(b, &usb_blk_freelist, next) {
		/* Don't allocate multiple segments to unwilling callers */
		if (b->nsegs != 1 && !multiseg)
			continue;
		if (b->tag == tag &&
		    b->size >= size &&
		    b->align >= align &&
		    (b->flags & USB_DMA_COHERENT) == dmaflags) {
			LIST_REMOVE(b, next);
			usb_blk_nfree--;
			*dmap = b;
			if (zero) {
				memset(b->kaddr, 0, b->size);
				bus_dmamap_sync(b->tag, b->map, 0, b->size,
				    BUS_DMASYNC_PREWRITE);
			}
			DPRINTFN(6, "free list size=%ju", b->size, 0, 0, 0);
			return 0;
		}
	}

	DPRINTFN(6, "no freelist entry", 0, 0, 0, 0);
	mutex_exit(&usb_blk_lock);

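	/*
	 * Nothing suitable is cached: allocate and map a fresh block.
	 * The kmem and bus_dma calls below may sleep, so usb_blk_lock is
	 * dropped across them and only retaken before returning.
	 */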
	b = kmem_zalloc(sizeof(*b), KM_SLEEP);
	b->tag = tag;
	b->size = size;
	b->align = align;
	b->flags = dmaflags;

	if (!multiseg)
		/* Caller wants one segment */
		b->nsegs = 1;
	else
		b->nsegs = howmany(size, PAGE_SIZE);

	b->segs = kmem_alloc(b->nsegs * sizeof(*b->segs), KM_SLEEP);
	b->nsegs_alloc = b->nsegs;

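	/*
	 * Standard bus_dma setup: allocate raw DMA-safe memory, map it
	 * into kernel virtual address space, then create and load a DMA
	 * map so that both a kernel pointer and bus addresses exist.
	 */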
	error = bus_dmamem_alloc(tag, b->size, align, 0,
				 b->segs, b->nsegs,
				 &b->nsegs, BUS_DMA_WAITOK);
	if (error)
		goto free0;

	error = bus_dmamem_map(tag, b->segs, b->nsegs, b->size, &b->kaddr,
	    BUS_DMA_WAITOK | (coherent ? BUS_DMA_COHERENT : 0));
	if (error)
		goto free1;

	error = bus_dmamap_create(tag, b->size, b->nsegs, b->size,
				  0, BUS_DMA_WAITOK, &b->map);
	if (error)
		goto unmap;

	error = bus_dmamap_load(tag, b->map, b->kaddr, b->size, NULL,
				BUS_DMA_WAITOK);
	if (error)
		goto destroy;

	*dmap = b;

	if (zero) {
		memset(b->kaddr, 0, b->size);
		bus_dmamap_sync(b->tag, b->map, 0, b->size,
		    BUS_DMASYNC_PREWRITE);
	}

	mutex_enter(&usb_blk_lock);

	return 0;

 destroy:
	bus_dmamap_destroy(tag, b->map);
 unmap:
	bus_dmamem_unmap(tag, b->kaddr, b->size);
 free1:
	bus_dmamem_free(tag, b->segs, b->nsegs);
 free0:
	kmem_free(b->segs, b->nsegs_alloc * sizeof(*b->segs));
	kmem_free(b, sizeof(*b));
	mutex_enter(&usb_blk_lock);

	return USBD_NOMEM;
}

#if 0
void
usb_block_real_freemem(usb_dma_block_t *b)
{
	ASSERT_SLEEPABLE();

	bus_dmamap_unload(b->tag, b->map);
	bus_dmamap_destroy(b->tag, b->map);
	bus_dmamem_unmap(b->tag, b->kaddr, b->size);
	bus_dmamem_free(b->tag, b->segs, b->nsegs);
	kmem_free(b->segs, b->nsegs_alloc * sizeof(*b->segs));
	kmem_free(b, sizeof(*b));
}
#endif

#ifdef DEBUG
static bool
usb_valid_block_p(usb_dma_block_t *b, struct usb_dma_block_qh *qh)
{
	usb_dma_block_t *xb;
	LIST_FOREACH(xb, qh, next) {
		if (xb == b)
			return true;
	}
	return false;
}
#endif

/*
 * Do not free the memory unconditionally since we might be called
 * from an interrupt context and that is BAD.
 * XXX when should we really free?
 */
Static void
usb_block_freemem(usb_dma_block_t *b)
{
	USBHIST_FUNC();
	USBHIST_CALLARGS(usbdebug, "size=%ju", b->size, 0, 0, 0);

	KASSERT(mutex_owned(&usb_blk_lock));

#ifdef DEBUG
	LIST_REMOVE(b, next);
#endif
	LIST_INSERT_HEAD(&usb_blk_freelist, b, next);
	usb_blk_nfree++;
}

int
usb_allocmem(struct usbd_bus *bus, size_t size, size_t align, u_int flags,
    usb_dma_t *p)
{
	bus_dma_tag_t tag = bus->ub_dmatag;
	usbd_status err;
	struct usb_frag_dma *f;
	usb_dma_block_t *b;
	int i;
	static ONCE_DECL(init_control);

	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);

	ASSERT_SLEEPABLE();

	RUN_ONCE(&init_control, usb_mem_init);

	u_int dmaflags = (flags & USBMALLOC_COHERENT) ? USB_DMA_COHERENT : 0;

	/* If the request is large then just use a full block. */
	if (size > USB_MEM_SMALL || align > USB_MEM_SMALL) {
		DPRINTFN(1, "large alloc %jd", size, 0, 0, 0);
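		/*
		 * Round up to a whole number of blocks, presumably so
		 * that freed blocks come in few distinct sizes and are
		 * more likely to be reused from the free list.
		 */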
		size = (size + USB_MEM_BLOCK - 1) & ~(USB_MEM_BLOCK - 1);
		mutex_enter(&usb_blk_lock);
		err = usb_block_allocmem(tag, size, align, flags,
		    &p->udma_block);
		if (!err) {
#ifdef DEBUG
			LIST_INSERT_HEAD(&usb_blk_fulllist, p->udma_block, next);
#endif
			p->udma_block->flags = USB_DMA_FULLBLOCK | dmaflags;
			p->udma_offs = 0;
		}
		mutex_exit(&usb_blk_lock);
		return err;
	}

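	/*
	 * Small request: hand out a fragment.  Look for a free fragment
	 * that was allocated with a compatible tag and coherency; if
	 * there is none, carve a fresh block into fragments first.
	 */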
	mutex_enter(&usb_blk_lock);
	/* Check for free fragments. */
	LIST_FOREACH(f, &usb_frag_freelist, ufd_next) {
		KDASSERTMSG(usb_valid_block_p(f->ufd_block, &usb_blk_fraglist),
		    "%s: usb frag %p: unknown block pointer %p",
		    __func__, f, f->ufd_block);
		if (f->ufd_block->tag == tag &&
		    (f->ufd_block->flags & USB_DMA_COHERENT) == dmaflags)
			break;
	}
	if (f == NULL) {
		DPRINTFN(1, "adding fragments", 0, 0, 0, 0);

		err = usb_block_allocmem(tag, USB_MEM_BLOCK, USB_MEM_SMALL,
		    flags, &b);
		if (err) {
			mutex_exit(&usb_blk_lock);
			return err;
		}
#ifdef DEBUG
		LIST_INSERT_HEAD(&usb_blk_fraglist, b, next);
#endif
		b->flags = 0;
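		/*
		 * Thread each USB_MEM_SMALL-sized slot of the new block
		 * onto the fragment free list.  With USB_FRAG_DMA_WORKAROUND
		 * only every other slot is used, so the bookkeeping never
		 * shares a slot with data handed out to callers.
		 */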
		for (i = 0; i < USB_MEM_BLOCK; i += USB_MEM_SMALL) {
			f = (struct usb_frag_dma *)((char *)b->kaddr + i);
			f->ufd_block = b;
			f->ufd_offs = i;
			LIST_INSERT_HEAD(&usb_frag_freelist, f, ufd_next);
#ifdef USB_FRAG_DMA_WORKAROUND
			i += 1 * USB_MEM_SMALL;
#endif
		}
		f = LIST_FIRST(&usb_frag_freelist);
	}
	p->udma_block = f->ufd_block;
	p->udma_offs = f->ufd_offs;
#ifdef USB_FRAG_DMA_WORKAROUND
	p->udma_offs += USB_MEM_SMALL;
#endif
	LIST_REMOVE(f, ufd_next);
	mutex_exit(&usb_blk_lock);
	DPRINTFN(5, "use frag=%#jx size=%jd", (uintptr_t)f, size, 0, 0);

	return 0;
}

void
usb_freemem(struct usbd_bus *bus, usb_dma_t *p)
{
	struct usb_frag_dma *f;

	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);

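	/*
	 * Full blocks go back onto the block free list.  Fragments are
	 * threaded back onto the fragment free list by overlaying the
	 * usb_frag_dma bookkeeping on the memory being freed.
	 */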
	mutex_enter(&usb_blk_lock);
	if (p->udma_block->flags & USB_DMA_FULLBLOCK) {
		KDASSERTMSG(usb_valid_block_p(p->udma_block, &usb_blk_fulllist),
		    "%s: dma %p: invalid block pointer %p",
		    __func__, p, p->udma_block);
		DPRINTFN(1, "large free", 0, 0, 0, 0);
		usb_block_freemem(p->udma_block);
		mutex_exit(&usb_blk_lock);
		return;
	}
	KDASSERTMSG(usb_valid_block_p(p->udma_block, &usb_blk_fraglist),
	    "%s: dma %p: invalid block pointer %p",
	    __func__, p, p->udma_block);
	//usb_syncmem(p, 0, USB_MEM_SMALL, BUS_DMASYNC_POSTREAD);
	f = KERNADDR(p, 0);
#ifdef USB_FRAG_DMA_WORKAROUND
	f = (void *)((uintptr_t)f - USB_MEM_SMALL);
#endif
	f->ufd_block = p->udma_block;
	f->ufd_offs = p->udma_offs;
#ifdef USB_FRAG_DMA_WORKAROUND
	f->ufd_offs -= USB_MEM_SMALL;
#endif
	LIST_INSERT_HEAD(&usb_frag_freelist, f, ufd_next);
	mutex_exit(&usb_blk_lock);
	DPRINTFN(5, "frag=%#jx", (uintptr_t)f, 0, 0, 0);
}

bus_addr_t
usb_dmaaddr(usb_dma_t *dma, unsigned int offset)
{
	unsigned int i;
	bus_size_t seg_offs;

	offset += dma->udma_offs;

	KASSERTMSG(offset < dma->udma_block->size, "offset %d vs %zu", offset,
	    dma->udma_block->size);

	if (dma->udma_block->nsegs == 1) {
		KASSERT(dma->udma_block->map->dm_segs[0].ds_len > offset);
		return dma->udma_block->map->dm_segs[0].ds_addr + offset;
	}

	/*
	 * Search for the bus_dma_segment_t corresponding to this offset.
	 * Since the map keeps no record of which segment a given offset
	 * falls into, we have to iterate from the start of the segment
	 * list each time.  This could be improved.
	 */
	seg_offs = 0;
	for (i = 0; i < dma->udma_block->nsegs; i++) {
		if (seg_offs + dma->udma_block->map->dm_segs[i].ds_len > offset)
			break;

		seg_offs += dma->udma_block->map->dm_segs[i].ds_len;
	}

	KASSERT(i != dma->udma_block->nsegs);
	offset -= seg_offs;
	return dma->udma_block->map->dm_segs[i].ds_addr + offset;
}

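/*
 * Sync the window [offset, offset + len) of the caller's allocation;
 * udma_offs translates this into the fragment's position within the
 * underlying block's DMA map.
 */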
void
usb_syncmem(usb_dma_t *p, bus_addr_t offset, bus_size_t len, int ops)
{

	bus_dmamap_sync(p->udma_block->tag, p->udma_block->map,
	    p->udma_offs + offset, len, ops);
}
425