/*	$NetBSD: uturn.c,v 1.7 2023/12/03 02:17:06 thorpej Exp $	*/

/*	$OpenBSD: uturn.c,v 1.6 2007/12/29 01:26:14 kettenis Exp $	*/

/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2007 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * References:
 * 1. Hardware Cache Coherent Input/Output.  Hewlett-Packard Journal,
 *    February 1996.
 * 2. PA-RISC 1.1 Architecture and Instruction Set Reference Manual,
 *    Hewlett-Packard, February 1994, Third Edition
 */

#include <sys/param.h>

#include <sys/systm.h>
#include <sys/device.h>
#include <sys/vmem.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/reboot.h>
#include <sys/tree.h>

#include <uvm/uvm.h>

#include <sys/bus.h>
#include <machine/iomod.h>
#include <machine/autoconf.h>

#include <hppa/dev/cpudevs.h>

#define UTURNDEBUG
#ifdef UTURNDEBUG

#define DPRINTF(s)	do {	\
	if (uturndebug)		\
		printf s;	\
} while(0)

int uturndebug = 0;
#else
#define DPRINTF(s)	/* */
#endif

struct uturn_regs {
	/* Runway Supervisory Set */
	int32_t		unused1[12];
	uint32_t	io_command;		/* Offset 12 */
#define	UTURN_CMD_TLB_PURGE		33	/* Purge I/O TLB entry */
#define	UTURN_CMD_TLB_DIRECT_WRITE	35	/* I/O TLB Writes */

	uint32_t	io_status;		/* Offset 13 */
	uint32_t	io_control;		/* Offset 14 */
#define	UTURN_IOCTRL_TLB_REAL		0x00000000
#define	UTURN_IOCTRL_TLB_ERROR		0x00010000
#define	UTURN_IOCTRL_TLB_NORMAL		0x00020000

#define	UTURN_IOCTRL_MODE_OFF		0x00000000
#define	UTURN_IOCTRL_MODE_INCLUDE	0x00000080
#define	UTURN_IOCTRL_MODE_PEEK		0x00000180

#define	UTURN_VIRTUAL_MODE	\
	(UTURN_IOCTRL_TLB_NORMAL | UTURN_IOCTRL_MODE_INCLUDE)

#define	UTURN_REAL_MODE		\
	UTURN_IOCTRL_MODE_INCLUDE

	int32_t		unused2[1];

	/* Runway Auxiliary Register Set */
	uint32_t	io_err_resp;		/* Offset  0 */
	uint32_t	io_err_info;		/* Offset  1 */
	uint32_t	io_err_req;		/* Offset  2 */
	uint32_t	io_err_resp_hi;		/* Offset  3 */
	uint32_t	io_tlb_entry_m;		/* Offset  4 */
	uint32_t	io_tlb_entry_l;		/* Offset  5 */
	uint32_t	unused3[1];
	uint32_t	io_pdir_base;		/* Offset  7 */
	uint32_t	io_io_low_hv;		/* Offset  8 */
	uint32_t	io_io_high_hv;		/* Offset  9 */
	uint32_t	unused4[1];
	uint32_t	io_chain_id_mask;	/* Offset 11 */
	uint32_t	unused5[2];
	uint32_t	io_io_low;		/* Offset 14 */
	uint32_t	io_io_high;		/* Offset 15 */
};


/* Uturn supports 256 TLB entries */
#define	UTURN_CHAINID_SHIFT	8
#define	UTURN_CHAINID_MASK	0xff
#define	UTURN_TLB_ENTRIES	(1 << UTURN_CHAINID_SHIFT)
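
/*
 * A note on chain ids (a summary of the arithmetic used below, with
 * illustrative numbers): the chain id is the uppermost UTURN_CHAINID_SHIFT
 * bits of an IOVA and selects one of the 256 I/O TLB entries.  With the
 * 28 bit IOVA space configured in uturnattach() and, assuming 4KB pages,
 * the chain id shift is 28 - 8 = 20, so each TLB entry (chain) covers
 * 1 << 20 bytes = 1MB of IOVA space.
 */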

#define	UTURN_IOVP_SIZE		PAGE_SIZE
#define	UTURN_IOVP_SHIFT	PAGE_SHIFT
#define	UTURN_IOVP_MASK		PAGE_MASK

#define	UTURN_IOVA(iovp, off)	((iovp) | (off))
#define	UTURN_IOVP(iova)	((iova) & UTURN_IOVP_MASK)
#define	UTURN_IOVA_INDEX(iova)	((iova) >> UTURN_IOVP_SHIFT)

struct uturn_softc {
	device_t sc_dv;

	bus_dma_tag_t sc_dmat;
	struct uturn_regs volatile *sc_regs;
	uint64_t *sc_pdir;
	uint32_t sc_chainid_shift;

	char sc_mapname[20];
	vmem_t *sc_map;

	struct hppa_bus_dma_tag sc_dmatag;
};

/*
 * per-map IOVA page table
 */
struct uturn_page_entry {
	SPLAY_ENTRY(uturn_page_entry) upe_node;
	paddr_t upe_pa;
	vaddr_t upe_va;
	bus_addr_t upe_iova;
};

struct uturn_page_map {
	SPLAY_HEAD(uturn_page_tree, uturn_page_entry) upm_tree;
	int upm_maxpage;	/* Size of allocated page map */
	int upm_pagecnt;	/* Number of entries in use */
	struct uturn_page_entry upm_map[1];
};
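
/*
 * upm_map[1] is the "struct hack": the real array is longer and is
 * allocated together with the enclosing structure.  uturn_iomap_create()
 * sizes an n-entry map as sizeof(struct uturn_map_state) +
 * (n - 1) * sizeof(struct uturn_page_entry).
 */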

/*
 * per-map UTURN state
 */
struct uturn_map_state {
	struct uturn_softc *ums_sc;
	bus_addr_t ums_iovastart;
	bus_size_t ums_iovasize;
	struct uturn_page_map ums_map;	/* map must be last (array at end) */
};
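
/*
 * One uturn_map_state hangs off each bus_dmamap_t via _dm_cookie: it is
 * allocated in uturn_dmamap_create(), filled with page entries and an
 * IOVA range on each load, and emptied again by uturn_dmamap_unload().
 */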

int uturnmatch(device_t, cfdata_t, void *);
void uturnattach(device_t, device_t, void *);
static device_t uturn_callback(device_t, struct confargs *);

CFATTACH_DECL_NEW(uturn, sizeof(struct uturn_softc),
    uturnmatch, uturnattach, NULL, NULL);

extern struct cfdriver uturn_cd;

int uturn_dmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t, int,
    bus_dmamap_t *);
void uturn_dmamap_destroy(void *, bus_dmamap_t);
int uturn_dmamap_load(void *, bus_dmamap_t, void *, bus_size_t, struct proc *,
    int);
int uturn_dmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
int uturn_dmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
int uturn_dmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *, int,
    bus_size_t, int);
void uturn_dmamap_unload(void *, bus_dmamap_t);
void uturn_dmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int uturn_dmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t,
    bus_dma_segment_t *, int, int *, int);
void uturn_dmamem_free(void *, bus_dma_segment_t *, int);
int uturn_dmamem_map(void *, bus_dma_segment_t *, int, size_t, void **, int);
void uturn_dmamem_unmap(void *, void *, size_t);
paddr_t uturn_dmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);

static void uturn_iommu_enter(struct uturn_softc *, bus_addr_t, pa_space_t,
    vaddr_t, paddr_t);
static void uturn_iommu_remove(struct uturn_softc *, bus_addr_t, bus_size_t);

struct uturn_map_state *uturn_iomap_create(int, int);
void uturn_iomap_destroy(struct uturn_map_state *);
int uturn_iomap_insert_page(struct uturn_map_state *, vaddr_t, paddr_t);
bus_addr_t uturn_iomap_translate(struct uturn_map_state *, paddr_t);
void uturn_iomap_clear_pages(struct uturn_map_state *);

static int uturn_iomap_load_map(struct uturn_softc *, bus_dmamap_t, int);

const struct hppa_bus_dma_tag uturn_dmat = {
	NULL,
	uturn_dmamap_create, uturn_dmamap_destroy,
	uturn_dmamap_load, uturn_dmamap_load_mbuf,
	uturn_dmamap_load_uio, uturn_dmamap_load_raw,
	uturn_dmamap_unload, uturn_dmamap_sync,

	uturn_dmamem_alloc, uturn_dmamem_free, uturn_dmamem_map,
	uturn_dmamem_unmap, uturn_dmamem_mmap
};
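
/*
 * uturnattach() copies this template tag into sc_dmatag, points _cookie at
 * the softc, and hands the tag to child devices, so every DMA operation on
 * the bus below is routed through the IOMMU-aware wrappers above.
 */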

int
uturnmatch(device_t parent, cfdata_t cf, void *aux)
{
	struct confargs *ca = aux;

	/* there will be only one */
	if (ca->ca_type.iodc_type != HPPA_TYPE_IOA ||
	    ca->ca_type.iodc_sv_model != HPPA_IOA_UTURN)
		return 0;

	if (ca->ca_type.iodc_model == 0x58 &&
	    ca->ca_type.iodc_revision >= 0x20)
		return 0;

	return 1;
}

void
uturnattach(device_t parent, device_t self, void *aux)
{
	struct confargs *ca = aux, nca;
	struct uturn_softc *sc = device_private(self);
	bus_space_handle_t ioh;
	volatile struct uturn_regs *r;
	struct pglist pglist;
	int iova_bits;
	vaddr_t va;
	psize_t size;
	int i;

	if (bus_space_map(ca->ca_iot, ca->ca_hpa, IOMOD_HPASIZE, 0, &ioh)) {
		aprint_error(": can't map IO space\n");
		return;
	}

	sc->sc_dv = self;
	sc->sc_dmat = ca->ca_dmatag;
	sc->sc_regs = r = bus_space_vaddr(ca->ca_iot, ioh);

	aprint_normal(": %x-%x", r->io_io_low << 16, r->io_io_high << 16);
	aprint_normal(": %x-%x", r->io_io_low_hv << 16, r->io_io_high_hv << 16);

	aprint_normal(": %s rev %d\n",
	    ca->ca_type.iodc_revision < 0x10 ? "U2" : "UTurn",
	    ca->ca_type.iodc_revision & 0xf);

	/*
	 * Set up the iommu.
	 */

	/* XXX 28 bits gives us 256MB of iova space */
	/* Calculate based on %age of RAM */
	iova_bits = 28;

	/*
	 * size is # of pdir entries (64 bits each) in bytes.  1 entry per
	 * IOVA page.
	 */
	size = (1 << (iova_bits - UTURN_IOVP_SHIFT)) * sizeof(uint64_t);
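	/*
	 * Worked example, assuming 4KB pages (UTURN_IOVP_SHIFT == 12):
	 * 28 - 12 = 16, i.e. 1 << 16 = 65536 IOVA pages, so the pdir
	 * occupies 65536 * 8 bytes = 512KB of physically contiguous RAM.
	 */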

	/*
	 * Chainid is the upper most bits of an IOVP used to determine which
	 * TLB entry an IOVP will use.
	 */
	sc->sc_chainid_shift = iova_bits - UTURN_CHAINID_SHIFT;

	/*
	 * Allocate memory for I/O pagetables.  They need to be physically
	 * contiguous.
	 */

	if (uvm_pglistalloc(size, 0, -1, PAGE_SIZE, 0, &pglist, 1, 0) != 0)
		panic("%s: no memory", __func__);

	va = (vaddr_t)VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	sc->sc_pdir = (uint64_t *)va;

	memset(sc->sc_pdir, 0, size);

	r->io_chain_id_mask = UTURN_CHAINID_MASK << sc->sc_chainid_shift;
	r->io_pdir_base = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));

	r->io_tlb_entry_m = 0;
	r->io_tlb_entry_l = 0;

	for (i = 0; i < UTURN_TLB_ENTRIES; i++) {
		r->io_command =
		    UTURN_CMD_TLB_DIRECT_WRITE | (i << sc->sc_chainid_shift);
	}
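
	/*
	 * io_tlb_entry_m/l are zero, so each direct write above loads an
	 * empty (invalid) entry into one of the UTURN_TLB_ENTRIES chains,
	 * leaving the whole I/O TLB invalid before it is enabled.
	 */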
	/*
	 * Go to "Virtual Mode"
	 */
	r->io_control = UTURN_VIRTUAL_MODE;

	snprintf(sc->sc_mapname, sizeof(sc->sc_mapname), "%s_map",
	    device_xname(sc->sc_dv));
	sc->sc_map = vmem_create(sc->sc_mapname,
	    0,			/* base */
	    (1 << iova_bits),	/* size */
	    PAGE_SIZE,		/* quantum */
	    NULL,		/* allocfn */
	    NULL,		/* freefn */
	    NULL,		/* source */
	    0,			/* qcache_max */
	    VM_SLEEP,
	    IPL_VM);
	KASSERT(sc->sc_map != NULL);

	sc->sc_dmatag = uturn_dmat;
	sc->sc_dmatag._cookie = sc;

	/*
	 * U2/UTurn is actually a combination of an Upper Bus Converter (UBC)
	 * and a Lower Bus Converter (LBC).  This driver attaches to the UBC;
	 * the LBC isn't very interesting, so we skip it.  This is easy, since
	 * it always is module 63, hence the MAXMODBUS - 1 below.
	 */
	nca = *ca;
	nca.ca_hpabase = r->io_io_low << 16;
	nca.ca_dmatag = &sc->sc_dmatag;
	nca.ca_nmodules = MAXMODBUS - 1;
	pdc_scanbus(self, &nca, uturn_callback);
}

static device_t
uturn_callback(device_t self, struct confargs *ca)
{

	return config_found(self, ca, mbprint,
	    CFARGS(.submatch = mbsubmatch));
}

/*
 * PDIR entry format (HP bit number)
 *
 * +-------+----------------+----------------------------------------------+
 * |0     3|4             15|16                                          31|
 * |  PPN  | Virtual Index  |         Physical Page Number (PPN)           |
 * | [0:3] |     [0:11]     |                    [4:19]                    |
 * +-------+----------------+----------------------------------------------+
 *
 * +-----------------------+--------+--------+-------+-------+------+------+-------+
 * |0                    19|20    24|   25   |  26   | 27 28 |  29  |  30  |  31   |
 * |          PPN          |  Rsvd  |   PH   |Update | Rsvd  | Lock | Safe | Valid |
 * |        [20:39]        |        | Enable |Enable |       |Enable| DMA  |       |
 * +-----------------------+--------+--------+-------+-------+------+------+-------+
 *
 */

#define	UTURN_PENTRY_PREFETCH	0x40
#define	UTURN_PENTRY_UPDATE	0x20
#define	UTURN_PENTRY_LOCK	0x04	/* eisa devices only */
#define	UTURN_PENTRY_SAFEDMA	0x02	/* use safe dma - for subcacheline */
#define	UTURN_PENTRY_VALID	0x01

static void
uturn_iommu_enter(struct uturn_softc *sc, bus_addr_t iova, pa_space_t sp,
    vaddr_t va, paddr_t pa)
{
	uint64_t pdir_entry;
	uint64_t *pdirp;
	uint32_t ci;		/* coherent index */

	pdirp = &sc->sc_pdir[UTURN_IOVA_INDEX(iova)];

	DPRINTF(("%s: iova %lx pdir %p pdirp %p pa %lx", __func__, iova,
	    sc->sc_pdir, pdirp, pa));

	ci = lci(HPPA_SID_KERNEL, va);

	/* setup hints, etc */
	pdir_entry = (UTURN_PENTRY_LOCK | UTURN_PENTRY_SAFEDMA |
	    UTURN_PENTRY_VALID);

	/*
	 * bottom 36 bits of pa map directly into entry to form PPN[4:39]
	 * leaving last 12 bits for hints, etc.
	 */
	pdir_entry |= (pa & ~PAGE_MASK);

	/* mask off top PPN bits */
	pdir_entry &= 0x0000ffffffffffffUL;

	/* insert the virtual index bits */
	pdir_entry |= (((uint64_t)ci >> 12) << 48);

	/* PPN[0:3] of the 40bit PPN go in entry[0:3] */
	pdir_entry |= ((((uint64_t)pa & 0x000f000000000000UL) >> 48) << 60);

	*pdirp = pdir_entry;

	DPRINTF((": pdir_entry %llx\n", pdir_entry));

	/*
	 * We could use PDC_MODEL_CAPABILITIES here
	 */
	fdcache(HPPA_SID_KERNEL, (vaddr_t)pdirp, sizeof(uint64_t));
}
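
/*
 * Illustrative example of the entry built above (made-up values): with
 * pa = 0x12345000 and a coherent index for which ci >> 12 == 0xabc,
 *
 *	pdir_entry = 0x0000000012345007	hints (LOCK | SAFEDMA | VALID) + PPN
 *		   | 0x0abc000000000000	virtual index in the top bits
 *		   = 0x0abc000012345007
 *
 * Only a pa at or above 2^48 would contribute PPN[0:3] bits via the
 * final OR.
 */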


static void
uturn_iommu_remove(struct uturn_softc *sc, bus_addr_t iova, bus_size_t size)
{
	uint32_t chain_size = 1 << sc->sc_chainid_shift;
	bus_size_t len;

	KASSERT((iova & PAGE_MASK) == 0);
	KASSERT((size & PAGE_MASK) == 0);

	DPRINTF(("%s: sc %p iova %lx size %lx\n", __func__, sc, iova, size));
	len = size;
	while (len != 0) {
		uint64_t *pdirp = &sc->sc_pdir[UTURN_IOVA_INDEX(iova)];

		/* XXX Just the valid bit??? */
		*pdirp = 0;

		/*
		 * We could use PDC_MODEL_CAPABILITIES here
		 */
		fdcache(HPPA_SID_KERNEL, (vaddr_t)pdirp, sizeof(uint64_t));

		iova += PAGE_SIZE;
		len -= PAGE_SIZE;
	}

	len = size + chain_size;

	while (len > chain_size) {
		sc->sc_regs->io_command = UTURN_CMD_TLB_PURGE | iova;
		iova += chain_size;
		len -= chain_size;
	}
}
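
/*
 * The purge loop above issues one UTURN_CMD_TLB_PURGE per chain_size
 * stride, roughly size / chain_size + 1 commands in total, so that every
 * I/O TLB chain the unmapped range could have loaded is invalidated.
 */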

int
uturn_dmamap_create(void *v, bus_size_t size, int nsegments,
    bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamap)
{
	struct uturn_softc *sc = v;
	bus_dmamap_t map;
	struct uturn_map_state *ums;
	int error;

	error = bus_dmamap_create(sc->sc_dmat, size, nsegments, maxsegsz,
	    boundary, flags, &map);
	if (error)
		return (error);

	ums = uturn_iomap_create(atop(round_page(size)), flags);
	if (ums == NULL) {
		bus_dmamap_destroy(sc->sc_dmat, map);
		return (ENOMEM);
	}

	ums->ums_sc = sc;
	map->_dm_cookie = ums;
	*dmamap = map;

	return (0);
}

void
uturn_dmamap_destroy(void *v, bus_dmamap_t map)
{
	struct uturn_softc *sc = v;

	/*
	 * The specification (man page) requires a loaded
	 * map to be unloaded before it is destroyed.
	 */
	if (map->dm_nsegs)
		uturn_dmamap_unload(sc, map);

	if (map->_dm_cookie)
		uturn_iomap_destroy(map->_dm_cookie);
	map->_dm_cookie = NULL;

	bus_dmamap_destroy(sc->sc_dmat, map);
}

static int
uturn_iomap_load_map(struct uturn_softc *sc, bus_dmamap_t map, int flags)
{
	struct uturn_map_state *ums = map->_dm_cookie;
	struct uturn_page_map *upm = &ums->ums_map;
	struct uturn_page_entry *e;
	int err, seg;
	paddr_t pa, paend;
	vaddr_t va;
	bus_size_t sgsize;
	bus_size_t align, boundary;
	vmem_addr_t iovaddr;
	bus_addr_t iova;
	int i;

	/* XXX */
	boundary = map->_dm_boundary;
	align = 0;	/* align to quantum */

	uturn_iomap_clear_pages(ums);

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];

		paend = round_page(ds->ds_addr + ds->ds_len);
		for (pa = trunc_page(ds->ds_addr), va = trunc_page(ds->_ds_va);
		     pa < paend; pa += PAGE_SIZE, va += PAGE_SIZE) {
			err = uturn_iomap_insert_page(ums, va, pa);
			if (err) {
				printf("iomap insert error: %d for "
				    "va 0x%lx pa 0x%lx\n", err, va, pa);
				bus_dmamap_unload(sc->sc_dmat, map);
				uturn_iomap_clear_pages(ums);
			}
		}
	}

	const vm_flag_t vmflags = VM_BESTFIT |
	    ((flags & BUS_DMA_NOWAIT) ? VM_NOSLEEP : VM_SLEEP);

	sgsize = ums->ums_map.upm_pagecnt * PAGE_SIZE;
	err = vmem_xalloc(sc->sc_map, sgsize,
	    align,		/* align */
	    0,			/* phase */
	    boundary,		/* nocross */
	    VMEM_ADDR_MIN,	/* minaddr */
	    VMEM_ADDR_MAX,	/* maxaddr */
	    vmflags,
	    &iovaddr);
	if (err)
		return (err);

	ums->ums_iovastart = iovaddr;
	ums->ums_iovasize = sgsize;

	iova = iovaddr;
	for (i = 0, e = upm->upm_map; i < upm->upm_pagecnt; ++i, ++e) {
		e->upe_iova = iova;
		uturn_iommu_enter(sc, e->upe_iova, HPPA_SID_KERNEL, e->upe_va,
		    e->upe_pa);
		iova += PAGE_SIZE;
	}

	for (seg = 0; seg < map->dm_nsegs; seg++) {
		struct hppa_bus_dma_segment *ds = &map->dm_segs[seg];
		ds->ds_addr = uturn_iomap_translate(ums, ds->ds_addr);
	}

	return (0);
}
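
/*
 * uturn_iomap_load_map() thus works in two phases: it first gathers the
 * physical pages of every segment into the per-map splay tree, then
 * allocates one contiguous IOVA range from sc_map, programs a pdir entry
 * per page, and finally rewrites each segment address as an IOVA.
 */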

int
uturn_dmamap_load(void *v, bus_dmamap_t map, void *addr, bus_size_t size,
    struct proc *p, int flags)
{
	struct uturn_softc *sc = v;
	int err;

	err = bus_dmamap_load(sc->sc_dmat, map, addr, size, p, flags);
	if (err)
		return (err);

	return uturn_iomap_load_map(sc, map, flags);
}

int
uturn_dmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m, int flags)
{
	struct uturn_softc *sc = v;
	int err;

	err = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, flags);
	if (err)
		return (err);

	return uturn_iomap_load_map(sc, map, flags);
}

int
uturn_dmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio, int flags)
{
	struct uturn_softc *sc = v;

	printf("load_uio\n");

	return (bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags));
}

int
uturn_dmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct uturn_softc *sc = v;

	printf("load_raw\n");

	return (bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size,
	    flags));
}

void
uturn_dmamap_unload(void *v, bus_dmamap_t map)
{
	struct uturn_softc *sc = v;
	struct uturn_map_state *ums = map->_dm_cookie;
	struct uturn_page_map *upm = &ums->ums_map;
	struct uturn_page_entry *e;
	int i;

	/* Remove the IOMMU entries. */
	for (i = 0, e = upm->upm_map; i < upm->upm_pagecnt; ++i, ++e)
		uturn_iommu_remove(sc, e->upe_iova, PAGE_SIZE);

	/* Clear the iomap. */
	uturn_iomap_clear_pages(ums);

	bus_dmamap_unload(sc->sc_dmat, map);

	vmem_xfree(sc->sc_map, ums->ums_iovastart, ums->ums_iovasize);
	ums->ums_iovastart = 0;
	ums->ums_iovasize = 0;
}

void
uturn_dmamap_sync(void *v, bus_dmamap_t map, bus_addr_t off,
    bus_size_t len, int ops)
{
	/* Nothing to do; DMA is cache-coherent. */
}

int
uturn_dmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	struct uturn_softc *sc = v;

	return (bus_dmamem_alloc(sc->sc_dmat, size, alignment, boundary,
	    segs, nsegs, rsegs, flags));
}

void
uturn_dmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
{
	struct uturn_softc *sc = v;

	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
}

int
uturn_dmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
    void **kvap, int flags)
{
	struct uturn_softc *sc = v;

	return (bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags));
}

void
uturn_dmamem_unmap(void *v, void *kva, size_t size)
{
	struct uturn_softc *sc = v;

	bus_dmamem_unmap(sc->sc_dmat, kva, size);
}

paddr_t
uturn_dmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs, off_t off,
    int prot, int flags)
{
	struct uturn_softc *sc = v;

	return (bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, off, prot, flags));
}

/*
 * Utility function used by the splay tree to order page entries by pa.
 */
static inline int
upe_compare(struct uturn_page_entry *a, struct uturn_page_entry *b)
{
	return ((a->upe_pa > b->upe_pa) ? 1 :
	    (a->upe_pa < b->upe_pa) ? -1 : 0);
}

SPLAY_PROTOTYPE(uturn_page_tree, uturn_page_entry, upe_node, upe_compare);

SPLAY_GENERATE(uturn_page_tree, uturn_page_entry, upe_node, upe_compare);

/*
 * Create a new iomap.
 */
struct uturn_map_state *
uturn_iomap_create(int n, int flags)
{
	struct uturn_map_state *ums;

	/* Safety for heavily fragmented data, such as mbufs */
	n += 4;
	if (n < 16)
		n = 16;
	const size_t sz =
	    sizeof(*ums) + (n - 1) * sizeof(ums->ums_map.upm_map[0]);
	ums = kmem_zalloc(sz, (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP);
	if (ums == NULL)
		return (NULL);

	/* Initialize the map. */
	ums->ums_map.upm_maxpage = n;
	SPLAY_INIT(&ums->ums_map.upm_tree);

	return (ums);
}

/*
 * Destroy an iomap.
 */
void
uturn_iomap_destroy(struct uturn_map_state *ums)
{
	KASSERT(ums->ums_map.upm_pagecnt == 0);
	const int n = ums->ums_map.upm_maxpage;
	const size_t sz =
	    sizeof(*ums) + (n - 1) * sizeof(ums->ums_map.upm_map[0]);

	kmem_free(ums, sz);
}

/*
 * Insert a pa entry in the iomap.
 */
int
uturn_iomap_insert_page(struct uturn_map_state *ums, vaddr_t va, paddr_t pa)
{
	struct uturn_page_map *upm = &ums->ums_map;
	struct uturn_page_entry *e;

	if (upm->upm_pagecnt >= upm->upm_maxpage) {
		struct uturn_page_entry upe;

		upe.upe_pa = pa;
		if (SPLAY_FIND(uturn_page_tree, &upm->upm_tree, &upe))
			return (0);

		return (ENOMEM);
	}

	e = &upm->upm_map[upm->upm_pagecnt];

	e->upe_pa = pa;
	e->upe_va = va;
	e->upe_iova = 0;

	e = SPLAY_INSERT(uturn_page_tree, &upm->upm_tree, e);

	/* Duplicates are okay, but only count them once. */
	if (e)
		return (0);

	++upm->upm_pagecnt;

	return (0);
}

/*
 * Translate a physical address (pa) into an IOVA address.
 */
bus_addr_t
uturn_iomap_translate(struct uturn_map_state *ums, paddr_t pa)
{
	struct uturn_page_map *upm = &ums->ums_map;
	struct uturn_page_entry *e;
	struct uturn_page_entry pe;
	paddr_t offset = pa & PAGE_MASK;

	pe.upe_pa = trunc_page(pa);

	e = SPLAY_FIND(uturn_page_tree, &upm->upm_tree, &pe);

	if (e == NULL) {
		panic("couldn't find pa %lx\n", pa);
		return 0;
	}

	return (e->upe_iova | offset);
}

/*
 * Clear the iomap table and tree.
 */
void
uturn_iomap_clear_pages(struct uturn_map_state *ums)
{
	ums->ums_map.upm_pagecnt = 0;
	SPLAY_INIT(&ums->ums_map.upm_tree);
}