1 /* $NetBSD: maple.c,v 1.57 2022/07/05 19:21:26 andvar Exp $ */
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by ITOH Yasufumi.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*-
33 * Copyright (c) 2001 Marcus Comstedt
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement:
46 * This product includes software developed by Marcus Comstedt.
47 * 4. Neither the name of The NetBSD Foundation nor the names of its
48 * contributors may be used to endorse or promote products derived
49 * from this software without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
52 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
53 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
54 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
55 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
56 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
57 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
58 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
59 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
60 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
61 * POSSIBILITY OF SUCH DAMAGE.
62 */
63
64 #include <sys/cdefs.h>
65 __KERNEL_RCSID(0, "$NetBSD: maple.c,v 1.57 2022/07/05 19:21:26 andvar Exp $");
66
67 #include <sys/param.h>
68 #include <sys/device.h>
69 #include <sys/device_impl.h> /* XXX autoconf abuse */
70 #include <sys/fcntl.h>
71 #include <sys/kernel.h>
72 #include <sys/kthread.h>
73 #include <sys/poll.h>
74 #include <sys/select.h>
75 #include <sys/proc.h>
76 #include <sys/signalvar.h>
77 #include <sys/systm.h>
78 #include <sys/conf.h>
79 #include <sys/bus.h>
80 #include <sys/mutex.h>
81 #include <sys/condvar.h>
82
83 #include <uvm/uvm.h>
84
85 #include <machine/cpu.h>
86 #include <machine/sysasicvar.h>
87 #include <sh3/pmap.h>
88
89 #include <dreamcast/dev/maple/maple.h>
90 #include <dreamcast/dev/maple/mapleconf.h>
91 #include <dreamcast/dev/maple/maplevar.h>
92 #include <dreamcast/dev/maple/maplereg.h>
93 #include <dreamcast/dev/maple/mapleio.h>
94
95 #include "ioconf.h"
96 #include "locators.h"
97
/* Internal macros, functions, and variables. */

/* callout interval (in ticks) between periodic unit status scans */
#define MAPLE_CALLOUT_TICKS 2

/* decompose a maple character device minor number: bus / port / subunit */
#define MAPLEBUSUNIT(dev) (minor(dev)>>5)
#define MAPLEPORT(dev) ((minor(dev) & 0x18) >> 3)
#define MAPLESUBUNIT(dev) (minor(dev) & 0x7)

/* interrupt priority level */
#define IPL_MAPLE IPL_BIO
#define splmaple() splbio()
#define IRL_MAPLE SYSASIC_IRL9

/*
 * Function declarations.
 */
static int maplematch(device_t, cfdata_t, void *);
static void mapleattach(device_t, device_t, void *);
static void maple_scanbus(struct maple_softc *);
static char * maple_unit_name(char *, size_t, int port, int subunit);
static void maple_begin_txbuf(struct maple_softc *);
static int maple_end_txbuf(struct maple_softc *);
static void maple_queue_command(struct maple_softc *, struct maple_unit *,
    int command, int datalen, const void *dataaddr);
static void maple_write_command(struct maple_softc *, struct maple_unit *,
    int, int, const void *);
static void maple_start(struct maple_softc *sc);
static void maple_start_poll(struct maple_softc *);
static void maple_check_subunit_change(struct maple_softc *,
    struct maple_unit *);
static void maple_check_unit_change(struct maple_softc *,
    struct maple_unit *);
static void maple_print_unit(void *, const char *);
static int maplesubmatch(device_t, cfdata_t, const int *, void *);
static int mapleprint(void *, const char *);
static void maple_attach_unit(struct maple_softc *, struct maple_unit *);
static void maple_detach_unit_nofix(struct maple_softc *,
    struct maple_unit *);
static void maple_detach_unit(struct maple_softc *, struct maple_unit *);
static void maple_queue_cmds(struct maple_softc *,
    struct maple_cmdq_head *);
static void maple_unit_probe(struct maple_softc *);
static void maple_unit_ping(struct maple_softc *);
static int maple_send_defered_periodic(struct maple_softc *);
static void maple_send_periodic(struct maple_softc *);
static void maple_remove_from_queues(struct maple_softc *,
    struct maple_unit *);
static int maple_retry(struct maple_softc *, struct maple_unit *,
    enum maple_dma_stat);
static void maple_queue_retry(struct maple_softc *);
static void maple_check_responses(struct maple_softc *);
static void maple_event_thread(void *);
static int maple_intr(void *);
static void maple_callout(void *);

int maple_alloc_dma(size_t, vaddr_t *, paddr_t *);
#if 0
void maple_free_dma(paddr_t, size_t);
#endif

/*
 * Global variables.
 */
int maple_polling; /* Are we polling? (Debugger mode) */

CFATTACH_DECL_NEW(maple, sizeof(struct maple_softc),
    maplematch, mapleattach, NULL, NULL);

dev_type_open(mapleopen);
dev_type_close(mapleclose);
dev_type_ioctl(mapleioctl);

/* character device interface (no read/write; control is via ioctl) */
const struct cdevsw maple_cdevsw = {
	.d_open = mapleopen,
	.d_close = mapleclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = mapleioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = 0
};
184
/*
 * Autoconf match function: the Maple bus is always present on the
 * Dreamcast, so match unconditionally.
 */
static int
maplematch(device_t parent, cfdata_t cf, void *aux)
{

	return 1;
}
191
/*
 * Autoconf attach: allocate the shared DMA area, initialize per-unit
 * state, reset and enable the Maple controller, perform the initial
 * bus scan in polling mode, then start the interrupt-driven event
 * thread and callout.
 */
static void
mapleattach(device_t parent, device_t self, void *aux)
{
	struct maple_softc *sc;
	struct maple_unit *u;
	vaddr_t dmabuffer;
	paddr_t dmabuffer_phys;
	uint32_t *p;
	int port, subunit, f;

	sc = device_private(self);
	sc->sc_dev = self;

	printf(": %s\n", sysasic_intr_string(IRL_MAPLE));

	if (maple_alloc_dma(MAPLE_DMABUF_SIZE, &dmabuffer, &dmabuffer_phys)) {
		printf("%s: unable to allocate DMA buffers.\n",
		    device_xname(self));
		return;
	}

	p = (uint32_t *)dmabuffer;

	/*
	 * Carve a 256-word (1KB) receive buffer out of the DMA area for
	 * each port/subunit pair, and set up the per-function back
	 * pointers.
	 */
	for (port = 0; port < MAPLE_PORTS; port++) {
		for (subunit = 0; subunit < MAPLE_SUBUNITS; subunit++) {
			u = &sc->sc_unit[port][subunit];
			u->port = port;
			u->subunit = subunit;
			u->u_dma_stat = MAPLE_DMA_IDLE;
			u->u_rxbuf = p;
			u->u_rxbuf_phys = SH3_P2SEG_TO_PHYS(p);
			p += 256;

			for (f = 0; f < MAPLE_NFUNC; f++) {
				u->u_func[f].f_funcno = f;
				u->u_func[f].f_unit = u;
			}
		}
	}

	/* the remainder of the DMA area is the transmit buffer */
	sc->sc_txbuf = p;
	sc->sc_txbuf_phys = SH3_P2SEG_TO_PHYS(p);

	SIMPLEQ_INIT(&sc->sc_retryq);
	TAILQ_INIT(&sc->sc_probeq);
	TAILQ_INIT(&sc->sc_pingq);
	TAILQ_INIT(&sc->sc_periodicq);
	TAILQ_INIT(&sc->sc_periodicdeferq);
	TAILQ_INIT(&sc->sc_acmdq);
	TAILQ_INIT(&sc->sc_pcmdq);

	/* reset the controller, then set bus speed/timeout and enable */
	MAPLE_RESET = RESET_MAGIC;
	MAPLE_RESET2 = 0;

	MAPLE_SPEED = SPEED_2MBPS | TIMEOUT(50000);

	MAPLE_ENABLE = 1;

	mutex_init(&sc->sc_dma_lock, MUTEX_DEFAULT, IPL_MAPLE);
	cv_init(&sc->sc_dma_cv, device_xname(self));
	mutex_init(&sc->sc_event_lock, MUTEX_DEFAULT, IPL_SOFTCLOCK);
	cv_init(&sc->sc_event_cv, device_xname(self));

	/* initial bus scan is done synchronously, before interrupts */
	maple_polling = 1;
	maple_scanbus(sc);

	callout_init(&sc->maple_callout_ch, 0);

	sc->sc_intrhand = sysasic_intr_establish(SYSASIC_EVENT_MAPLE_DMADONE,
	    IPL_MAPLE, IRL_MAPLE, maple_intr, sc);

	config_pending_incr(self);	/* create thread before mounting root */

	if (kthread_create(PRI_NONE, 0, NULL, maple_event_thread, sc,
	    &sc->event_thread, "%s", device_xname(self)) == 0)
		return;

	panic("%s: unable to create event thread", device_xname(self));
}
271
272 /*
273 * initial device attach
274 */
/*
 * initial device attach
 *
 * Queue every port's main unit (subunit 0) for probing, then run the
 * probe/response cycle in polling mode until the probe queue wraps
 * back around (i.e. all initially present devices have been seen).
 * Only called while cold, from mapleattach().
 */
static void
maple_scanbus(struct maple_softc *sc)
{
	struct maple_unit *u;
	int port;
	int last_port, last_subunit;
	int i;

	KASSERT(cold && maple_polling);

	/* probe all ports */
	for (port = 0; port < MAPLE_PORTS; port++) {
		u = &sc->sc_unit[port][0];
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
		{
			char buf[16];
			printf("%s: queued to probe 1\n",
			    maple_unit_name(buf, sizeof(buf), u->port, u->subunit));
		}
#endif
		TAILQ_INSERT_TAIL(&sc->sc_probeq, u, u_q);
		u->u_queuestat = MAPLE_QUEUE_PROBE;
	}

	last_port = last_subunit = -1;
	maple_begin_txbuf(sc);
	while ((u = TAILQ_FIRST(&sc->sc_probeq)) != NULL) {
		/*
		 * Check wrap condition: re-queued units come back in
		 * non-increasing order, which means we have cycled
		 * through everything queued initially.
		 */
		if (u->port < last_port || u->subunit <= last_subunit)
			break;
		last_port = u->port;
		if (u->port == MAPLE_PORTS - 1)
			last_subunit = u->subunit;

		maple_unit_probe(sc);
		for (i = 10 /* just not forever */; maple_end_txbuf(sc); i--) {
			maple_start_poll(sc);
			maple_check_responses(sc);
			if (i == 0)
				break;
			/* attach may issue cmds */
			maple_queue_cmds(sc, &sc->sc_acmdq);
		}
	}
}
322
/*
 * Run one synchronous (polled) pass of the bus: wait for any pending
 * DMA to finish, force all units idle, then issue the periodic status
 * requests and collect responses without interrupts.  Intended for
 * polled contexts (see maple_polling); note the XXX comments — this
 * clobbers the driver's normal interrupt-driven state.
 */
void
maple_run_polling(device_t dev)
{
	struct maple_softc *sc;
	int port, subunit;
	int i;

	sc = device_private(dev);

	/*
	 * first, make sure polling works
	 */
	while (MAPLE_STATE != 0)	/* XXX may lost a DMA cycle */
		;

	/* XXX this will break internal state */
	for (port = 0; port < MAPLE_PORTS; port++)
		for (subunit = 0; subunit < MAPLE_SUBUNITS; subunit++)
			sc->sc_unit[port][subunit].u_dma_stat = MAPLE_DMA_IDLE;
	SIMPLEQ_INIT(&sc->sc_retryq);	/* XXX discard current retries */

	/*
	 * do polling (periodic status check only)
	 */
	maple_begin_txbuf(sc);
	maple_send_defered_periodic(sc);
	maple_send_periodic(sc);
	for (i = 10 /* just not forever */; maple_end_txbuf(sc); i--) {
		maple_start_poll(sc);
		maple_check_responses(sc);
		if (i == 0)
			break;

		/* maple_check_responses() has executed maple_begin_txbuf() */
		maple_queue_retry(sc);
		maple_send_defered_periodic(sc);
	}
}
361
/*
 * Format a unit name such as "mapleA" (subunit 0) or "mapleB3" into
 * the caller-supplied buffer and return the buffer.
 *
 * snprintf() returns an int: negative on error, or the would-be
 * length, which exceeds the buffer size on truncation.  The previous
 * code stored that into a size_t and clamped with "l > len", so an
 * error produced a huge offset and the exact-truncation case
 * (l == len) appended one past the terminating NUL.  Clamp to the
 * end of the string instead so the subunit suffix always lands in
 * bounds.
 */
static char *
maple_unit_name(char *buf, size_t len, int port, int subunit)
{
	int n;
	size_t l;

	n = snprintf(buf, len, "maple%c", port + 'A');
	l = (n < 0) ? 0 : (size_t)n;
	if (l >= len)
		l = (len > 0) ? len - 1 : 0;	/* truncated: append at NUL */
	if (subunit)
		snprintf(buf + l, len - l, "%d", subunit);

	return buf;
}
373
/*
 * Allocate "size" bytes (rounded up to whole pages) of physically
 * contiguous memory for Maple DMA.  On success returns 0 and fills in
 * *vap with the P2SEG (uncached) virtual address and *pap with the
 * physical address; otherwise returns the errno from uvm_pglistalloc().
 */
int
maple_alloc_dma(size_t size, vaddr_t *vap, paddr_t *pap)
{
	extern paddr_t avail_start, avail_end;	/* from pmap.c */
	struct pglist mlist;
	struct vm_page *m;
	int error;

	size = round_page(size);

	/* request a single contiguous segment (nsegs = 1) */
	error = uvm_pglistalloc(size, avail_start, avail_end - PAGE_SIZE,
	    0, 0, &mlist, 1, 0);
	if (error)
		return error;

	/* contiguous allocation: first page gives the base address */
	m = TAILQ_FIRST(&mlist);
	*pap = VM_PAGE_TO_PHYS(m);
	*vap = SH3_PHYS_TO_P2SEG(VM_PAGE_TO_PHYS(m));

	return 0;
}
395
#if 0 /* currently unused */
/*
 * Release a DMA buffer obtained from maple_alloc_dma(), rebuilding the
 * page list from the physical address range.
 */
void
maple_free_dma(paddr_t paddr, size_t size)
{
	struct pglist mlist;
	struct vm_page *m;
	bus_addr_t addr;

	TAILQ_INIT(&mlist);
	for (addr = paddr; addr < paddr + size; addr += PAGE_SIZE) {
		m = PHYS_TO_VM_PAGE(addr);
		TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
	}
	uvm_pglistfree(&mlist);
}
#endif
412
413 static void
maple_begin_txbuf(struct maple_softc * sc)414 maple_begin_txbuf(struct maple_softc *sc)
415 {
416
417 sc->sc_txlink = sc->sc_txpos = sc->sc_txbuf;
418 SIMPLEQ_INIT(&sc->sc_dmaq);
419 }
420
421 static int
maple_end_txbuf(struct maple_softc * sc)422 maple_end_txbuf(struct maple_softc *sc)
423 {
424
425 /* if no frame have been written, we can't mark the
426 list end, and so the DMA must not be activated */
427 if (sc->sc_txpos == sc->sc_txbuf)
428 return 0;
429
430 *sc->sc_txlink |= 0x80000000;
431
432 return 1;
433 }
434
/* bus address code for each subunit; 0x20 is the main (subunit 0) device */
static const int8_t subunit_code[] = { 0x20, 0x01, 0x02, 0x04, 0x08, 0x10 };

/*
 * Append one command frame for unit "u" to the transmit buffer and put
 * the unit on the DMA queue.  "datalen" is in 32-bit words; "dataaddr"
 * may be NULL when datalen is 0.
 */
static void
maple_queue_command(struct maple_softc *sc, struct maple_unit *u,
    int command, int datalen, const void *dataaddr)
{
	int to, from;
	uint32_t *p = sc->sc_txpos;

	/* Max data length = 255 longs = 1020 bytes */
	KASSERT(datalen >= 0 && datalen <= 255);

	/* Compute sender and recipient address */
	from = u->port << 6;
	to = from | subunit_code[u->subunit];

	/* remember this frame so maple_end_txbuf() can set the end mark */
	sc->sc_txlink = p;

	/* Set length of packet and destination port (A-D) */
	*p++ = datalen | (u->port << 16);

	/* Write address to receive buffer where the response
	   frame should be put */
	*p++ = u->u_rxbuf_phys;

	/* Create the frame header.  The fields are assembled "backwards"
	   because of the Maple Bus big-endianness.  */
	*p++ = (command & 0xff) | (to << 8) | (from << 16) | (datalen << 24);

	/* Copy parameter data, if any */
	if (datalen > 0) {
		const uint32_t *param = dataaddr;
		int i;
		for (i = 0; i < datalen; i++)
			*p++ = *param++;
	}

	sc->sc_txpos = p;

	SIMPLEQ_INSERT_TAIL(&sc->sc_dmaq, u, u_dmaq);
}
476
/*
 * Queue a fresh (non-retry) command for a unit.  The command, length,
 * and data pointer are recorded in the unit so the frame can be
 * retransmitted later if the unit reports it is busy.
 */
static void
maple_write_command(struct maple_softc *sc, struct maple_unit *u, int command,
    int datalen, const void *dataaddr)
{
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	char buf[16];

	if (u->u_retrycnt)
		printf("%s: retrycnt %d\n", maple_unit_name(buf, sizeof(buf),
		    u->port, u->subunit), u->u_retrycnt);
#endif
	u->u_retrycnt = 0;
	u->u_command = command;
	u->u_datalen = datalen;
	u->u_dataaddr = dataaddr;

	maple_queue_command(sc, u, command, datalen, dataaddr);
}
495
/* start DMA (asynchronously; completion is signalled via maple_intr) */
static void
maple_start(struct maple_softc *sc)
{

	MAPLE_DMAADDR = sc->sc_txbuf_phys;
	MAPLE_STATE = 1;
}
504
/* start DMA -- busy-wait until DMA done (polling mode only) */
static void
maple_start_poll(struct maple_softc *sc)
{

	MAPLE_DMAADDR = sc->sc_txbuf_phys;
	MAPLE_STATE = 1;
	/* the controller clears MAPLE_STATE when the transfer completes */
	while (MAPLE_STATE != 0)
		;
}
515
/*
 * Compare the subunit map reported in a main unit's (subunit 0)
 * response against the previously recorded map, and queue any
 * newly-appeared subunits for probing.  Only called for subunit 0.
 */
static void
maple_check_subunit_change(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_unit *u1;
	int port;
	int8_t unit_map;
	int units, un;
	int i;

	KASSERT(u->subunit == 0);

	port = u->port;
	/* byte 2 of the response holds the connected-subunit bitmap */
	unit_map = ((int8_t *) u->u_rxbuf)[2];
	if (sc->sc_port_unit_map[port] == unit_map)
		return;

	/* shift subunit bits up one and set bit 0 for the main unit */
	units = ((unit_map & 0x1f) << 1) | 1;
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
	{
		char buf[16];
		printf("%s: unit_map 0x%x -> 0x%x (units 0x%x)\n",
		    maple_unit_name(buf, sizeof(buf), u->port, u->subunit),
		    sc->sc_port_unit_map[port], unit_map, units);
	}
#endif
#if 0 /* this detects unit removal rapidly but is not reliable */
	/* check for unit change */
	un = sc->sc_port_units[port] & ~units;

	/* detach removed devices */
	for (i = MAPLE_SUBUNITS - 1; i > 0; i--)
		if (un & (1 << i))
			maple_detach_unit_nofix(sc, &sc->sc_unit[port][i]);
#endif

	sc->sc_port_unit_map[port] = unit_map;

	/* schedule scanning child devices */
	un = units & ~sc->sc_port_units[port];
	for (i = MAPLE_SUBUNITS - 1; i > 0; i--)
		if (un & (1 << i)) {
			u1 = &sc->sc_unit[port][i];
			maple_remove_from_queues(sc, u1);
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
			{
				char buf[16];
				printf("%s: queued to probe 2\n",
				    maple_unit_name(buf, sizeof(buf),
					u1->port, u1->subunit));
			}
#endif
			TAILQ_INSERT_HEAD(&sc->sc_probeq, u1, u_q);
			u1->u_queuestat = MAPLE_QUEUE_PROBE;
			u1->u_proberetry = 0;
		}
}
572
/*
 * Process a DEVINFO response: if the reported device identity differs
 * from what we recorded, a new device was plugged in, so attach it.
 * Either way the unit is then moved to the ping queue for liveness
 * monitoring.
 */
static void
maple_check_unit_change(struct maple_softc *sc, struct maple_unit *u)
{
	/* response payload (after the header word) is a maple_devinfo */
	struct maple_devinfo *newinfo = (void *) (u->u_rxbuf + 1);

	if (memcmp(&u->devinfo, newinfo, sizeof(struct maple_devinfo)) == 0)
		goto out;	/* no change */

	/* unit inserted */

	/* attach this device */
	u->devinfo = *newinfo;
	maple_attach_unit(sc, u);

out:
	maple_remove_from_queues(sc, u);
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	{
		char buf[16];
		printf("%s: queued to ping\n",
		    maple_unit_name(buf, sizeof(buf), u->port, u->subunit));
	}
#endif
	TAILQ_INSERT_TAIL(&sc->sc_pingq, u, u_q);
	u->u_queuestat = MAPLE_QUEUE_PING;
}
599
600 static void
maple_print_unit(void * aux,const char * pnp)601 maple_print_unit(void *aux, const char *pnp)
602 {
603 struct maple_attach_args *ma = aux;
604 int port, subunit;
605 char buf[16];
606 char *prod, *p, oc;
607
608 port = ma->ma_unit->port;
609 subunit = ma->ma_unit->subunit;
610
611 if (pnp != NULL)
612 printf("%s at %s", maple_unit_name(buf, sizeof(buf), port,
613 subunit), pnp);
614
615 printf(" port %d", port);
616
617 if (subunit != 0)
618 printf(" subunit %d", subunit);
619
620 #ifdef MAPLE_DEBUG
621 printf(": a %#x c %#x fn %#x d %#x,%#x,%#x",
622 ma->ma_devinfo->di_area_code,
623 ma->ma_devinfo->di_connector_direction,
624 be32toh(ma->ma_devinfo->di_func),
625 be32toh(ma->ma_devinfo->di_function_data[0]),
626 be32toh(ma->ma_devinfo->di_function_data[1]),
627 be32toh(ma->ma_devinfo->di_function_data[2]));
628 #endif
629
630 /* nul termination */
631 prod = ma->ma_devinfo->di_product_name;
632 for (p = prod + sizeof ma->ma_devinfo->di_product_name; p >= prod; p--)
633 if (p[-1] != '\0' && p[-1] != ' ')
634 break;
635 oc = *p;
636 *p = '\0';
637
638 printf(": %s", prod);
639
640 *p = oc; /* restore */
641 }
642
643 static int
maplesubmatch(device_t parent,cfdata_t match,const int * ldesc,void * aux)644 maplesubmatch(device_t parent, cfdata_t match, const int *ldesc, void *aux)
645 {
646 struct maple_attach_args *ma = aux;
647
648 if (match->cf_loc[MAPLECF_PORT] != MAPLECF_PORT_DEFAULT &&
649 match->cf_loc[MAPLECF_PORT] != ma->ma_unit->port)
650 return 0;
651
652 if (match->cf_loc[MAPLECF_SUBUNIT] != MAPLECF_SUBUNIT_DEFAULT &&
653 match->cf_loc[MAPLECF_SUBUNIT] != ma->ma_unit->subunit)
654 return 0;
655
656 return config_match(parent, match, aux);
657 }
658
/*
 * Autoconf print function for child (function) devices.  In debug
 * kernels always report the function number; otherwise only when no
 * driver attached (str != NULL means "not configured").
 */
static int
mapleprint(void *aux, const char *str)
{
	struct maple_attach_args *ma = aux;

#ifdef MAPLE_DEBUG
	if (str)
		aprint_normal("%s", str);
	aprint_normal(" function %d", ma->ma_function);

	return UNCONF;
#else	/* quiet */
	if (!str)
		aprint_normal(" function %d", ma->ma_function);

	return QUIET;
#endif
}
677
/*
 * Attach a newly discovered unit: print its banner, then call
 * config_found() once for every function bit the device advertises.
 * The bus device's xname is temporarily rewritten to the unit name
 * (e.g. "mapleA1") so child attach messages are labelled per unit
 * (XXX autoconf abuse — see the device_impl.h include above).
 */
static void
maple_attach_unit(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_attach_args ma;
	uint32_t func;
	int f;
	char oldxname[16];

	ma.ma_unit = u;
	ma.ma_devinfo = &u->devinfo;
	ma.ma_basedevinfo = &sc->sc_unit[u->port][0].devinfo;
	func = be32toh(ma.ma_devinfo->di_func);

	maple_print_unit(&ma, device_xname(sc->sc_dev));
	printf("\n");
	strcpy(oldxname, device_xname(sc->sc_dev));
	maple_unit_name(sc->sc_dev->dv_xname, sizeof(sc->sc_dev->dv_xname),
	    u->port, u->subunit);

	for (f = 0; f < MAPLE_NFUNC; f++) {
		u->u_func[f].f_callback = NULL;
		u->u_func[f].f_arg = NULL;
		u->u_func[f].f_cmdstat = MAPLE_CMDSTAT_NONE;
		u->u_func[f].f_dev = NULL;
		if (func & MAPLE_FUNC(f)) {
			ma.ma_function = f;
			u->u_func[f].f_dev =
			    config_found(sc->sc_dev, &ma, mapleprint,
			    CFARGS(.submatch = maplesubmatch));
			u->u_ping_func = f;	/* XXX using largest func */
		}
	}
#ifdef MAPLE_MEMCARD_PING_HACK
	/*
	 * Some 3rd party memory card pretend to be Visual Memory,
	 * but need special handling for ping.
	 */
	if (func == (MAPLE_FUNC(MAPLE_FN_MEMCARD) | MAPLE_FUNC(MAPLE_FN_LCD) |
	    MAPLE_FUNC(MAPLE_FN_CLOCK))) {
		u->u_ping_func = MAPLE_FN_MEMCARD;
		u->u_ping_stat = MAPLE_PING_MEMCARD;
	} else {
		u->u_ping_stat = MAPLE_PING_NORMAL;
	}
#endif
	/* restore the bus device's real name */
	strcpy(sc->sc_dev->dv_xname, oldxname);

	sc->sc_port_units[u->port] |= 1 << u->subunit;
}
727
/*
 * Detach a unit without fixing up sc_port_unit_map (see
 * maple_detach_unit() for the fixed-up variant).  For subunit 0 this
 * recursively detaches all of the port's subunits first.  Pending
 * commands are removed from the queues, child devices are force
 * detached, any queued retry is cancelled, the unit state is cleared,
 * and subunit 0 is re-queued for probing.
 */
static void
maple_detach_unit_nofix(struct maple_softc *sc, struct maple_unit *u)
{
	struct maple_func *fn;
	device_t dev;
	struct maple_unit *u1;
	int port;
	int error;
	int i;
	char buf[16];

#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
	printf("%s: remove\n", maple_unit_name(buf, sizeof(buf), u->port, u->subunit));
#endif
	maple_remove_from_queues(sc, u);
	port = u->port;
	sc->sc_port_units[port] &= ~(1 << u->subunit);

	/* the main unit takes all its subunits down with it */
	if (u->subunit == 0) {
		for (i = MAPLE_SUBUNITS - 1; i > 0; i--)
			maple_detach_unit_nofix(sc, &sc->sc_unit[port][i]);
	}

	for (fn = u->u_func; fn < &u->u_func[MAPLE_NFUNC]; fn++) {
		if ((dev = fn->f_dev) != NULL) {
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
			printf("%s: detaching func %d\n",
			    maple_unit_name(buf, sizeof(buf), port, u->subunit),
			    fn->f_funcno);
#endif

			/*
			 * Remove functions from command queue.
			 */
			switch (fn->f_cmdstat) {
			case MAPLE_CMDSTAT_ASYNC:
			case MAPLE_CMDSTAT_PERIODIC_DEFERED:
				TAILQ_REMOVE(&sc->sc_acmdq, fn, f_cmdq);
				break;
			case MAPLE_CMDSTAT_ASYNC_PERIODICQ:
			case MAPLE_CMDSTAT_PERIODIC:
				TAILQ_REMOVE(&sc->sc_pcmdq, fn, f_cmdq);
				break;
			default:
				break;
			}

			/*
			 * Detach devices.
			 */
			if ((error = config_detach(fn->f_dev, DETACH_FORCE))) {
				printf("%s: failed to detach %s (func %d), errno %d\n",
				    maple_unit_name(buf, sizeof(buf), port, u->subunit),
				    device_xname(fn->f_dev), fn->f_funcno, error);
			}
		}

		/* disable periodic GETCOND for every function, attached or not */
		maple_enable_periodic(sc->sc_dev, u, fn->f_funcno, 0);

		fn->f_dev = NULL;
		fn->f_callback = NULL;
		fn->f_arg = NULL;
		fn->f_cmdstat = MAPLE_CMDSTAT_NONE;
	}
	if (u->u_dma_stat == MAPLE_DMA_RETRY) {
		/* XXX expensive? */
		SIMPLEQ_FOREACH(u1, &sc->sc_retryq, u_dmaq) {
			if (u1 == u) {
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
				printf("%s: abort retry\n",
				    maple_unit_name(buf, sizeof(buf), port, u->subunit));
#endif
				SIMPLEQ_REMOVE(&sc->sc_retryq, u, maple_unit,
				    u_dmaq);
				break;
			}
		}
	}
	u->u_dma_stat = MAPLE_DMA_IDLE;
	u->u_noping = 0;
	/* u->u_dma_func = uninitialized; */
	KASSERT(u->getcond_func_set == 0);
	memset(&u->devinfo, 0, sizeof(struct maple_devinfo));

	if (u->subunit == 0) {
		sc->sc_port_unit_map[port] = 0;
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
		{
			char buf2[16];
			printf("%s: queued to probe 3\n",
			    maple_unit_name(buf2, sizeof(buf2), port, u->subunit));
		}
#endif
		TAILQ_INSERT_TAIL(&sc->sc_probeq, u, u_q);
		u->u_queuestat = MAPLE_QUEUE_PROBE;
	}
}
825
826 static void
maple_detach_unit(struct maple_softc * sc,struct maple_unit * u)827 maple_detach_unit(struct maple_softc *sc, struct maple_unit *u)
828 {
829
830 maple_detach_unit_nofix(sc, u);
831 if (u->subunit != 0)
832 sc->sc_port_unit_map[u->port] &= ~(1 << (u->subunit - 1));
833 }
834
835 /*
836 * Send a command (called by drivers)
837 *
838 * The "cataaddr" must not point at temporary storage like stack.
839 * Only one command (per function) is valid at a time.
840 */
841 void
maple_command(device_t dev,struct maple_unit * u,int func,int command,int datalen,const void * dataaddr,int flags)842 maple_command(device_t dev, struct maple_unit *u, int func,
843 int command, int datalen, const void *dataaddr, int flags)
844 {
845 struct maple_softc *sc = device_private(dev);
846 struct maple_func *fn;
847
848 KASSERT(func >= 0 && func < 32);
849 KASSERT(command);
850 KASSERT((flags & ~MAPLE_FLAG_CMD_PERIODIC_TIMING) == 0);
851
852 mutex_enter(&sc->sc_event_lock);
853
854 fn = &u->u_func[func];
855 #if 1 /*def DIAGNOSTIC*/
856 {char buf[16];
857 if (fn->f_cmdstat != MAPLE_CMDSTAT_NONE)
858 panic("maple_command: %s func %d: requesting more than one commands",
859 maple_unit_name(buf, sizeof(buf), u->port, u->subunit), func);
860 }
861 #endif
862 fn->f_command = command;
863 fn->f_datalen = datalen;
864 fn->f_dataaddr = dataaddr;
865 if (flags & MAPLE_FLAG_CMD_PERIODIC_TIMING) {
866 fn->f_cmdstat = MAPLE_CMDSTAT_PERIODIC;
867 TAILQ_INSERT_TAIL(&sc->sc_pcmdq, fn, f_cmdq);
868 } else {
869 fn->f_cmdstat = MAPLE_CMDSTAT_ASYNC;
870 TAILQ_INSERT_TAIL(&sc->sc_acmdq, fn, f_cmdq);
871 cv_broadcast(&sc->sc_event_cv); /* wake for async event */
872 }
873 mutex_exit(&sc->sc_event_lock);
874 }
875
/*
 * Drain a command queue (sc_acmdq or sc_pcmdq): write each command
 * whose unit is idle, and re-queue the rest — to the periodic queue if
 * the unit is awaiting a retry (it stays busy until the next periodic
 * timing), or back to the async queue otherwise.
 */
static void
maple_queue_cmds(struct maple_softc *sc,
    struct maple_cmdq_head *head)
{
	struct maple_func *fn, *nextfn;
	struct maple_unit *u;

	/*
	 * Note: since the queue element may be queued immediately,
	 * we can't use TAILQ_FOREACH.
	 */
	fn = TAILQ_FIRST(head);
	TAILQ_INIT(head);
	for ( ; fn; fn = nextfn) {
		nextfn = TAILQ_NEXT(fn, f_cmdq);

		KASSERT(fn->f_cmdstat != MAPLE_CMDSTAT_NONE);
		u = fn->f_unit;
		if (u->u_dma_stat == MAPLE_DMA_IDLE) {
			maple_write_command(sc, u,
			    fn->f_command, fn->f_datalen, fn->f_dataaddr);
			u->u_dma_stat = (fn->f_cmdstat == MAPLE_CMDSTAT_ASYNC ||
			    fn->f_cmdstat == MAPLE_CMDSTAT_ASYNC_PERIODICQ) ?
			    MAPLE_DMA_ACMD : MAPLE_DMA_PCMD;
			u->u_dma_func = fn->f_funcno;
			fn->f_cmdstat = MAPLE_CMDSTAT_NONE;
		} else if (u->u_dma_stat == MAPLE_DMA_RETRY) {
			/* unit is busy --- try again */
			/*
			 * always add to periodic command queue
			 * (wait until the next periodic timing),
			 * since the unit will never be freed until the
			 * next periodic timing.
			 */
			switch (fn->f_cmdstat) {
			case MAPLE_CMDSTAT_ASYNC:
				fn->f_cmdstat = MAPLE_CMDSTAT_ASYNC_PERIODICQ;
				break;
			case MAPLE_CMDSTAT_PERIODIC_DEFERED:
				fn->f_cmdstat = MAPLE_CMDSTAT_PERIODIC;
				break;
			default:
				break;
			}
			TAILQ_INSERT_TAIL(&sc->sc_pcmdq, fn, f_cmdq);
		} else {
			/* unit is busy --- try again */
			/*
			 * always add to async command queue
			 * (process immediately)
			 */
			switch (fn->f_cmdstat) {
			case MAPLE_CMDSTAT_ASYNC_PERIODICQ:
				fn->f_cmdstat = MAPLE_CMDSTAT_ASYNC;
				break;
			case MAPLE_CMDSTAT_PERIODIC:
				fn->f_cmdstat = MAPLE_CMDSTAT_PERIODIC_DEFERED;
				break;
			default:
				break;
			}
			TAILQ_INSERT_TAIL(&sc->sc_acmdq, fn, f_cmdq);
		}
	}
}
941
/*
 * schedule probing a device: take the first unit off the probe queue
 * (if any) and issue a DEVINFO request to it.
 */
static void
maple_unit_probe(struct maple_softc *sc)
{
	struct maple_unit *u;

	if ((u = TAILQ_FIRST(&sc->sc_probeq)) != NULL) {
		KASSERT(u->u_dma_stat == MAPLE_DMA_IDLE);
		KASSERT(u->u_queuestat == MAPLE_QUEUE_PROBE);
		maple_remove_from_queues(sc, u);
		maple_write_command(sc, u, MAPLE_COMMAND_DEVINFO, 0, NULL);
		u->u_dma_stat = MAPLE_DMA_PROBE;
		/* u->u_dma_func = ignored; */
	}
}
957
958 /*
959 * Enable/disable unit pinging (called by drivers)
960 */
961 /* ARGSUSED */
962 void
maple_enable_unit_ping(device_t dev,struct maple_unit * u,int func,int enable)963 maple_enable_unit_ping(device_t dev, struct maple_unit *u, int func, int enable)
964 {
965 #if 0 /* currently unused */
966 struct maple_softc *sc = device_private(dev);
967 #endif
968
969 if (enable)
970 u->u_noping &= ~MAPLE_FUNC(func);
971 else
972 u->u_noping |= MAPLE_FUNC(func);
973 }
974
/*
 * schedule pinging a device: take the first unit off the ping queue
 * and, if it is idle and pinging is not suppressed, issue a GETCOND
 * (or GETMINFO, for the memory-card hack) to verify it is still there.
 * Busy units are simply re-queued.
 */
static void
maple_unit_ping(struct maple_softc *sc)
{
	struct maple_unit *u;
	struct maple_func *fn;
#ifdef MAPLE_MEMCARD_PING_HACK
	static const uint32_t memcard_ping_arg[2] = {
		0x02000000,	/* htobe32(MAPLE_FUNC(MAPLE_FN_MEMCARD)) */
		0		/* pt (1 byte) and unused 3 bytes */
	};
#endif

	if ((u = TAILQ_FIRST(&sc->sc_pingq)) != NULL) {
		KASSERT(u->u_queuestat == MAPLE_QUEUE_PING);
		maple_remove_from_queues(sc, u);
		if (u->u_dma_stat == MAPLE_DMA_IDLE && u->u_noping == 0) {
#ifdef MAPLE_MEMCARD_PING_HACK
			if (u->u_ping_stat == MAPLE_PING_MINFO) {
				/* use MINFO for some memory cards */
				maple_write_command(sc, u,
				    MAPLE_COMMAND_GETMINFO,
				    2, memcard_ping_arg);
			} else
#endif
			{
				fn = &u->u_func[u->u_ping_func];
				fn->f_work = htobe32(MAPLE_FUNC(u->u_ping_func));
				maple_write_command(sc, u,
				    MAPLE_COMMAND_GETCOND,
				    1, &fn->f_work);
			}
			u->u_dma_stat = MAPLE_DMA_PING;
			/* u->u_dma_func = XXX; */
		} else {
			/* no need if periodic */
			TAILQ_INSERT_TAIL(&sc->sc_pingq, u, u_q);
			u->u_queuestat = MAPLE_QUEUE_PING;
		}
	}
}
1016
1017 /*
1018 * Enable/disable periodic GETCOND (called by drivers)
1019 */
1020 void
maple_enable_periodic(device_t dev,struct maple_unit * u,int func,int on)1021 maple_enable_periodic(device_t dev, struct maple_unit *u, int func, int on)
1022 {
1023 struct maple_softc *sc = device_private(dev);
1024 struct maple_func *fn;
1025
1026 KASSERT(func >= 0 && func < 32);
1027
1028 fn = &u->u_func[func];
1029
1030 if (on) {
1031 if (fn->f_periodic_stat == MAPLE_PERIODIC_NONE) {
1032 TAILQ_INSERT_TAIL(&sc->sc_periodicq, fn, f_periodicq);
1033 fn->f_periodic_stat = MAPLE_PERIODIC_INQ;
1034 u->getcond_func_set |= MAPLE_FUNC(func);
1035 }
1036 } else {
1037 if (fn->f_periodic_stat == MAPLE_PERIODIC_INQ)
1038 TAILQ_REMOVE(&sc->sc_periodicq, fn, f_periodicq);
1039 else if (fn->f_periodic_stat == MAPLE_PERIODIC_DEFERED)
1040 TAILQ_REMOVE(&sc->sc_periodicdeferq, fn, f_periodicq);
1041 fn->f_periodic_stat = MAPLE_PERIODIC_NONE;
1042 u->getcond_func_set &= ~MAPLE_FUNC(func);
1043 }
1044 }
1045
1046 /*
1047 * queue periodic GETCOND
1048 */
1049 static int
maple_send_defered_periodic(struct maple_softc * sc)1050 maple_send_defered_periodic(struct maple_softc *sc)
1051 {
1052 struct maple_unit *u;
1053 struct maple_func *fn, *nextfn;
1054 int defer_remain = 0;
1055
1056 for (fn = TAILQ_FIRST(&sc->sc_periodicdeferq); fn; fn = nextfn) {
1057 KASSERT(fn->f_periodic_stat == MAPLE_PERIODIC_DEFERED);
1058
1059 nextfn = TAILQ_NEXT(fn, f_periodicq);
1060
1061 u = fn->f_unit;
1062 if (u->u_dma_stat == MAPLE_DMA_IDLE ||
1063 u->u_dma_stat == MAPLE_DMA_RETRY) {
1064 /*
1065 * if IDLE -> queue this request
1066 * if RETRY -> the unit never be freed until the next
1067 * periodic timing, so just restore to
1068 * the normal periodic queue.
1069 */
1070 TAILQ_REMOVE(&sc->sc_periodicdeferq, fn, f_periodicq);
1071 TAILQ_INSERT_TAIL(&sc->sc_periodicq, fn, f_periodicq);
1072 fn->f_periodic_stat = MAPLE_PERIODIC_INQ;
1073
1074 if (u->u_dma_stat == MAPLE_DMA_IDLE) {
1075 /*
1076 * queue periodic command
1077 */
1078 fn->f_work = htobe32(MAPLE_FUNC(fn->f_funcno));
1079 maple_write_command(sc, u,
1080 MAPLE_COMMAND_GETCOND, 1, &fn->f_work);
1081 u->u_dma_stat = MAPLE_DMA_PERIODIC;
1082 u->u_dma_func = fn->f_funcno;
1083 }
1084 } else {
1085 defer_remain = 1;
1086 }
1087 }
1088
1089 return defer_remain;
1090 }
1091
1092 static void
maple_send_periodic(struct maple_softc * sc)1093 maple_send_periodic(struct maple_softc *sc)
1094 {
1095 struct maple_unit *u;
1096 struct maple_func *fn, *nextfn;
1097
1098 for (fn = TAILQ_FIRST(&sc->sc_periodicq); fn; fn = nextfn) {
1099 KASSERT(fn->f_periodic_stat == MAPLE_PERIODIC_INQ);
1100
1101 nextfn = TAILQ_NEXT(fn, f_periodicq);
1102
1103 u = fn->f_unit;
1104 if (u->u_dma_stat != MAPLE_DMA_IDLE) {
1105 if (u->u_dma_stat != MAPLE_DMA_RETRY) {
1106 /*
1107 * can't be queued --- move to defered queue
1108 */
1109 TAILQ_REMOVE(&sc->sc_periodicq, fn,
1110 f_periodicq);
1111 TAILQ_INSERT_TAIL(&sc->sc_periodicdeferq, fn,
1112 f_periodicq);
1113 fn->f_periodic_stat = MAPLE_PERIODIC_DEFERED;
1114 }
1115 } else {
1116 /*
1117 * queue periodic command
1118 */
1119 fn->f_work = htobe32(MAPLE_FUNC(fn->f_funcno));
1120 maple_write_command(sc, u, MAPLE_COMMAND_GETCOND,
1121 1, &fn->f_work);
1122 u->u_dma_stat = MAPLE_DMA_PERIODIC;
1123 u->u_dma_func = fn->f_funcno;
1124 }
1125 }
1126 }
1127
1128 static void
maple_remove_from_queues(struct maple_softc * sc,struct maple_unit * u)1129 maple_remove_from_queues(struct maple_softc *sc, struct maple_unit *u)
1130 {
1131
1132 /* remove from queues */
1133 if (u->u_queuestat == MAPLE_QUEUE_PROBE)
1134 TAILQ_REMOVE(&sc->sc_probeq, u, u_q);
1135 else if (u->u_queuestat == MAPLE_QUEUE_PING)
1136 TAILQ_REMOVE(&sc->sc_pingq, u, u_q);
1137 #ifdef DIAGNOSTIC
1138 else if (u->u_queuestat != MAPLE_QUEUE_NONE)
1139 panic("maple_remove_from_queues: queuestat %d", u->u_queuestat);
1140 #endif
1141 #if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
1142 if (u->u_queuestat != MAPLE_QUEUE_NONE) {
1143 char buf[16];
1144 printf("%s: dequeued\n",
1145 maple_unit_name(buf, sizeof(buf), u->port, u->subunit));
1146 }
1147 #endif
1148
1149 u->u_queuestat = MAPLE_QUEUE_NONE;
1150 }
1151
1152 /*
1153 * retry current command at next periodic timing
1154 */
static int
maple_retry(struct maple_softc *sc, struct maple_unit *u,
    enum maple_dma_stat st)
{

	/* a unit in IDLE or RETRY state has no command in flight to retry */
	KASSERT(st != MAPLE_DMA_IDLE && st != MAPLE_DMA_RETRY);

#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
	/* log only on the first retry of a command */
	if (u->u_retrycnt == 0) {
		char buf[16];
		printf("%s: retrying: %#x, %#x, %p\n",
		    maple_unit_name(buf, sizeof(buf), u->port, u->subunit),
		    u->u_command, u->u_datalen, u->u_dataaddr);
	}
#endif
	/* nonzero return tells the caller the retry limit is exceeded */
	if (u->u_retrycnt >= MAPLE_RETRY_MAX)
		return 1;

	u->u_retrycnt++;

	/* save the DMA state so maple_queue_retry() can restore it */
	u->u_saved_dma_stat = st;
	u->u_dma_stat = MAPLE_DMA_RETRY; /* no new command before retry done */
	SIMPLEQ_INSERT_TAIL(&sc->sc_retryq, u, u_dmaq);

	return 0;
}
1181
1182 static void
maple_queue_retry(struct maple_softc * sc)1183 maple_queue_retry(struct maple_softc *sc)
1184 {
1185 struct maple_unit *u, *nextu;
1186
1187 /*
1188 * Note: since the queue element is queued immediately
1189 * in maple_queue_command, we can't use SIMPLEQ_FOREACH.
1190 */
1191 for (u = SIMPLEQ_FIRST(&sc->sc_retryq); u; u = nextu) {
1192 nextu = SIMPLEQ_NEXT(u, u_dmaq);
1193
1194 /*
1195 * Retrying is in the highest priority, and the unit shall
1196 * always be free.
1197 */
1198 KASSERT(u->u_dma_stat == MAPLE_DMA_RETRY);
1199 maple_queue_command(sc, u, u->u_command, u->u_datalen,
1200 u->u_dataaddr);
1201 u->u_dma_stat = u->u_saved_dma_stat;
1202
1203 #ifdef DIAGNOSTIC
1204 KASSERT(u->u_saved_dma_stat != MAPLE_DMA_IDLE);
1205 u->u_saved_dma_stat = MAPLE_DMA_IDLE;
1206 #endif
1207 }
1208 SIMPLEQ_INIT(&sc->sc_retryq);
1209 }
1210
1211 /*
1212 * Process DMA results.
1213 * Requires kernel context.
1214 */
static void
maple_check_responses(struct maple_softc *sc)
{
	struct maple_unit *u, *nextu;
	struct maple_func *fn;
	maple_response_t response;
	int func_code, len;
	int flags;
	char buf[16];

	/*
	 * Note: since the queue element may be queued immediately,
	 * we can't use SIMPLEQ_FOREACH.
	 */
	for (u = SIMPLEQ_FIRST(&sc->sc_dmaq), maple_begin_txbuf(sc);
	    u; u = nextu) {
		nextu = SIMPLEQ_NEXT(u, u_dmaq);

		if (u->u_dma_stat == MAPLE_DMA_IDLE)
			continue;	/* just detached or DDB was active */

		/*
		 * check for retransmission
		 */
		if ((response = u->u_rxbuf[0]) == MAPLE_RESPONSE_AGAIN) {
			/* requeue unless the retry limit is exceeded */
			if (maple_retry(sc, u, u->u_dma_stat) == 0)
				continue;
			/* else pass error to upper layer */
		}

		/* top byte of the first receive word is the payload length */
		len = (u->u_rxbuf[0] >> 24);	/* length in long */
		len <<= 2;			/* length in byte */

		/*
		 * call handler, dispatching on what kind of command the
		 * response belongs to
		 */
		if (u->u_dma_stat == MAPLE_DMA_PERIODIC) {
			/*
			 * periodic GETCOND
			 */
			u->u_dma_stat = MAPLE_DMA_IDLE;
			func_code = u->u_dma_func;
			if (response == MAPLE_RESPONSE_DATATRF && len > 0 &&
			    be32toh(u->u_rxbuf[1]) == MAPLE_FUNC(func_code)) {
				/* data is for the polled function; deliver */
				fn = &u->u_func[func_code];
				if (fn->f_dev)
					(*fn->f_callback)(fn->f_arg,
					    (void *)u->u_rxbuf, len,
					    MAPLE_FLAG_PERIODIC);
			} else if (response == MAPLE_RESPONSE_NONE) {
				/* XXX OK? */
				/* detach */
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
				printf("%s: func: %d: periodic response %d\n",
				    maple_unit_name(buf, sizeof(buf),
					u->port, u->subunit),
				    u->u_dma_func,
				    response);
#endif
				/*
				 * Some 3rd party devices sometimes
				 * do not respond.
				 */
				if (maple_retry(sc, u, MAPLE_DMA_PERIODIC))
					maple_detach_unit(sc, u);
			}
			/* XXX check unexpected conditions? */

		} else if (u->u_dma_stat == MAPLE_DMA_PROBE) {
			KASSERT(u->u_queuestat == MAPLE_QUEUE_NONE);
			u->u_dma_stat = MAPLE_DMA_IDLE;
			switch (response) {
			default:
			case MAPLE_RESPONSE_NONE:
				/*
				 * Do not use maple_retry(), which conflicts
				 * with probe structure.
				 */
				if (u->subunit != 0 &&
				    ++u->u_proberetry > MAPLE_PROBERETRY_MAX) {
					/* subunit probe retry limit reached */
					printf("%s: no response\n",
					    maple_unit_name(buf, sizeof(buf),
						u->port, u->subunit));
				} else {
					/* probe again */
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 2
					printf("%s: queued to probe 4\n",
					    maple_unit_name(buf, sizeof(buf),
						u->port, u->subunit));
#endif
					TAILQ_INSERT_TAIL(&sc->sc_probeq, u,
					    u_q);
					u->u_queuestat = MAPLE_QUEUE_PROBE;
				}
				break;
			case MAPLE_RESPONSE_DEVINFO:
				/* check if the unit is changed */
				maple_check_unit_change(sc, u);
				break;
			}

		} else if (u->u_dma_stat == MAPLE_DMA_PING) {
			KASSERT(u->u_queuestat == MAPLE_QUEUE_NONE);
			u->u_dma_stat = MAPLE_DMA_IDLE;
			switch (response) {
			default:
			case MAPLE_RESPONSE_NONE:
				/*
				 * Some 3rd party devices sometimes
				 * do not respond.
				 */
				if (maple_retry(sc, u, MAPLE_DMA_PING)) {
					/* detach */
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
					printf("%s: ping response %d\n",
					    maple_unit_name(buf, sizeof(buf),
						u->port, u->subunit),
					    response);
#endif
#ifdef MAPLE_MEMCARD_PING_HACK
					if (u->u_ping_stat
					    == MAPLE_PING_MEMCARD) {
						/*
						 * The unit claims itself to be
						 * a Visual Memory, and has
						 * never responded to GETCOND.
						 * Try again using MINFO, in
						 * case it is a poorly
						 * implemented 3rd party card.
						 */
#ifdef MAPLE_DEBUG
						printf("%s: switching ping method\n",
						    maple_unit_name(buf,
							sizeof(buf),
							u->port, u->subunit));
#endif
						u->u_ping_stat
						    = MAPLE_PING_MINFO;
						TAILQ_INSERT_TAIL(&sc->sc_pingq,
						    u, u_q);
						u->u_queuestat
						    = MAPLE_QUEUE_PING;
					} else
#endif	/* MAPLE_MEMCARD_PING_HACK */
						maple_detach_unit(sc, u);
				}
				break;
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
			case MAPLE_RESPONSE_DATATRF:
				/* any response means the unit is alive;
				 * keep it on the ping queue */
				TAILQ_INSERT_TAIL(&sc->sc_pingq, u, u_q);
				u->u_queuestat = MAPLE_QUEUE_PING;
#ifdef MAPLE_MEMCARD_PING_HACK
				/*
				 * If the unit responds to GETCOND, it is a
				 * normal implementation.
				 */
				if (u->u_ping_stat == MAPLE_PING_MEMCARD)
					u->u_ping_stat = MAPLE_PING_NORMAL;
#endif
				break;
			}

		} else {
			/*
			 * Note: Do not rely on the consistency of responses.
			 */

			if (response == MAPLE_RESPONSE_NONE) {
				if (maple_retry(sc, u, u->u_dma_stat)) {
					/* detach */
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
					printf("%s: command response %d\n",
					    maple_unit_name(buf, sizeof(buf),
						u->port, u->subunit),
					    response);
#endif
					maple_detach_unit(sc, u);
				}
				continue;
			}

			/* tell the handler whether this command was issued
			 * on periodic timing */
			flags = (u->u_dma_stat == MAPLE_DMA_PCMD) ?
			    MAPLE_FLAG_CMD_PERIODIC_TIMING : 0;
			u->u_dma_stat = MAPLE_DMA_IDLE;

			func_code = u->u_dma_func;
			fn = &u->u_func[func_code];
			if (fn->f_dev == NULL) {
				/* detached right now */
#ifdef MAPLE_DEBUG
				printf("%s: unknown function: function %d, response %d\n",
				    maple_unit_name(buf, sizeof(buf),
					u->port, u->subunit),
				    func_code, response);
#endif
				continue;
			}
			if (fn->f_callback != NULL) {
				(*fn->f_callback)(fn->f_arg,
				    (void *)u->u_rxbuf, len, flags);
			}
		}

		/*
		 * check for subunit change and schedule probing subunits
		 */
		if (u->subunit == 0 && response != MAPLE_RESPONSE_NONE &&
		    response != MAPLE_RESPONSE_AGAIN &&
		    ((int8_t *) u->u_rxbuf)[2] != sc->sc_port_unit_map[u->port])
			maple_check_subunit_change(sc, u);
	}
}
1424
1425 /*
1426 * Main Maple Bus thread
1427 */
static void
maple_event_thread(void *arg)
{
	struct maple_softc *sc = arg;
	unsigned cnt = 1;	/* timing counter */
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
	int noreq = 0;
#endif

#ifdef MAPLE_DEBUG
	printf("%s: forked event thread, pid %d\n",
	    device_xname(sc->sc_dev), sc->event_thread->l_proc->p_pid);
#endif

	/* begin first DMA cycle */
	maple_begin_txbuf(sc);

	/* force the first loop iteration to run the periodic work */
	sc->sc_event = 1;

	/* OK, continue booting system */
	maple_polling = 0;
	config_pending_decr(sc->sc_dev);

	for (;;) {
		/*
		 * queue requests
		 */

		/* queue async commands */
		if (!TAILQ_EMPTY(&sc->sc_acmdq))
			maple_queue_cmds(sc, &sc->sc_acmdq);

		/* send defered periodic command */
		if (!TAILQ_EMPTY(&sc->sc_periodicdeferq))
			maple_send_defered_periodic(sc);

		/* queue periodic commands */
		if (sc->sc_event) {
			/* queue commands on periodic timing */
			if (!TAILQ_EMPTY(&sc->sc_pcmdq))
				maple_queue_cmds(sc, &sc->sc_pcmdq);

			/* retry */
			if (!SIMPLEQ_EMPTY(&sc->sc_retryq))
				maple_queue_retry(sc);

			/* probe queued units on every 32nd periodic event */
			if ((cnt & 31) == 0)	/* XXX */
				maple_unit_probe(sc);
			cnt++;

			maple_send_periodic(sc);
			/* ping attached units on every 8th periodic event */
			if ((cnt & 7) == 0)	/* XXX */
				maple_unit_ping(sc);

			/*
			 * schedule periodic event
			 */
			sc->sc_event = 0;
			callout_reset(&sc->maple_callout_ch,
			    MAPLE_CALLOUT_TICKS, maple_callout, sc);
		}

		if (maple_end_txbuf(sc)) {

			/*
			 * start DMA
			 */
			mutex_enter(&sc->sc_dma_lock);
			maple_start(sc);

			/*
			 * wait until DMA done (maple_intr() broadcasts
			 * sc_dma_cv on completion)
			 */
			if (cv_timedwait(&sc->sc_dma_cv, &sc->sc_dma_lock, hz)
			    == EWOULDBLOCK) {
				/* was DDB active? */
				printf("%s: timed out\n",
				    device_xname(sc->sc_dev));
			}
			mutex_exit(&sc->sc_dma_lock);

			/*
			 * call handlers
			 */
			maple_check_responses(sc);
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
			noreq = 0;
#endif
		}
#if defined(MAPLE_DEBUG) && MAPLE_DEBUG > 1
		else {
			/* weird if occurs in succession */
#if MAPLE_DEBUG <= 2
			if (noreq)	/* ignore first time */
#endif
				printf("%s: no request %d\n",
				    device_xname(sc->sc_dev), noreq);
			noreq++;
		}
#endif

		/*
		 * wait for an event (async command, periodic callout, or
		 * defered periodic work); time out after 1 second
		 */
		mutex_enter(&sc->sc_event_lock);
		if (TAILQ_EMPTY(&sc->sc_acmdq) && sc->sc_event == 0 &&
		    TAILQ_EMPTY(&sc->sc_periodicdeferq)) {
			if (cv_timedwait(&sc->sc_event_cv, &sc->sc_event_lock,
			    hz) == EWOULDBLOCK) {
				printf("%s: event timed out\n",
				    device_xname(sc->sc_dev));
			}

		}
		mutex_exit(&sc->sc_event_lock);

	}

#if 0	/* maple root device can't be detached */
	kthread_exit(0);
	/* NOTREACHED */
#endif
}
1551
static int
maple_intr(void *arg)
{
	struct maple_softc *sc = arg;

	/* DMA completed: wake up maple_event_thread() waiting on sc_dma_cv */
	mutex_enter(&sc->sc_dma_lock);
	cv_broadcast(&sc->sc_dma_cv);
	mutex_exit(&sc->sc_dma_lock);

	return 1;	/* interrupt handled */
}
1563
static void
maple_callout(void *ctx)
{
	struct maple_softc *sc = ctx;

	/* wake up the event thread to run the next periodic cycle */
	mutex_enter(&sc->sc_event_lock);
	sc->sc_event = 1;	/* mark as periodic event */
	cv_broadcast(&sc->sc_event_cv);
	mutex_exit(&sc->sc_event_lock);
}
1574
1575 /*
1576 * Install callback handler (called by drivers)
1577 */
1578 /* ARGSUSED */
1579 void
maple_set_callback(device_t dev,struct maple_unit * u,int func,void (* callback)(void *,struct maple_response *,int,int),void * arg)1580 maple_set_callback(device_t dev, struct maple_unit *u, int func,
1581 void (*callback)(void *, struct maple_response *, int, int), void *arg)
1582 {
1583 #if 0 /* currently unused */
1584 struct maple_softc *sc = device_private(dev);
1585 #endif
1586 struct maple_func *fn;
1587
1588 KASSERT(func >= 0 && func < MAPLE_NFUNC);
1589
1590 fn = &u->u_func[func];
1591
1592 fn->f_callback = callback;
1593 fn->f_arg = arg;
1594 }
1595
1596 /*
1597 * Return function definition data (called by drivers)
1598 */
1599 uint32_t
maple_get_function_data(struct maple_devinfo * devinfo,int function_code)1600 maple_get_function_data(struct maple_devinfo *devinfo, int function_code)
1601 {
1602 int i, p = 0;
1603 uint32_t func;
1604
1605 func = be32toh(devinfo->di_func);
1606 for (i = 31; i >= 0; --i)
1607 if (func & MAPLE_FUNC(i)) {
1608 if (function_code == i)
1609 return be32toh(devinfo->di_function_data[p]);
1610 else
1611 if (++p >= 3)
1612 break;
1613 }
1614
1615 return 0;
1616 }
1617
1618 /* Generic maple device interface */
1619
1620 int
mapleopen(dev_t dev,int flag,int mode,struct lwp * l)1621 mapleopen(dev_t dev, int flag, int mode, struct lwp *l)
1622 {
1623 struct maple_softc *sc;
1624
1625 sc = device_lookup_private(&maple_cd, MAPLEBUSUNIT(dev));
1626 if (sc == NULL) /* make sure it was attached */
1627 return ENXIO;
1628
1629 if (MAPLEPORT(dev) >= MAPLE_PORTS)
1630 return ENXIO;
1631
1632 if (MAPLESUBUNIT(dev) >= MAPLE_SUBUNITS)
1633 return ENXIO;
1634
1635 if (!(sc->sc_port_units[MAPLEPORT(dev)] & (1 << MAPLESUBUNIT(dev))))
1636 return ENXIO;
1637
1638 sc->sc_port_units_open[MAPLEPORT(dev)] |= 1 << MAPLESUBUNIT(dev);
1639
1640 return 0;
1641 }
1642
1643 int
mapleclose(dev_t dev,int flag,int mode,struct lwp * l)1644 mapleclose(dev_t dev, int flag, int mode, struct lwp *l)
1645 {
1646 struct maple_softc *sc;
1647
1648 sc = device_lookup_private(&maple_cd, MAPLEBUSUNIT(dev));
1649
1650 sc->sc_port_units_open[MAPLEPORT(dev)] &= ~(1 << MAPLESUBUNIT(dev));
1651
1652 return 0;
1653 }
1654
1655 int
maple_unit_ioctl(device_t dev,struct maple_unit * u,u_long cmd,void * data,int flag,struct lwp * l)1656 maple_unit_ioctl(device_t dev, struct maple_unit *u, u_long cmd,
1657 void *data, int flag, struct lwp *l)
1658 {
1659 struct maple_softc *sc = device_private(dev);
1660
1661 if (!(sc->sc_port_units[u->port] & (1 << u->subunit)))
1662 return ENXIO;
1663
1664 switch(cmd) {
1665 case MAPLEIO_GDEVINFO:
1666 memcpy(data, &u->devinfo, sizeof(struct maple_devinfo));
1667 break;
1668 default:
1669 return EPASSTHROUGH;
1670 }
1671
1672 return 0;
1673 }
1674
1675 int
mapleioctl(dev_t dev,u_long cmd,void * data,int flag,struct lwp * l)1676 mapleioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1677 {
1678 struct maple_softc *sc;
1679 struct maple_unit *u;
1680
1681 sc = device_lookup_private(&maple_cd, MAPLEBUSUNIT(dev));
1682 u = &sc->sc_unit[MAPLEPORT(dev)][MAPLESUBUNIT(dev)];
1683
1684 return maple_unit_ioctl(sc->sc_dev, u, cmd, data, flag, l);
1685 }
1686