1 /* $NetBSD: sw.c,v 1.26 2023/01/23 22:16:44 andvar Exp $ */
2
3 /*-
4 * Copyright (c) 1996 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Adam Glass, David Jones, Gordon W. Ross, and Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * This file contains only the machine-dependent parts of the
34 * Sun4 SCSI driver. (Autoconfig stuff and DMA functions.)
35 * The machine-independent parts are in ncr5380sbc.c
36 *
37 * Supported hardware includes:
38 * Sun "SCSI Weird" on OBIO (sw: Sun 4/100-series)
39 * Sun SCSI-3 on VME (si: Sun 4/200-series, others)
40 *
41 * The VME variant has a bit to enable or disable the DMA engine,
42 * but that bit also gates the interrupt line from the NCR5380!
43  * Therefore, in order to get any interrupt from the 5380 (i.e.
44  * for reselect), one must clear the DMA engine transfer count and
45 * then enable DMA. This has the further complication that you
46 * CAN NOT touch the NCR5380 while the DMA enable bit is set, so
47 * we have to turn DMA back off before we even look at the 5380.
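 * (In this driver, sw_intr_on() and sw_intr_off() below implement
 * exactly this dance: clear the count and enable DMA to allow
 * interrupts, and disable DMA again before touching the 5380.)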
48 *
49 * What wonderfully whacky hardware this is!
50 *
51 * David Jones wrote the initial version of this module for NetBSD/sun3,
52  * which included support for the VME adapter only (no reselection).
53 *
54 * Gordon Ross added support for the Sun 3 OBIO adapter, and re-worked
55 * both the VME and OBIO code to support disconnect/reselect.
56 * (Required figuring out the hardware "features" noted above.)
57 *
58 * The autoconfiguration boilerplate came from Adam Glass.
59 *
60 * Jason R. Thorpe ported the autoconfiguration and VME portions to
61 * NetBSD/sparc, and added initial support for the 4/100 "SCSI Weird",
62 * a wacky OBIO variant of the VME SCSI-3. Many thanks to Chuck Cranor
63 * for lots of helpful tips and suggestions. Thanks also to Paul Kranenburg
64 * and Chris Torek for bits of insight needed along the way. Thanks to
65 * David Gilbert and Andrew Gillham who risked filesystem life-and-limb
66 * for the sake of testing. Andrew Gillham helped work out the bugs
67  * in the 4/100 DMA code.
68 */
69
70 /*
71 * NOTE: support for the 4/100 "SCSI Weird" is not complete! DMA
72 * works, but interrupts (and, thus, reselection) don't. I don't know
73 * why, and I don't have a machine to test this on further.
74 *
75 * DMA, DMA completion interrupts, and reselection work fine on my
76 * 4/260 with modern SCSI-II disks attached. I've had reports of
77 * reselection failing on Sun Shoebox-type configurations where
78 * there are multiple non-SCSI devices behind Emulex or Adaptec
79 * bridges. These devices pre-date the SCSI-I spec, and might not
80 * behave the way the 5380 code expects. For this reason, only
81 * DMA is enabled by default in this driver.
82 *
83 * Jason R. Thorpe <thorpej@NetBSD.org>
84 * December 8, 1995
85 */
86
87 #include <sys/cdefs.h>
88 __KERNEL_RCSID(0, "$NetBSD: sw.c,v 1.26 2023/01/23 22:16:44 andvar Exp $");
89
90 #include "opt_ddb.h"
91
92 #include <sys/types.h>
93 #include <sys/param.h>
94 #include <sys/systm.h>
95 #include <sys/kernel.h>
96 #include <sys/kmem.h>
97 #include <sys/errno.h>
98 #include <sys/device.h>
99 #include <sys/buf.h>
100
101 #include <sys/bus.h>
102 #include <machine/intr.h>
103 #include <machine/autoconf.h>
104
105 #include <dev/scsipi/scsi_all.h>
106 #include <dev/scsipi/scsipi_all.h>
107 #include <dev/scsipi/scsipi_debug.h>
108 #include <dev/scsipi/scsiconf.h>
109
110 #ifndef DDB
111 #define Debugger()
112 #endif
113
114 #ifndef DEBUG
115 #define DEBUG XXX
116 #endif
117
118 #define COUNT_SW_LEFTOVERS XXX /* See sw DMA completion code */
119
120 #include <dev/ic/ncr5380reg.h>
121 #include <dev/ic/ncr5380var.h>
122
123 #include <sparc/dev/swreg.h>
124
125 /*
126 * Transfers smaller than this are done using PIO
127  * (on the assumption that they're not worth the DMA overhead)
128 */
129 #define MIN_DMA_LEN 128
130
131 /*
132 * Transfers larger than 65535 bytes need to be split-up.
133  * (Some of the FIFO logic has only 16-bit counters.)
134 * Make the size an integer multiple of the page size
135 * to avoid buf/cluster remap problems. (paranoid?)
136 */
137 #define MAX_DMA_LEN 0xE000
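/*
 * (0xE000 = 57344 = 7 * 8192, so MAX_DMA_LEN divides evenly into both
 * 4 KB and 8 KB pages while staying below the 16-bit limit of 65535.)
 */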
138
139 #ifdef DEBUG
140 int sw_debug = 0;
141 #endif
142
143 /*
144 * This structure is used to keep track of mapped DMA requests.
145 */
146 struct sw_dma_handle {
147 int dh_flags;
148 #define SIDH_BUSY 0x01 /* This DH is in use */
149 #define SIDH_OUT 0x02 /* DMA does data out (write) */
150 u_char *dh_addr; /* KVA of start of buffer */
151 int dh_maplen; /* Original data length */
152 long dh_startingpa; /* PA of buffer; for "sw" */
153 bus_dmamap_t dh_dmamap;
154 #define dh_dvma dh_dmamap->dm_segs[0].ds_addr /* VA of buffer in DVMA space */
155 };
156
157 /*
158 * The first structure member has to be the ncr5380_softc
159  * so we can just cast to go back and forth between them.
160 */
161 struct sw_softc {
162 struct ncr5380_softc ncr_sc;
163 bus_space_tag_t sc_bustag; /* bus tags */
164 bus_dma_tag_t sc_dmatag;
165
166 struct sw_dma_handle *sc_dma;
167 int sc_xlen; /* length of current DMA segment. */
168 int sc_options; /* options for this instance. */
169 };
170
171 /*
172 * Options. By default, DMA is enabled and DMA completion interrupts
173  * and reselect are disabled. You may enable additional features with
174  * the `flags' directive in your kernel's configuration file.
175 *
176 * Alternatively, you can patch your kernel with DDB or some other
177 * mechanism. The sc_options member of the softc is OR'd with
178 * the value in sw_options.
179 *
180  * On the "sw", interrupts (and thus reselection) don't work, so they're
181 * disabled by default. DMA is still a little dangerous, too.
182 *
183  * Note that sw_options is a separate global to make such patching easier.
184 */
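/*
 * For example (the device locators here are illustrative only), a
 * kernel configuration line along the lines of
 *
 *	sw0 at obio0 ... flags 0x07
 *
 * would enable DMA, DMA completion interrupts, and disconnect/reselect.
 */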
185 #define SW_ENABLE_DMA 0x01 /* Use DMA (maybe polled) */
186 #define SW_DMA_INTR 0x02 /* DMA completion interrupts */
187 #define SW_DO_RESELECT 0x04 /* Allow disconnect/reselect */
188 #define SW_OPTIONS_MASK (SW_ENABLE_DMA|SW_DMA_INTR|SW_DO_RESELECT)
189 #define SW_OPTIONS_BITS "\10\3RESELECT\2DMA_INTR\1DMA"
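/*
 * (SW_OPTIONS_BITS is an old-style snprintb(3) format string: the
 * leading '\10' selects octal output for the value, and each
 * '\<bit>NAME' pair names one bit, counting from 1 at the LSB:
 * 1=DMA, 2=DMA_INTR, 3=RESELECT.)
 */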
190 int sw_options = SW_ENABLE_DMA;
191
192 static int sw_match(device_t, cfdata_t, void *);
193 static void sw_attach(device_t, device_t, void *);
194 static int sw_intr(void *);
195 static void sw_reset_adapter(struct ncr5380_softc *);
196 static void sw_minphys(struct buf *);
197
198 void sw_dma_alloc(struct ncr5380_softc *);
199 void sw_dma_free(struct ncr5380_softc *);
200 void sw_dma_poll(struct ncr5380_softc *);
201
202 void sw_dma_setup(struct ncr5380_softc *);
203 void sw_dma_start(struct ncr5380_softc *);
204 void sw_dma_eop(struct ncr5380_softc *);
205 void sw_dma_stop(struct ncr5380_softc *);
206
207 void sw_intr_on(struct ncr5380_softc *);
208 void sw_intr_off(struct ncr5380_softc *);
209
210 /* Shorthand bus space access */
211 #define SWREG_READ(sc, index) \
212 bus_space_read_4((sc)->sc_regt, (sc)->sc_regh, index)
213 #define SWREG_WRITE(sc, index, v) \
214 bus_space_write_4((sc)->sc_regt, (sc)->sc_regh, index, v)
215
216
217 /* The Sun "SCSI Weird" 4/100 obio controller. */
218 CFATTACH_DECL_NEW(sw, sizeof(struct sw_softc),
219 sw_match, sw_attach, NULL, NULL);
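/* (CFATTACH_DECL_NEW arguments: name, softc size, match, attach, detach, activate.) */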
220
221 static int
222 sw_match(device_t parent, cfdata_t cf, void *aux)
223 {
224 union obio_attach_args *uoba = aux;
225 struct obio4_attach_args *oba;
226
227 /* Nothing but a Sun 4/100 is going to have these devices. */
228 if (cpuinfo.cpu_type != CPUTYP_4_100)
229 return (0);
230
231 if (uoba->uoba_isobio4 == 0)
232 return (0);
233
234 /* Make sure there is something there... */
235 oba = &uoba->uoba_oba4;
236 return (bus_space_probe(oba->oba_bustag, oba->oba_paddr,
237 1, /* probe size */
238 1, /* offset */
239 0, /* flags */
240 NULL, NULL));
241 }
242
243 static void
244 sw_attach(device_t parent, device_t self, void *aux)
245 {
246 struct sw_softc *sc = device_private(self);
247 struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
248 union obio_attach_args *uoba = aux;
249 struct obio4_attach_args *oba = &uoba->uoba_oba4;
250 bus_space_handle_t bh;
251 char bits[64];
252 int i;
253
254 ncr_sc->sc_dev = self;
255 sc->sc_dmatag = oba->oba_dmatag;
256
257 /* Map the controller registers. */
258 if (bus_space_map(oba->oba_bustag, oba->oba_paddr,
259 SWREG_BANK_SZ,
260 BUS_SPACE_MAP_LINEAR,
261 &bh) != 0) {
262 aprint_error(": cannot map registers\n");
263 return;
264 }
265
266 ncr_sc->sc_regt = oba->oba_bustag;
267 ncr_sc->sc_regh = bh;
268
269 sc->sc_options = sw_options;
270
271 ncr_sc->sc_dma_setup = sw_dma_setup;
272 ncr_sc->sc_dma_start = sw_dma_start;
273 ncr_sc->sc_dma_eop = sw_dma_stop;
274 ncr_sc->sc_dma_stop = sw_dma_stop;
275 ncr_sc->sc_intr_on = sw_intr_on;
276 ncr_sc->sc_intr_off = sw_intr_off;
277
278 /*
279 * Establish interrupt channel.
280 * Default interrupt priority always is 3. At least, that's
281  * The default interrupt priority is always 3. At least, that's
282 */
283 if (oba->oba_pri == -1)
284 oba->oba_pri = 3;
285
286 (void)bus_intr_establish(oba->oba_bustag, oba->oba_pri, IPL_BIO,
287 sw_intr, sc);
288
289 aprint_normal(" pri %d\n", oba->oba_pri);
290
291
292 /*
293 * Pull in the options flags. Allow the user to completely
294 * override the default values.
295 */
296 if ((device_cfdata(self)->cf_flags & SW_OPTIONS_MASK) != 0)
297 sc->sc_options =
298 device_cfdata(self)->cf_flags & SW_OPTIONS_MASK;
299
300 /*
301 * Initialize fields used by the MI code
302 */
303
304 /* NCR5380 register bank offsets */
305 ncr_sc->sci_r0 = 0;
306 ncr_sc->sci_r1 = 1;
307 ncr_sc->sci_r2 = 2;
308 ncr_sc->sci_r3 = 3;
309 ncr_sc->sci_r4 = 4;
310 ncr_sc->sci_r5 = 5;
311 ncr_sc->sci_r6 = 6;
312 ncr_sc->sci_r7 = 7;
313
314 ncr_sc->sc_rev = NCR_VARIANT_NCR5380;
315
316 /*
317 * MD function pointers used by the MI code.
318 */
319 ncr_sc->sc_pio_out = ncr5380_pio_out;
320 ncr_sc->sc_pio_in = ncr5380_pio_in;
321 ncr_sc->sc_dma_alloc = sw_dma_alloc;
322 ncr_sc->sc_dma_free = sw_dma_free;
323 ncr_sc->sc_dma_poll = sw_dma_poll;
324
325 ncr_sc->sc_flags = 0;
326 if ((sc->sc_options & SW_DO_RESELECT) == 0)
327 ncr_sc->sc_no_disconnect = 0xFF;
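		/* (0xFF: bitmask disallowing disconnect for all eight target IDs.) */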
328 if ((sc->sc_options & SW_DMA_INTR) == 0)
329 ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
330 ncr_sc->sc_min_dma_len = MIN_DMA_LEN;
331
332
333 /*
334 * Allocate DMA handles.
335 */
336 i = SCI_OPENINGS * sizeof(struct sw_dma_handle);
337 sc->sc_dma = kmem_alloc(i, KM_SLEEP);
338
339 for (i = 0; i < SCI_OPENINGS; i++) {
340 sc->sc_dma[i].dh_flags = 0;
341
342 /* Allocate a DMA handle */
343 if (bus_dmamap_create(
344 sc->sc_dmatag, /* tag */
345 MAXPHYS, /* size */
346 1, /* nsegments */
347 MAXPHYS, /* maxsegsz */
348 0, /* boundary */
349 BUS_DMA_NOWAIT,
350 &sc->sc_dma[i].dh_dmamap) != 0) {
351
352 aprint_error_dev(self, "DMA buffer map create error\n");
353 return;
354 }
355 }
356
357 if (sc->sc_options) {
358 snprintb(bits, sizeof(bits),
359 SW_OPTIONS_BITS, sc->sc_options);
360 aprint_normal_dev(self, "options=%s\n", bits);
361 }
362
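	/* The adapter itself answers as SCSI ID 7 (the initiator ID). */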
363 ncr_sc->sc_channel.chan_id = 7;
364 ncr_sc->sc_adapter.adapt_minphys = sw_minphys;
365
366 /* Initialize sw board */
367 sw_reset_adapter(ncr_sc);
368
369 /* Attach the ncr5380 chip driver */
370 ncr5380_attach(ncr_sc);
371 }
372
373 static void
374 sw_minphys(struct buf *bp)
375 {
376
377 if (bp->b_bcount > MAX_DMA_LEN) {
378 #ifdef DEBUG
379 if (sw_debug) {
380 printf("sw_minphys len = 0x%x.\n", MAX_DMA_LEN);
381 Debugger();
382 }
383 #endif
384 bp->b_bcount = MAX_DMA_LEN;
385 }
386 minphys(bp);
387 }
388
389 #define CSR_WANT (SW_CSR_SBC_IP | SW_CSR_DMA_IP | \
390 SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR )
391
392 static int
393 sw_intr(void *arg)
394 {
395 struct sw_softc *sc = arg;
396 struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
397 int dma_error, claimed;
398 u_short csr;
399
400 claimed = 0;
401 dma_error = 0;
402
403 /* SBC interrupt? DMA interrupt? */
404 csr = SWREG_READ(ncr_sc, SWREG_CSR);
405
406 NCR_TRACE("sw_intr: csr=0x%x\n", csr);
407
408 if (csr & SW_CSR_DMA_CONFLICT) {
409 dma_error |= SW_CSR_DMA_CONFLICT;
410 printf("%s: DMA conflict\n", __func__);
411 }
412 if (csr & SW_CSR_DMA_BUS_ERR) {
413 dma_error |= SW_CSR_DMA_BUS_ERR;
414 printf("%s: DMA bus error\n", __func__);
415 }
416 if (dma_error) {
417 if (sc->ncr_sc.sc_state & NCR_DOINGDMA)
418 sc->ncr_sc.sc_state |= NCR_ABORTING;
419 /* Make sure we will call the main isr. */
420 csr |= SW_CSR_DMA_IP;
421 }
422
423 if (csr & (SW_CSR_SBC_IP | SW_CSR_DMA_IP)) {
424 claimed = ncr5380_intr(&sc->ncr_sc);
425 #ifdef DEBUG
426 if (!claimed) {
427 printf("%s: spurious from SBC\n", __func__);
428 if (sw_debug & 4) {
429 Debugger(); /* XXX */
430 }
431 }
432 #endif
433 }
434
435 return claimed;
436 }
437
438
439 static void
440 sw_reset_adapter(struct ncr5380_softc *ncr_sc)
441 {
442
443 #ifdef DEBUG
444 if (sw_debug) {
445 printf("%s\n", __func__);
446 }
447 #endif
448
449 /*
450 * The reset bits in the CSR are active low.
451 */
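	/*
	 * Writing 0 therefore asserts SCSI reset; writing SW_CSR_SCSI_RES
	 * releases it again, and the final write re-enables interrupts
	 * with the DMA address and count registers already cleared.
	 */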
452 SWREG_WRITE(ncr_sc, SWREG_CSR, 0);
453 delay(10);
454 SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES);
455
456 SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
457 SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
458 delay(10);
459 SWREG_WRITE(ncr_sc, SWREG_CSR, SW_CSR_SCSI_RES | SW_CSR_INTR_EN);
460
461 SCI_CLR_INTR(ncr_sc);
462 }
463
464
465 /*****************************************************************
466 * Common functions for DMA
467 ****************************************************************/
468
469 /*
470 * Allocate a DMA handle and put it in sc->sc_dma. Prepare
471 * for DMA transfer. On the Sun4, this means mapping the buffer
472 * into DVMA space.
473 */
474 void
475 sw_dma_alloc(struct ncr5380_softc *ncr_sc)
476 {
477 struct sw_softc *sc = (struct sw_softc *)ncr_sc;
478 struct sci_req *sr = ncr_sc->sc_current;
479 struct scsipi_xfer *xs = sr->sr_xs;
480 struct sw_dma_handle *dh;
481 int i, xlen;
482 u_long addr;
483
484 #ifdef DIAGNOSTIC
485 if (sr->sr_dma_hand != NULL)
486 panic("%s: already have DMA handle", __func__);
487 #endif
488
489 #if 1 /* XXX - Temporary */
490 /* XXX - In case we think DMA is completely broken... */
491 if ((sc->sc_options & SW_ENABLE_DMA) == 0)
492 return;
493 #endif
494
495 addr = (u_long)ncr_sc->sc_dataptr;
496 xlen = ncr_sc->sc_datalen;
497
498 /* If the DMA start addr is misaligned then do PIO */
499 if ((addr & 1) || (xlen & 1)) {
500 printf("%s: misaligned.\n", __func__);
501 return;
502 }
503
504 /* Make sure our caller checked sc_min_dma_len. */
505 if (xlen < MIN_DMA_LEN)
506 panic("%s: xlen=0x%x", __func__, xlen);
507
508 /* Find free DMA handle. Guaranteed to find one since we have
509 	   as many DMA handles as the adapter has openings (SCI_OPENINGS). */
510 for (i = 0; i < SCI_OPENINGS; i++) {
511 if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
512 goto found;
513 }
514 panic("sw: no free DMA handles.");
515
516 found:
517 dh = &sc->sc_dma[i];
518 dh->dh_flags = SIDH_BUSY;
519 dh->dh_addr = (u_char *)addr;
520 dh->dh_maplen = xlen;
521
522 /* Copy the "write" flag for convenience. */
523 if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
524 dh->dh_flags |= SIDH_OUT;
525
526 /*
527 * Double-map the buffer into DVMA space. If we can't re-map
528 * the buffer, we print a warning and fall back to PIO mode.
529 *
530 * NOTE: it is not safe to sleep here!
531 */
532 if (bus_dmamap_load(sc->sc_dmatag, dh->dh_dmamap,
533 (void *)addr, xlen, NULL, BUS_DMA_NOWAIT) != 0) {
534 /* Can't remap segment */
535 printf("%s: can't remap 0x%lx/0x%x, doing PIO\n",
536 __func__, addr, dh->dh_maplen);
537 dh->dh_flags = 0;
538 return;
539 }
540 bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap, addr, xlen,
541 (dh->dh_flags & SIDH_OUT)
542 ? BUS_DMASYNC_PREWRITE
543 : BUS_DMASYNC_PREREAD);
544
545 /* success */
546 sr->sr_dma_hand = dh;
547 }
548
549
550 void
551 sw_dma_free(struct ncr5380_softc *ncr_sc)
552 {
553 struct sw_softc *sc = (struct sw_softc *)ncr_sc;
554 struct sci_req *sr = ncr_sc->sc_current;
555 struct sw_dma_handle *dh = sr->sr_dma_hand;
556
557 #ifdef DIAGNOSTIC
558 if (dh == NULL)
559 panic("%s: no DMA handle", __func__);
560 #endif
561
562 if (ncr_sc->sc_state & NCR_DOINGDMA)
563 panic("%s: free while in progress", __func__);
564
565 if (dh->dh_flags & SIDH_BUSY) {
566 /* Give back the DVMA space. */
567 bus_dmamap_sync(sc->sc_dmatag, dh->dh_dmamap,
568 dh->dh_dvma, dh->dh_maplen,
569 (dh->dh_flags & SIDH_OUT)
570 ? BUS_DMASYNC_POSTWRITE
571 : BUS_DMASYNC_POSTREAD);
572 bus_dmamap_unload(sc->sc_dmatag, dh->dh_dmamap);
573 dh->dh_flags = 0;
574 }
575 sr->sr_dma_hand = NULL;
576 }
577
578
579 /*
580 * Poll (spin-wait) for DMA completion.
581 * Called right after xx_dma_start(), and
582 * xx_dma_stop() will be called next.
583 * Same for either VME or OBIO.
584 */
585 void
586 sw_dma_poll(struct ncr5380_softc *ncr_sc)
587 {
588 struct sci_req *sr = ncr_sc->sc_current;
589 int tmo, csr_mask, csr;
590
591 /* Make sure DMA started successfully. */
592 if (ncr_sc->sc_state & NCR_ABORTING)
593 return;
594
595 csr_mask = SW_CSR_SBC_IP | SW_CSR_DMA_IP |
596 SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR;
597
598 tmo = 50000; /* X100 = 5 sec. */
599 for (;;) {
600 csr = SWREG_READ(ncr_sc, SWREG_CSR);
601 if (csr & csr_mask)
602 break;
603 if (--tmo <= 0) {
604 printf("%s: DMA timeout (while polling)\n",
605 device_xname(ncr_sc->sc_dev));
606 /* Indicate timeout as MI code would. */
607 sr->sr_flags |= SR_OVERDUE;
608 break;
609 }
610 delay(100);
611 }
612
613 #ifdef DEBUG
614 if (sw_debug) {
615 printf("%s: done, csr=0x%x\n", __func__, csr);
616 }
617 #endif
618 }
619
620
621 /*
622 * This is called when the bus is going idle,
623 * so we want to enable the SBC interrupts.
624 * That is controlled by the DMA enable!
625 * Who would have guessed!
626 * What a NASTY trick!
627 *
628 * XXX THIS MIGHT NOT WORK RIGHT!
629 */
630 void
631 sw_intr_on(struct ncr5380_softc *ncr_sc)
632 {
633 uint32_t csr;
634
635 sw_dma_setup(ncr_sc);
636 csr = SWREG_READ(ncr_sc, SWREG_CSR);
637 csr |= SW_CSR_DMA_EN; /* XXX - this bit is for vme only?! */
638 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
639 }
640
641 /*
642 * This is called when the bus is idle and we are
643 * about to start playing with the SBC chip.
644 *
645 * XXX THIS MIGHT NOT WORK RIGHT!
646 */
647 void
648 sw_intr_off(struct ncr5380_softc *ncr_sc)
649 {
650 uint32_t csr;
651
652 csr = SWREG_READ(ncr_sc, SWREG_CSR);
653 csr &= ~SW_CSR_DMA_EN;
654 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
655 }
656
657
658 /*
659 * This function is called during the COMMAND or MSG_IN phase
660 * that precedes a DATA_IN or DATA_OUT phase, in case we need
661 * to setup the DMA engine before the bus enters a DATA phase.
662 *
663 * On the OBIO version we just clear the DMA count and address
664 * here (to make sure it stays idle) and do the real setup
665 * later, in dma_start.
666 */
667 void
668 sw_dma_setup(struct ncr5380_softc *ncr_sc)
669 {
670 uint32_t csr;
671
672 /* No FIFO to reset on "sw". */
673
674 /* Set direction (assume recv here) */
675 csr = SWREG_READ(ncr_sc, SWREG_CSR);
676 csr &= ~SW_CSR_SEND;
677 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
678
679 SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
680 SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
681 }
682
683
684 void
685 sw_dma_start(struct ncr5380_softc *ncr_sc)
686 {
687 struct sw_softc *sc = (struct sw_softc *)ncr_sc;
688 struct sci_req *sr = ncr_sc->sc_current;
689 struct sw_dma_handle *dh = sr->sr_dma_hand;
690 u_long dva;
691 int xlen, adj, adjlen;
692 u_int mode;
693 uint32_t csr;
694
695 /*
696 * Get the DVMA mapping for this segment.
697 */
698 dva = (u_long)(dh->dh_dvma);
699 if (dva & 1)
700 panic("%s: bad dva=0x%lx", __func__, dva);
701
702 xlen = ncr_sc->sc_datalen;
703 xlen &= ~1;
704 sc->sc_xlen = xlen; /* XXX: or less... */
705
706 #ifdef DEBUG
707 if (sw_debug & 2) {
708 printf("%s: dh=%p, dva=0x%lx, xlen=%d\n",
709 __func__, dh, dva, xlen);
710 }
711 #endif
712
713 /*
714 * Set up the DMA controller.
715 	 * Note that (sc_datalen <= dh->dh_maplen)
716 */
717
718 /* Set direction (send/recv) */
719 csr = SWREG_READ(ncr_sc, SWREG_CSR);
720 if (dh->dh_flags & SIDH_OUT) {
721 csr |= SW_CSR_SEND;
722 } else {
723 csr &= ~SW_CSR_SEND;
724 }
725 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
726
727 /*
728 * The "sw" needs longword aligned transfers. We
729 * detect a shortword aligned transfer here, and adjust the
730 * DMA transfer by 2 bytes. These two bytes are read/written
731 * in PIO mode just before the DMA is started.
732 */
733 adj = 0;
734 if (dva & 2) {
735 adj = 2;
736 #ifdef DEBUG
737 if (sw_debug & 2)
738 printf("%s: adjusted up %d bytes\n", __func__, adj);
739 #endif
740 }
741
742 /* We have to frob the address on the "sw". */
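	/*
	 * (Presumably because the controller sees DVMA space in the top
	 * megabyte of its address range, hence the OR with 0xF00000.)
	 */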
743 dh->dh_startingpa = (dva | 0xF00000);
744 SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, (u_int)(dh->dh_startingpa + adj));
745 SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, xlen - adj);
746
747 /*
748 * Acknowledge the phase change. (After DMA setup!)
749 * Put the SBIC into DMA mode, and start the transfer.
750 */
751 if (dh->dh_flags & SIDH_OUT) {
752 NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_OUT);
753 if (adj) {
754 adjlen = ncr5380_pio_out(ncr_sc, PHASE_DATA_OUT,
755 adj, dh->dh_addr);
756 if (adjlen != adj)
757 printf("%s: bad outgoing adj, %d != %d\n",
758 device_xname(ncr_sc->sc_dev), adjlen, adj);
759 }
760 SCI_CLR_INTR(ncr_sc);
761 NCR5380_WRITE(ncr_sc, sci_icmd, SCI_ICMD_DATA);
762 mode = NCR5380_READ(ncr_sc, sci_mode);
763 mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
764 NCR5380_WRITE(ncr_sc, sci_mode, mode);
765 NCR5380_WRITE(ncr_sc, sci_dma_send, 0); /* start it */
766 } else {
767 NCR5380_WRITE(ncr_sc, sci_tcmd, PHASE_DATA_IN);
768 if (adj) {
769 adjlen = ncr5380_pio_in(ncr_sc, PHASE_DATA_IN,
770 adj, dh->dh_addr);
771 if (adjlen != adj)
772 printf("%s: bad incoming adj, %d != %d\n",
773 device_xname(ncr_sc->sc_dev), adjlen, adj);
774 }
775 SCI_CLR_INTR(ncr_sc);
776 NCR5380_WRITE(ncr_sc, sci_icmd, 0);
777 mode = NCR5380_READ(ncr_sc, sci_mode);
778 mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
779 NCR5380_WRITE(ncr_sc, sci_mode, mode);
780 NCR5380_WRITE(ncr_sc, sci_irecv, 0); /* start it */
781 }
782
783 /* Let'er rip! */
784 csr |= SW_CSR_DMA_EN;
785 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
786
787 ncr_sc->sc_state |= NCR_DOINGDMA;
788
789 #ifdef DEBUG
790 if (sw_debug & 2) {
791 printf("%s: started, flags=0x%x\n",
792 __func__, ncr_sc->sc_state);
793 }
794 #endif
795 }
796
797
798 void
799 sw_dma_eop(struct ncr5380_softc *ncr_sc)
800 {
801
802 /* Not needed - DMA was stopped prior to examining sci_csr */
803 }
804
805 #if (defined(DEBUG) || defined(DIAGNOSTIC)) && !defined(COUNT_SW_LEFTOVERS)
806 #define COUNT_SW_LEFTOVERS
807 #endif
808 #ifdef COUNT_SW_LEFTOVERS
809 /*
810 * Let's find out how often these occur. Read these with DDB from time
811 * to time.
812 */
813 int sw_3_leftover = 0;
814 int sw_2_leftover = 0;
815 int sw_1_leftover = 0;
816 int sw_0_leftover = 0;
817 #endif
818
819 void
820 sw_dma_stop(struct ncr5380_softc *ncr_sc)
821 {
822 struct sci_req *sr = ncr_sc->sc_current;
823 struct sw_dma_handle *dh = sr->sr_dma_hand;
824 int ntrans = 0, dva;
825 u_int mode;
826 uint32_t csr;
827
828 if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
829 #ifdef DEBUG
830 printf("%s: DMA not running\n", __func__);
831 #endif
832 return;
833 }
834 ncr_sc->sc_state &= ~NCR_DOINGDMA;
835
836 /* First, halt the DMA engine. */
837 csr = SWREG_READ(ncr_sc, SWREG_CSR);
838 csr &= ~SW_CSR_DMA_EN;
839 SWREG_WRITE(ncr_sc, SWREG_CSR, csr);
840
841 /*
842 * XXX HARDWARE BUG!
843 * Apparently, some early 4/100 SCSI controllers had a hardware
844 * bug that caused the controller to do illegal memory access.
845 * We see this as SW_CSR_DMA_BUS_ERR (makes sense). To work around
846 * this, we simply need to clean up after ourselves ... there will
847 * be as many as 3 bytes left over. Since we clean up "left-over"
848 * bytes on every read anyway, we just continue to chug along
849 * if SW_CSR_DMA_BUS_ERR is asserted. (This was probably worked
850 * around in hardware later with the "left-over byte" indicator
851 * in the VME controller.)
852 */
853 #if 0
854 if (csr & (SW_CSR_DMA_CONFLICT | SW_CSR_DMA_BUS_ERR))
855 #else
856 if (csr & (SW_CSR_DMA_CONFLICT))
857 #endif
858 {
859 printf("sw: DMA error, csr=0x%x, reset\n", csr);
860 sr->sr_xs->error = XS_DRIVER_STUFFUP;
861 ncr_sc->sc_state |= NCR_ABORTING;
862 sw_reset_adapter(ncr_sc);
863 }
864
865 /* Note that timeout may have set the error flag. */
866 if (ncr_sc->sc_state & NCR_ABORTING)
867 goto out;
868
869 /*
870 * Now try to figure out how much actually transferred
871 *
872 * The "sw" doesn't have a FIFO or a bcr, so we've stored
873 * the starting PA of the transfer in the DMA handle,
874 * and subtract it from the ending PA left in the dma_addr
875 * register.
876 */
877 dva = SWREG_READ(ncr_sc, SWREG_DMA_ADDR);
878 ntrans = (dva - dh->dh_startingpa);
879
880 #ifdef DEBUG
881 if (sw_debug & 2) {
882 printf("%s: ntrans=0x%x\n", __func__, ntrans);
883 }
884 #endif
885
886 if (ntrans > ncr_sc->sc_datalen)
887 panic("%s: excess transfer", __func__);
888
889 /* Adjust data pointer */
890 ncr_sc->sc_dataptr += ntrans;
891 ncr_sc->sc_datalen -= ntrans;
892
893 /*
894 	 * After a read, we may need to clean up
895 	 * "left-over bytes" (yuck!). The "sw" doesn't
896 	 * have a "left-over" indicator, so we have to do
897 * this no matter what. Ick.
898 */
899 if ((dh->dh_flags & SIDH_OUT) == 0) {
900 char *cp = ncr_sc->sc_dataptr;
901 uint32_t bpr;
902
903 bpr = SWREG_READ(ncr_sc, SWREG_BPR);
904
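		/*
		 * The byte pack register presumably holds the word that was
		 * being assembled when DMA stopped; copy out the (dva & 3)
		 * bytes that never made it to memory, most significant first.
		 */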
905 switch (dva & 3) {
906 case 3:
907 cp[0] = (bpr & 0xff000000) >> 24;
908 cp[1] = (bpr & 0x00ff0000) >> 16;
909 cp[2] = (bpr & 0x0000ff00) >> 8;
910 #ifdef COUNT_SW_LEFTOVERS
911 ++sw_3_leftover;
912 #endif
913 break;
914
915 case 2:
916 cp[0] = (bpr & 0xff000000) >> 24;
917 cp[1] = (bpr & 0x00ff0000) >> 16;
918 #ifdef COUNT_SW_LEFTOVERS
919 ++sw_2_leftover;
920 #endif
921 break;
922
923 case 1:
924 cp[0] = (bpr & 0xff000000) >> 24;
925 #ifdef COUNT_SW_LEFTOVERS
926 ++sw_1_leftover;
927 #endif
928 break;
929
930 #ifdef COUNT_SW_LEFTOVERS
931 default:
932 ++sw_0_leftover;
933 break;
934 #endif
935 }
936 }
937
938 out:
939 SWREG_WRITE(ncr_sc, SWREG_DMA_ADDR, 0);
940 SWREG_WRITE(ncr_sc, SWREG_DMA_CNT, 0);
941
942 /* Put SBIC back in PIO mode. */
943 mode = NCR5380_READ(ncr_sc, sci_mode);
944 mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
945 NCR5380_WRITE(ncr_sc, sci_mode, mode);
946 NCR5380_WRITE(ncr_sc, sci_icmd, 0);
947
948 #ifdef DEBUG
949 if (sw_debug & 2) {
950 printf("%s: ntrans=0x%x\n", __func__, ntrans);
951 }
952 #endif
953 }
954