1 /*	$NetBSD: si_sebuf.c,v 1.26 2008/04/04 16:00:58 tsutsui Exp $	*/
2 
3 /*-
4  * Copyright (c) 1996 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Gordon W. Ross.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Sun3/E SCSI driver (machine-dependent portion).
41  * The machine-independent parts are in ncr5380sbc.c.
42  *
43  * XXX - Mostly from the si driver.  Merge?
44  */
45 
46 #include <sys/cdefs.h>
47 __KERNEL_RCSID(0, "$NetBSD: si_sebuf.c,v 1.26 2008/04/04 16:00:58 tsutsui Exp $");
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/errno.h>
52 #include <sys/kernel.h>
53 #include <sys/malloc.h>
54 #include <sys/device.h>
55 #include <sys/buf.h>
56 #include <sys/proc.h>
57 #include <sys/user.h>
58 
59 #include <dev/scsipi/scsi_all.h>
60 #include <dev/scsipi/scsipi_all.h>
61 #include <dev/scsipi/scsipi_debug.h>
62 #include <dev/scsipi/scsiconf.h>
63 
64 #include <machine/autoconf.h>
65 
66 /* #define DEBUG XXX */
67 
68 #include <dev/ic/ncr5380reg.h>
69 #include <dev/ic/ncr5380var.h>
70 
71 #include "sereg.h"
72 #include "sevar.h"
73 
74 /*
75  * Transfers smaller than this are done using PIO
76  * (on the assumption that they're not worth the DMA overhead)
77  */
78 #define	MIN_DMA_LEN 128
79 
80 /*
81  * Transfers larger than 65535 bytes need to be split up.
82  * (Some of the FIFO logic has only 16-bit counters.)
83  * Make the size an integer multiple of the page size
84  * to avoid buf/cluster remap problems.  (paranoid?)
85  */
86 #define	MAX_DMA_LEN 0xE000
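/*
 * Worked example for the limit above (the 8 KB page size is the usual
 * sun3 NBPG value, assumed here rather than taken from this file):
 * 0xE000 = 57344 = 7 * 8192, so the cap is a whole number of pages
 * while still fitting the 16-bit FIFO counters, whose maximum count
 * is 0xFFFF = 65535.
 */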
87 
88 /*
89  * This structure is used to keep track of mapped DMA requests.
90  */
91 struct se_dma_handle {
92 	int 		dh_flags;
93 #define	SIDH_BUSY	1		/* This DH is in use */
94 #define	SIDH_OUT	2		/* DMA does data out (write) */
95 	u_char *	dh_addr;	/* KVA of start of buffer */
96 	int 		dh_maplen;	/* Length of KVA mapping. */
97 	long		dh_dma; 	/* Offset in DMA buffer. */
98 };
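/*
 * The handles live in the fixed sc_dma[] array allocated in se_attach();
 * se_dma_alloc() marks a free one SIDH_BUSY for the current request and
 * se_dma_free() clears the flags again when the transfer is finished.
 */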
99 
100 /*
101  * The first structure member has to be the ncr5380_softc
102  * so we can just cast to go back and forth between them (example below).
103  */
104 struct se_softc {
105 	struct ncr5380_softc	ncr_sc;
106 	volatile struct se_regs	*sc_regs;
107 	int		sc_adapter_type;
108 	int		sc_adapter_iv;		/* int. vec */
109 	int 	sc_options;			/* options for this instance */
110 	int 	sc_reqlen;  		/* requested transfer length */
111 	struct se_dma_handle *sc_dma;
112 	/* DMA command block for the OBIO controller. */
113 	void *sc_dmacmd;
114 };
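/*
 * For illustration only (not compiled): because ncr_sc is the first
 * member, both structures start at the same address and the MD code
 * can recover its softc from the MI pointer with a plain cast, e.g.
 *
 *	struct se_softc *sc = (struct se_softc *)ncr_sc;
 *	struct ncr5380_softc *ncr = &sc->ncr_sc;
 *
 * This is the pattern used by se_reset() and the DMA routines below.
 */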
115 
116 /* Options for disconnect/reselect, DMA, and interrupts. */
117 #define SE_NO_DISCONNECT    0xff
118 #define SE_NO_PARITY_CHK  0xff00
119 #define SE_FORCE_POLLING 0x10000
120 #define SE_DISABLE_DMA   0x20000
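/*
 * Example only (the "flags" directive syntax is an assumption, not taken
 * from this file): flags 0x30003 on the se config line would select
 * SE_DISABLE_DMA | SE_FORCE_POLLING and set the no-disconnect mask for
 * targets 0 and 1; se_attach() below splits the low byte and the 0xff00
 * byte out of sc_options.
 */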
121 
122 void se_dma_alloc(struct ncr5380_softc *);
123 void se_dma_free(struct ncr5380_softc *);
124 void se_dma_poll(struct ncr5380_softc *);
125 
126 void se_dma_setup(struct ncr5380_softc *);
127 void se_dma_start(struct ncr5380_softc *);
128 void se_dma_eop(struct ncr5380_softc *);
129 void se_dma_stop(struct ncr5380_softc *);
130 
131 void se_intr_on (struct ncr5380_softc *);
132 void se_intr_off(struct ncr5380_softc *);
133 
134 static int  se_intr(void *);
135 static void se_reset(struct ncr5380_softc *);
136 
137 /*
138  * New-style autoconfig attachment
139  */
140 
141 static int	se_match(device_t, cfdata_t, void *);
142 static void	se_attach(device_t, device_t, void *);
143 
144 CFATTACH_DECL_NEW(si_sebuf, sizeof(struct se_softc),
145     se_match, se_attach, NULL, NULL);
146 
147 static void	se_minphys(struct buf *);
148 
149 /* Options for disconnect/reselect, DMA, and interrupts. */
150 int se_options = SE_DISABLE_DMA | SE_FORCE_POLLING | 0xff;
151 
152 /* How long to wait for DMA before declaring an error. */
153 int se_dma_intr_timo = 500;	/* ticks (sec. X 100) */
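/* With the scaling above (ticks = seconds x 100), 500 ticks is 5 seconds. */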
154 
155 int se_debug = 0;
156 
157 static int
158 se_match(device_t parent, cfdata_t cf, void *args)
159 {
160 	struct sebuf_attach_args *aa = args;
161 
162 	/* Match by name. */
163 	if (strcmp(aa->name, "se"))
164 		return 0;
165 
166 	/* Anything else to check? */
167 
168 	return 1;
169 }
170 
171 static void
172 se_attach(device_t parent, device_t self, void *args)
173 {
174 	struct se_softc *sc = device_private(self);
175 	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
176 	struct cfdata *cf = device_cfdata(self);
177 	struct sebuf_attach_args *aa = args;
178 	volatile struct se_regs *regs;
179 	int i;
180 
181 	ncr_sc->sc_dev = self;
182 
183 	/* Get options from config flags if specified. */
184 	if (cf->cf_flags)
185 		sc->sc_options = cf->cf_flags;
186 	else
187 		sc->sc_options = se_options;
188 
189 	aprint_normal(": options=0x%x\n", sc->sc_options);
190 
191 	sc->sc_adapter_type = aa->ca.ca_bustype;
192 	sc->sc_adapter_iv = aa->ca.ca_intvec;
193 	sc->sc_regs = regs = aa->regs;
194 
195 	/*
196 	 * MD function pointers used by the MI code.
197 	 */
198 	ncr_sc->sc_pio_out = ncr5380_pio_out;
199 	ncr_sc->sc_pio_in =  ncr5380_pio_in;
200 
201 #if 0	/* XXX - not yet... */
202 	ncr_sc->sc_dma_alloc = se_dma_alloc;
203 	ncr_sc->sc_dma_free  = se_dma_free;
204 	ncr_sc->sc_dma_setup = se_dma_setup;
205 	ncr_sc->sc_dma_start = se_dma_start;
206 	ncr_sc->sc_dma_poll  = se_dma_poll;
207 	ncr_sc->sc_dma_eop   = se_dma_eop;
208 	ncr_sc->sc_dma_stop  = se_dma_stop;
209 	ncr_sc->sc_intr_on   = se_intr_on;
210 	ncr_sc->sc_intr_off  = se_intr_off;
211 #endif	/* XXX */
212 
213 	/* Attach interrupt handler. */
214 	isr_add_vectored(se_intr, (void *)sc,
215 	    aa->ca.ca_intpri, aa->ca.ca_intvec);
216 
217 	/* Reset the hardware. */
218 	se_reset(ncr_sc);
219 
220 	/* Do the common attach stuff. */
221 
222 	/*
223 	 * Support the "options" (config file flags).
224 	 * Disconnect/reselect is a per-target mask.
225 	 * Interrupts and DMA are per-controller.
226 	 */
227 	ncr_sc->sc_no_disconnect =
228 	    (sc->sc_options & SE_NO_DISCONNECT);
229 	ncr_sc->sc_parity_disable =
230 	    (sc->sc_options & SE_NO_PARITY_CHK) >> 8;
231 	if (sc->sc_options & SE_FORCE_POLLING)
232 		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;
233 
234 #if 1	/* XXX - Temporary */
235 	/* XXX - In case we think DMA is completely broken... */
236 	if (sc->sc_options & SE_DISABLE_DMA) {
237 		/* Override this function pointer. */
238 		ncr_sc->sc_dma_alloc = NULL;
239 	}
240 #endif
241 	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;
242 
243 	/*
244 	 * Initialize fields used by the MI code
245 	 */
246 	ncr_sc->sci_r0 = &regs->ncrregs[0];
247 	ncr_sc->sci_r1 = &regs->ncrregs[1];
248 	ncr_sc->sci_r2 = &regs->ncrregs[2];
249 	ncr_sc->sci_r3 = &regs->ncrregs[3];
250 	ncr_sc->sci_r4 = &regs->ncrregs[4];
251 	ncr_sc->sci_r5 = &regs->ncrregs[5];
252 	ncr_sc->sci_r6 = &regs->ncrregs[6];
253 	ncr_sc->sci_r7 = &regs->ncrregs[7];
254 
255 	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;
256 
257 	/*
258 	 * Allocate DMA handles.
259 	 */
260 	i = SCI_OPENINGS * sizeof(struct se_dma_handle);
261 	sc->sc_dma = malloc(i, M_DEVBUF, M_WAITOK);
262 	if (sc->sc_dma == NULL)
263 		panic("se: dma_malloc failed");
264 	for (i = 0; i < SCI_OPENINGS; i++)
265 		sc->sc_dma[i].dh_flags = 0;
266 
267 	ncr_sc->sc_channel.chan_id = 7;
268 	ncr_sc->sc_adapter.adapt_minphys = se_minphys;
269 
270 	/*
271  * Initialize the se board itself.
272 	 */
273 	ncr5380_attach(ncr_sc);
274 }
275 
276 static void
277 se_reset(struct ncr5380_softc *ncr_sc)
278 {
279 	struct se_softc *sc = (struct se_softc *)ncr_sc;
280 	volatile struct se_regs *se = sc->sc_regs;
281 
282 #ifdef	DEBUG
283 	if (se_debug) {
284 		printf("%s\n", __func__);
285 	}
286 #endif
287 
288 	/* The reset bits in the CSR are active low. */
289 	se->se_csr = 0;
290 	delay(10);
291 	se->se_csr = SE_CSR_SCSI_RES /* | SE_CSR_INTR_EN */ ;
292 	delay(10);
293 
294 	/* Make sure the DMA engine is stopped. */
295 	se->dma_addr = 0;
296 	se->dma_cntr = 0;
297 	se->se_ivec = sc->sc_adapter_iv;
298 }
299 
300 /*
301  * This is called when the bus is going idle,
302  * so we want to enable the SBC interrupts.
303  * That is controlled by the DMA enable!
304  * Who would have guessed!
305  * What a NASTY trick!
306  */
307 void
308 se_intr_on(struct ncr5380_softc *ncr_sc)
309 {
310 	struct se_softc *sc = (struct se_softc *)ncr_sc;
311 	volatile struct se_regs *se = sc->sc_regs;
312 
313 	/* receive mode should be safer */
314 	se->se_csr &= ~SE_CSR_SEND;
315 
316 	/* Clear the count so nothing happens. */
317 	se->dma_cntr = 0;
318 
319 	/* Clear the start address too. (paranoid?) */
320 	se->dma_addr = 0;
321 
322 	/* Finally, enable the DMA engine. */
323 	se->se_csr |= SE_CSR_INTR_EN;
324 }
325 
326 /*
327  * This is called when the bus is idle and we are
328  * about to start playing with the SBC chip.
329  */
330 void
331 se_intr_off(struct ncr5380_softc *ncr_sc)
332 {
333 	struct se_softc *sc = (struct se_softc *)ncr_sc;
334 	volatile struct se_regs *se = sc->sc_regs;
335 
336 	se->se_csr &= ~SE_CSR_INTR_EN;
337 }
338 
339 /*
340  * This function is called during the COMMAND or MSG_IN phase
341  * that precedes a DATA_IN or DATA_OUT phase, in case we need
342  * to set up the DMA engine before the bus enters a DATA phase.
343  *
344  * On the VME version, set up the start address, but clear the
345  * count (to make sure it stays idle) and set that later.
346  * XXX: The VME adapter appears to suppress SBC interrupts
347  * when the FIFO is not empty or the FIFO count is non-zero!
348  * XXX: Need to copy data into the DMA buffer...
349  */
350 void
351 se_dma_setup(struct ncr5380_softc *ncr_sc)
352 {
353 	struct se_softc *sc = (struct se_softc *)ncr_sc;
354 	struct sci_req *sr = ncr_sc->sc_current;
355 	struct se_dma_handle *dh = sr->sr_dma_hand;
356 	volatile struct se_regs *se = sc->sc_regs;
357 	long data_pa;
358 	int xlen;
359 
360 	/*
361 	 * Get the DMA mapping for this segment.
362 	 * XXX - Should separate allocation and mapin.
363 	 */
364 	data_pa = 0; /* XXX se_dma_kvtopa(dh->dh_dma); */
365 	data_pa += (ncr_sc->sc_dataptr - dh->dh_addr);
366 	if (data_pa & 1)
367 		panic("%s: bad pa=0x%lx", __func__, data_pa);
368 	xlen = ncr_sc->sc_datalen;
369 	xlen &= ~1;				/* XXX: necessary? */
370 	sc->sc_reqlen = xlen; 	/* XXX: or less? */
371 
372 #ifdef	DEBUG
373 	if (se_debug & 2) {
374 		printf("%s: dh=%p, pa=0x%lx, xlen=0x%x\n",
375 		    __func__, dh, data_pa, xlen);
376 	}
377 #endif
378 
379 	/* Set direction (send/recv) */
380 	if (dh->dh_flags & SIDH_OUT) {
381 		se->se_csr |= SE_CSR_SEND;
382 	} else {
383 		se->se_csr &= ~SE_CSR_SEND;
384 	}
385 
386 	/* Load the start address. */
387 	se->dma_addr = (ushort)(data_pa & 0xFFFF);
388 
389 	/*
390 	 * Keep the count zero or it may start early!
391 	 */
392 	se->dma_cntr = 0;
393 }
394 
395 
396 void
397 se_dma_start(struct ncr5380_softc *ncr_sc)
398 {
399 	struct se_softc *sc = (struct se_softc *)ncr_sc;
400 	struct sci_req *sr = ncr_sc->sc_current;
401 	struct se_dma_handle *dh = sr->sr_dma_hand;
402 	volatile struct se_regs *se = sc->sc_regs;
403 	int s, xlen;
404 
405 	xlen = sc->sc_reqlen;
406 
407 	/* This MAY be time critical (not sure). */
408 	s = splhigh();
409 
410 	se->dma_cntr = (ushort)(xlen & 0xFFFF);
411 
412 	/*
413 	 * Acknowledge the phase change.  (After DMA setup!)
414 	 * Put the SBIC into DMA mode, and start the transfer.
415 	 */
416 	if (dh->dh_flags & SIDH_OUT) {
417 		*ncr_sc->sci_tcmd = PHASE_DATA_OUT;
418 		SCI_CLR_INTR(ncr_sc);
419 		*ncr_sc->sci_icmd = SCI_ICMD_DATA;
420 		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
421 		*ncr_sc->sci_dma_send = 0;	/* start it */
422 	} else {
423 		*ncr_sc->sci_tcmd = PHASE_DATA_IN;
424 		SCI_CLR_INTR(ncr_sc);
425 		*ncr_sc->sci_icmd = 0;
426 		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
427 		*ncr_sc->sci_irecv = 0;	/* start it */
428 	}
429 
430 	/* Let'er rip! */
431 	se->se_csr |= SE_CSR_INTR_EN;
432 
433 	splx(s);
434 	ncr_sc->sc_state |= NCR_DOINGDMA;
435 
436 #ifdef	DEBUG
437 	if (se_debug & 2) {
438 		printf("%s: started, flags=0x%x\n",
439 		    __func__, ncr_sc->sc_state);
440 	}
441 #endif
442 }
443 
444 
445 void
446 se_dma_eop(struct ncr5380_softc *ncr_sc)
447 {
448 
449 	/* Not needed - DMA was stopped prior to examining sci_csr */
450 }
451 
452 
453 void
454 se_dma_stop(struct ncr5380_softc *ncr_sc)
455 {
456 	struct se_softc *sc = (struct se_softc *)ncr_sc;
457 	struct sci_req *sr = ncr_sc->sc_current;
458 	struct se_dma_handle *dh = sr->sr_dma_hand;
459 	volatile struct se_regs *se = sc->sc_regs;
460 	int resid, ntrans;
461 
462 	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
463 #ifdef	DEBUG
464 		printf("%s: DMA not running\n", __func__);
465 #endif
466 		return;
467 	}
468 	ncr_sc->sc_state &= ~NCR_DOINGDMA;
469 
470 	/* First, halt the DMA engine. */
471 	se->se_csr &= ~SE_CSR_INTR_EN;	/* VME only */
472 
473 	/* Set an impossible phase to prevent data movement? */
474 	*ncr_sc->sci_tcmd = PHASE_INVALID;
475 
476 	/* Note that timeout may have set the error flag. */
477 	if (ncr_sc->sc_state & NCR_ABORTING)
478 		goto out;
479 
480 	/* XXX: Wait for DMA to actually finish? */
481 
482 	/*
483 	 * Now try to figure out how much actually transferred
484 	 */
485 	resid = se->dma_cntr & 0xFFFF;
486 	if (dh->dh_flags & SIDH_OUT)
487 		if ((resid > 0) && (resid < sc->sc_reqlen))
488 			resid++;
489 	ntrans = sc->sc_reqlen - resid;
490 
491 #ifdef	DEBUG
492 	if (se_debug & 2) {
493 		printf("%s: resid=0x%x ntrans=0x%x\n",
494 		    __func__, resid, ntrans);
495 	}
496 #endif
497 
498 	if (ntrans < MIN_DMA_LEN) {
499 		printf("se: fifo count: 0x%x\n", resid);
500 		ncr_sc->sc_state |= NCR_ABORTING;
501 		goto out;
502 	}
503 	if (ntrans > ncr_sc->sc_datalen)
504 		panic("%s: excess transfer", __func__);
505 
506 	/* Adjust data pointer */
507 	ncr_sc->sc_dataptr += ntrans;
508 	ncr_sc->sc_datalen -= ntrans;
509 
510 out:
511 	se->dma_addr = 0;
512 	se->dma_cntr = 0;
513 
514 	/* Put SBIC back in PIO mode. */
515 	*ncr_sc->sci_mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
516 	*ncr_sc->sci_icmd = 0;
517 }
518 
519 /*****************************************************************/
520 
521 static void
522 se_minphys(struct buf *bp)
523 {
524 
525 	if (bp->b_bcount > MAX_DMA_LEN)
526 		bp->b_bcount = MAX_DMA_LEN;
527 
528 	minphys(bp);
529 }
530 
531 
532 int
533 se_intr(void *arg)
534 {
535 	struct se_softc *sc = arg;
536 	volatile struct se_regs *se = sc->sc_regs;
537 	int dma_error, claimed;
538 	u_short csr;
539 
540 	claimed = 0;
541 	dma_error = 0;
542 
543 	/* SBC interrupt? DMA interrupt? */
544 	csr = se->se_csr;
545 	NCR_TRACE("se_intr: csr=0x%x\n", csr);
546 
547 	if (csr & SE_CSR_SBC_IP) {
548 		claimed = ncr5380_intr(&sc->ncr_sc);
549 #ifdef	DEBUG
550 		if (!claimed) {
551 			printf("%s: spurious from SBC\n", __func__);
552 		}
553 #endif
554 		/* Yes, we DID cause this interrupt. */
555 		claimed = 1;
556 	}
557 
558 	return claimed;
559 }
560 
561 
562 /*****************************************************************
563  * Common functions for DMA
564  ****************************************************************/
565 
566 /*
567  * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
568  * for DMA transfer.  On the Sun3/E, this means we have to
569  * allocate space in the DMA buffer for this transfer.
570  */
571 void
572 se_dma_alloc(struct ncr5380_softc *ncr_sc)
573 {
574 	struct se_softc *sc = (struct se_softc *)ncr_sc;
575 	struct sci_req *sr = ncr_sc->sc_current;
576 	struct scsipi_xfer *xs = sr->sr_xs;
577 	struct se_dma_handle *dh;
578 	int i, xlen;
579 	u_long addr;
580 
581 #ifdef	DIAGNOSTIC
582 	if (sr->sr_dma_hand != NULL)
583 		panic("%s: already have DMA handle", __func__);
584 #endif
585 
586 	addr = (u_long)ncr_sc->sc_dataptr;
587 	xlen = ncr_sc->sc_datalen;
588 
589 	/* If the DMA start addr is misaligned then do PIO */
590 	if ((addr & 1) || (xlen & 1)) {
591 		printf("%s: misaligned.\n", __func__);
592 		return;
593 	}
594 
595 	/* Make sure our caller checked sc_min_dma_len. */
596 	if (xlen < MIN_DMA_LEN)
597 		panic("%s: xlen=0x%x", __func__, xlen);
598 
599 	/*
600 	 * Never attempt single transfers of more than 63k, because
601 	 * our count register may be only 16 bits (an OBIO adapter).
602 	 * This should never happen since already bounded by minphys().
603 	 * XXX - Should just segment these...
604 	 */
605 	if (xlen > MAX_DMA_LEN) {
606 		printf("%s: excessive xlen=0x%x\n", __func__, xlen);
607 		ncr_sc->sc_datalen = xlen = MAX_DMA_LEN;
608 	}
609 
610 	/* Find free DMA handle.  Guaranteed to find one since we have
611 	   as many DMA handles as the driver has processes. */
612 	for (i = 0; i < SCI_OPENINGS; i++) {
613 		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
614 			goto found;
615 	}
616 	panic("se: no free DMA handles.");
617 found:
618 
619 	dh = &sc->sc_dma[i];
620 	dh->dh_flags = SIDH_BUSY;
621 
622 	/* Copy the "write" flag for convenience. */
623 	if (xs->xs_control & XS_CTL_DATA_OUT)
624 		dh->dh_flags |= SIDH_OUT;
625 
626 	dh->dh_addr = (uint8_t *)addr;
627 	dh->dh_maplen  = xlen;
628 	dh->dh_dma = 0;	/* XXX - Allocate space in DMA buffer. */
629 	/* XXX: dh->dh_dma = alloc(xlen) */
630 	if (!dh->dh_dma) {
631 		/* Can't remap segment */
632 		printf("%s: can't remap %p/0x%x\n",
633 		    __func__, dh->dh_addr, dh->dh_maplen);
634 		dh->dh_flags = 0;
635 		return;
636 	}
637 
638 	/* success */
639 	sr->sr_dma_hand = dh;
640 }
641 
642 
643 void
644 se_dma_free(struct ncr5380_softc *ncr_sc)
645 {
646 	struct sci_req *sr = ncr_sc->sc_current;
647 	struct se_dma_handle *dh = sr->sr_dma_hand;
648 
649 #ifdef	DIAGNOSTIC
650 	if (dh == NULL)
651 		panic("%s: no DMA handle", __func__);
652 #endif
653 
654 	if (ncr_sc->sc_state & NCR_DOINGDMA)
655 		panic("%s: free while in progress", __func__);
656 
657 	if (dh->dh_flags & SIDH_BUSY) {
658 		/* XXX: Should separate allocation and mapping. */
659 		/* XXX: Give back the DMA space. */
660 		/* XXX: free((void *)dh->dh_dma, dh->dh_maplen); */
661 		dh->dh_dma = 0;
662 		dh->dh_flags = 0;
663 	}
664 	sr->sr_dma_hand = NULL;
665 }
666 
667 
668 #define	CSR_MASK SE_CSR_SBC_IP
669 #define	POLL_TIMO	50000	/* X100 = 5 sec. */
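/*
 * Worked out: the poll loop below spins delay(100) microseconds per
 * iteration, so POLL_TIMO allows 50000 * 100 us = 5,000,000 us = 5
 * seconds before the "DMA timeout" message is printed.
 */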
670 
671 /*
672  * Poll (spin-wait) for DMA completion.
673  * Called right after xx_dma_start(), and
674  * xx_dma_stop() will be called next.
675  * Same for either VME or OBIO.
676  */
677 void
678 se_dma_poll(struct ncr5380_softc *ncr_sc)
679 {
680 	struct se_softc *sc = (struct se_softc *)ncr_sc;
681 	struct sci_req *sr = ncr_sc->sc_current;
682 	volatile struct se_regs *se = sc->sc_regs;
683 	int tmo;
684 
685 	/* Make sure DMA started successfully. */
686 	if (ncr_sc->sc_state & NCR_ABORTING)
687 		return;
688 
689 	/*
690 	 * XXX: The Sun driver waits for ~SE_CSR_DMA_ACTIVE here
691 	 * XXX: (on obio) or even worse (on vme) a 10mS. delay!
692 	 * XXX: I really doubt that is necessary...
693 	 */
694 
695 	/* Wait for any "DMA complete" or error bits. */
696 	tmo = POLL_TIMO;
697 	for (;;) {
698 		if (se->se_csr & CSR_MASK)
699 			break;
700 		if (--tmo <= 0) {
701 			printf("se: DMA timeout (while polling)\n");
702 			/* Indicate timeout as MI code would. */
703 			sr->sr_flags |= SR_OVERDUE;
704 			break;
705 		}
706 		delay(100);
707 	}
708 	NCR_TRACE("se_dma_poll: waited %d\n",
709 			  POLL_TIMO - tmo);
710 
711 #ifdef	DEBUG
712 	if (se_debug & 2) {
713 		printf("%s: done, csr=0x%x\n", __func__, se->se_csr);
714 	}
715 #endif
716 }
717 
718