/*	$NetBSD: sc_wrap.c,v 1.24 2003/05/10 09:46:25 tsutsui Exp $	*/

/*
 * This driver is slow!  Need to rewrite.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsi_message.h>

#include <newsmips/dev/hbvar.h>
#include <newsmips/dev/scsireg.h>
#include <newsmips/dev/dmac_0448.h>
#include <newsmips/dev/screg_1185.h>

#include <machine/adrsmap.h>
#include <machine/autoconf.h>
#include <machine/machConst.h>

#include <mips/cache.h>

static int cxd1185_match __P((struct device *, struct cfdata *, void *));
static void cxd1185_attach __P((struct device *, struct device *, void *));

CFATTACH_DECL(sc, sizeof(struct sc_softc),
    cxd1185_match, cxd1185_attach, NULL, NULL);

void cxd1185_init __P((struct sc_softc *));
static void free_scb __P((struct sc_softc *, struct sc_scb *));
static struct sc_scb *get_scb __P((struct sc_softc *, int));
static void sc_scsipi_request __P((struct scsipi_channel *,
    scsipi_adapter_req_t, void *));
static int sc_poll __P((struct sc_softc *, int, int));
static void sc_sched __P((struct sc_softc *));
void sc_done __P((struct sc_scb *));
int sc_intr __P((void *));
static void cxd1185_timeout __P((void *));

extern void sc_send __P((struct sc_scb *, int, int));
extern int scintr __P((void));
extern void scsi_hardreset __P((void));
extern int sc_busy __P((struct sc_softc *, int));
extern paddr_t kvtophys __P((vaddr_t));

static int sc_disconnect = IDT_DISCON;

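/*
 * Match an "sc" (CXD1185 SCSI controller) device on the hb bus.
 */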
int
cxd1185_match(parent, cf, aux)
        struct device *parent;
        struct cfdata *cf;
        void *aux;
{
        struct hb_attach_args *ha = aux;

        if (strcmp(ha->ha_name, "sc"))
                return 0;

        return 1;
}

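/*
 * Attach the controller: set up the scsipi adapter and channel,
 * initialize the scb free list, reset the chip, establish the
 * interrupt handler, and attach the SCSI bus.
 */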
void
cxd1185_attach(parent, self, aux)
        struct device *parent, *self;
        void *aux;
{
        struct sc_softc *sc = (void *)self;
        struct hb_attach_args *ha = aux;
        struct sc_scb *scb;
        int i, intlevel;

        intlevel = ha->ha_level;
        if (intlevel == -1) {
#if 0
                printf(": interrupt level not configured\n");
                return;
#else
                printf(": interrupt level not configured; using");
                intlevel = 0;
#endif
        }
        printf(" level %d\n", intlevel);

        if (sc_idenr & 0x08)
                sc->scsi_1185AQ = 1;
        else
                sc->scsi_1185AQ = 0;

        sc->sc_adapter.adapt_dev = &sc->sc_dev;
        sc->sc_adapter.adapt_nchannels = 1;
        sc->sc_adapter.adapt_openings = 7;
        sc->sc_adapter.adapt_max_periph = 1;
        sc->sc_adapter.adapt_ioctl = NULL;
        sc->sc_adapter.adapt_minphys = minphys;
        sc->sc_adapter.adapt_request = sc_scsipi_request;

        memset(&sc->sc_channel, 0, sizeof(sc->sc_channel));
        sc->sc_channel.chan_adapter = &sc->sc_adapter;
        sc->sc_channel.chan_bustype = &scsi_bustype;
        sc->sc_channel.chan_channel = 0;
        sc->sc_channel.chan_ntargets = 8;
        sc->sc_channel.chan_nluns = 8;
        sc->sc_channel.chan_id = 7;

        TAILQ_INIT(&sc->ready_list);
        TAILQ_INIT(&sc->free_list);

        scb = sc->sc_scb;
        for (i = 0; i < 24; i++) {      /* XXX 24 */
                TAILQ_INSERT_TAIL(&sc->free_list, scb, chain);
                scb++;
        }

        cxd1185_init(sc);
        DELAY(100000);

        hb_intr_establish(intlevel, INTEN1_DMA, IPL_BIO, sc_intr, sc);

        config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}

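/*
 * Mark all targets idle and reset the SCSI bus and controller.
 */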
void
cxd1185_init(sc)
        struct sc_softc *sc;
{
        int i;

        for (i = 0; i < 8; i++)
                sc->inuse[i] = 0;

        scsi_hardreset();
}

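/*
 * Return an scb to the free list; wake up anyone waiting for one.
 */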
void
free_scb(sc, scb)
        struct sc_softc *sc;
        struct sc_scb *scb;
{
        int s;

        s = splbio();

        TAILQ_INSERT_HEAD(&sc->free_list, scb, chain);

        /*
         * If the free list was empty, wake up anybody waiting
         * for an scb to come free.
         */
        if (scb->chain.tqe_next == 0)
                wakeup(&sc->free_list);

        splx(s);
}

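/*
 * Get an scb from the free list; unless XS_CTL_NOSLEEP is set,
 * sleep until one becomes available.
 */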
struct sc_scb *
get_scb(sc, flags)
        struct sc_softc *sc;
        int flags;
{
        int s;
        struct sc_scb *scb;

        s = splbio();

        while ((scb = sc->free_list.tqh_first) == NULL &&
            (flags & XS_CTL_NOSLEEP) == 0)
                tsleep(&sc->free_list, PRIBIO, "sc_scb", 0);
        if (scb) {
                TAILQ_REMOVE(&sc->free_list, scb, chain);
        }

        splx(s);
        return scb;
}

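/*
 * scsipi entry point: queue the transfer on the ready list and kick
 * the scheduler; if XS_CTL_POLL is set, busy-wait for completion.
 */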
void
sc_scsipi_request(chan, req, arg)
        struct scsipi_channel *chan;
        scsipi_adapter_req_t req;
        void *arg;
{
        struct scsipi_xfer *xs;
        struct scsipi_periph *periph;
        struct sc_softc *sc = (void *)chan->chan_adapter->adapt_dev;
        struct sc_scb *scb;
        int flags, s;
        int target;

        switch (req) {
        case ADAPTER_REQ_RUN_XFER:
                xs = arg;
                periph = xs->xs_periph;

                flags = xs->xs_control;
                if ((scb = get_scb(sc, flags)) == NULL)
                        panic("sc_scsipi_request: no scb");

                scb->xs = xs;
                scb->flags = 0;
                scb->sc_ctag = 0;
                scb->sc_coffset = 0;
                scb->istatus = 0;
                scb->tstatus = 0;
                scb->message = 0;
                bzero(scb->msgbuf, sizeof(scb->msgbuf));

                s = splbio();

                TAILQ_INSERT_TAIL(&sc->ready_list, scb, chain);
                sc_sched(sc);
                splx(s);

                if (flags & XS_CTL_POLL) {
                        target = periph->periph_target;
                        if (sc_poll(sc, target, xs->timeout)) {
                                printf("sc: timeout (retry)\n");
                                if (sc_poll(sc, target, xs->timeout)) {
                                        printf("sc: timeout\n");
                                }
                        }
                        /* called during autoconfig only... */
                        mips_dcache_wbinv_all();        /* Flush DCache */
                }
                return;
        case ADAPTER_REQ_GROW_RESOURCES:
                /* XXX Not supported. */
                return;
        case ADAPTER_REQ_SET_XFER_MODE:
                /* XXX Not supported. */
                return;
        }
}

/*
 * Used when interrupt driven I/O isn't allowed, e.g. during boot.
 */
int
sc_poll(sc, chan, count)
        struct sc_softc *sc;
        int chan, count;
{
        volatile u_char *int_stat = (void *)INTST1;
        volatile u_char *int_clear = (void *)INTCLR1;

        while (sc_busy(sc, chan)) {
                if (*int_stat & INTST1_DMA) {
                        *int_clear = INTST1_DMA;
                        if (dmac_gstat & CH_INT(CH_SCSI)) {
                                if (dmac_gstat & CH_MRQ(CH_SCSI)) {
                                        DELAY(50);
                                        if (dmac_gstat & CH_MRQ(CH_SCSI))
                                                printf("dma_poll\n");
                                }
                                DELAY(10);
                                scintr();
                        }
                }
                DELAY(1000);
                count--;
                if (count <= 0)
                        return 1;
        }
        return 0;
}

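/*
 * Scan the ready list and start any transfer whose target is idle:
 * build the DMA page map for the data buffer, send the command, and
 * take the scb off the ready list.
 */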
void
sc_sched(sc)
        struct sc_softc *sc;
{
        struct scsipi_xfer *xs;
        struct scsipi_periph *periph;
        int ie = 0;
        int flags;
        int chan, lun;
        struct sc_scb *scb, *nextscb;

        scb = sc->ready_list.tqh_first;
start:
        if (scb == NULL)
                return;

        xs = scb->xs;
        periph = xs->xs_periph;
        chan = periph->periph_target;
        flags = xs->xs_control;

        if (cold)
                flags |= XS_CTL_POLL;

        if (sc->inuse[chan]) {
                scb = scb->chain.tqe_next;
                goto start;
        }
        sc->inuse[chan] = 1;

        if (flags & XS_CTL_RESET)
                printf("SCSI RESET\n");

        lun = periph->periph_lun;

        scb->identify = MSG_IDENT | sc_disconnect | (lun & IDT_DRMASK);
        scb->sc_ctrnscnt = xs->datalen;

        /* Build the va -> pa page mapping table for DMA. */
        if (xs->datalen > 0) {
                int pages, offset;
                int i, pn;
                vaddr_t va;

                /* bzero(&sc->sc_map[chan], sizeof(struct sc_map)); */

                va = (vaddr_t)xs->data;

                offset = va & PGOFSET;
                pages = (offset + xs->datalen + PAGE_SIZE - 1) >> PGSHIFT;
                if (pages >= NSCMAP)
                        panic("sc_map: Too many pages");

                for (i = 0; i < pages; i++) {
                        pn = kvtophys(va) >> PGSHIFT;
                        sc->sc_map[chan].mp_addr[i] = pn;
                        va += PAGE_SIZE;
                }

                sc->sc_map[chan].mp_offset = offset;
                sc->sc_map[chan].mp_pages = pages;
                scb->sc_map = &sc->sc_map[chan];
        }

        if ((flags & XS_CTL_POLL) == 0)
                ie = SCSI_INTEN;

        if (xs->data)
                scb->sc_cpoint = (void *)xs->data;
        else
                scb->sc_cpoint = scb->msgbuf;
        scb->scb_softc = sc;

        callout_reset(&scb->xs->xs_callout, hz * 10, cxd1185_timeout, scb);
        sc_send(scb, chan, ie);
        callout_stop(&scb->xs->xs_callout);

        nextscb = scb->chain.tqe_next;

        TAILQ_REMOVE(&sc->ready_list, scb, chain);

        scb = nextscb;

        goto start;
}

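/*
 * Command completion: translate the controller and target status into
 * scsipi errors, finish the xfer, release the scb, and restart the
 * scheduler for this target.
 */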
void
sc_done(scb)
        struct sc_scb *scb;
{
        struct scsipi_xfer *xs = scb->xs;
        struct scsipi_periph *periph = xs->xs_periph;
        struct sc_softc *sc =
            (void *)periph->periph_channel->chan_adapter->adapt_dev;

        xs->resid = 0;
        xs->status = 0;

        if (scb->istatus != INST_EP) {
                if (scb->istatus == (INST_EP|INST_TO))
                        xs->error = XS_SELTIMEOUT;
                else {
                        printf("SC(i): [istatus=0x%x, tstatus=0x%x]\n",
                            scb->istatus, scb->tstatus);
                        xs->error = XS_DRIVER_STUFFUP;
                }
        }

        switch (scb->tstatus) {

        case TGST_GOOD:
                break;

        case TGST_CC:
                xs->status = SCSI_CHECK;
                if (xs->error == 0)
                        xs->error = XS_BUSY;
                /* FALLTHROUGH */

        default:
                printf("SC(t): [istatus=0x%x, tstatus=0x%x]\n",
                    scb->istatus, scb->tstatus);
                break;
        }

        scsipi_done(xs);
        free_scb(sc, scb);
        sc->inuse[periph->periph_target] = 0;
        sc_sched(sc);
}

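/*
 * DMA/SCSI interrupt handler: if the SCSI channel interrupted, wait
 * out any pending memory request, then run the SCSI interrupt
 * service routine.
 */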
int
sc_intr(v)
        void *v;
{
        /* struct sc_softc *sc = v; */
        volatile u_char *gsp = (u_char *)DMAC_GSTAT;
        u_int gstat = *gsp;
        int mrqb, i;

        if ((gstat & CH_INT(CH_SCSI)) == 0)
                return 0;

        /*
         * When the DMA interrupt occurs, some data may remain
         * untransferred; wait for the transfer to complete.
         */
        mrqb = (gstat & CH_INT(CH_SCSI)) << 1;
        if (gstat & mrqb) {
                /*
                 * XXX SHOULD USE DELAY()
                 */
                for (i = 0; i < 50; i++)
                        ;
                if (*gsp & mrqb)
                        printf("sc_intr: MRQ\n");
        }
        scintr();

        return 1;
}


#if 0
/*
 * SCOP_RSENSE request
 */
void
scop_rsense(intr, sc_param, lun, ie, count, param)
        register int intr;
        register struct scsi *sc_param;
        register int lun;
        register int ie;
        register int count;
        register caddr_t param;
{
        bzero(sc_param, sizeof(struct scsi));
        sc_param->identify = MSG_IDENT | sc_disconnect | (lun & IDT_DRMASK);
        sc_param->sc_lun = lun;

        sc_param->sc_cpoint = (u_char *)param;
        sc_param->sc_ctrnscnt = count;

        /* sc_cdb */
        sc_param->sc_opcode = SCOP_RSENSE;
        sc_param->sc_count = count;

        sc_go(intr, sc_param, ie, sc_param);
}
#endif

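/*
 * Command timeout handler.  Aborting the transfer is not implemented
 * yet; we only report the timeout.
 */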
void
cxd1185_timeout(arg)
        void *arg;
{
        struct sc_scb *scb = arg;
        struct scsipi_xfer *xs = scb->xs;
        struct scsipi_periph *periph = xs->xs_periph;
        int chan;

        chan = periph->periph_target;

        printf("sc: timeout ch=%d\n", chan);

        /* XXX abort transfer and ... */
}
481