xref: /netbsd-src/sys/arch/newsmips/dev/sc_wrap.c (revision e5fbc36ada28f9b9a5836ecffaf4a06aa1ebb687)
1 /*	$NetBSD: sc_wrap.c,v 1.34 2023/12/20 15:29:05 thorpej Exp $	*/
2 
3 /*
4  * This driver is slow!  Need to rewrite.
5  */
6 
7 #include <sys/cdefs.h>
8 __KERNEL_RCSID(0, "$NetBSD: sc_wrap.c,v 1.34 2023/12/20 15:29:05 thorpej Exp $");
9 
10 #include <sys/types.h>
11 #include <sys/param.h>
12 #include <sys/systm.h>
13 #include <sys/kernel.h>
14 #include <sys/device.h>
15 #include <sys/proc.h>
16 #include <sys/buf.h>
17 
18 #include <uvm/uvm_extern.h>
19 
20 #include <dev/scsipi/scsi_all.h>
21 #include <dev/scsipi/scsipi_all.h>
22 #include <dev/scsipi/scsiconf.h>
23 #include <dev/scsipi/scsi_message.h>
24 
25 #include <newsmips/dev/hbvar.h>
26 #include <newsmips/dev/scsireg.h>
27 #include <newsmips/dev/dmac_0448.h>
28 #include <newsmips/dev/screg_1185.h>
29 
30 #include <machine/adrsmap.h>
31 #include <machine/autoconf.h>
32 #include <machine/machConst.h>
33 
34 #include <mips/cache.h>
35 
/* autoconf(9) match/attach glue. */
static int cxd1185_match(device_t, cfdata_t, void *);
static void cxd1185_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(sc, sizeof(struct sc_softc),
    cxd1185_match, cxd1185_attach, NULL, NULL);

void cxd1185_init(struct sc_softc *);
/* scb free-list management and scsipi glue (definitions below). */
static void free_scb(struct sc_softc *, struct sc_scb *);
static struct sc_scb *get_scb(struct sc_softc *, int);
static void sc_scsipi_request(struct scsipi_channel *,
    scsipi_adapter_req_t, void *);
static int sc_poll(struct sc_softc *, int, int);
static void sc_sched(struct sc_softc *);
void sc_done(struct sc_scb *);
int sc_intr(void *);
static void cxd1185_timeout(void *);

/* Low-level routines provided by the machine-dependent SCSI code. */
extern void sc_send(struct sc_scb *, int, int);
extern int scintr(void);
extern void scsi_hardreset(void);
extern int sc_busy(struct sc_softc *, int);
extern paddr_t kvtophys(vaddr_t);

/* IDENTIFY message modifier: permit targets to disconnect. */
static int sc_disconnect = IDT_DISCON;
60 
61 int
cxd1185_match(device_t parent,cfdata_t cf,void * aux)62 cxd1185_match(device_t parent, cfdata_t cf, void *aux)
63 {
64 	struct hb_attach_args *ha = aux;
65 
66 	if (strcmp(ha->ha_name, "sc"))
67 		return 0;
68 
69 	return 1;
70 }
71 
/*
 * Attach the CXD1185 SCSI controller: fill in the scsipi adapter and
 * channel structures, seed the scb free list, reset the controller,
 * hook up the DMA interrupt and probe the bus.
 */
void
cxd1185_attach(device_t parent, device_t self, void *aux)
{
	struct sc_softc *sc = device_private(self);
	struct hb_attach_args *ha = aux;
	struct sc_scb *scb;
	int i, intlevel;

	sc->sc_dev = self;

	intlevel = ha->ha_level;
	if (intlevel == -1) {
#if 0
		aprint_error(": interrupt level not configured\n");
		return;
#else
		/* Proceed with level 0 instead of failing the attach. */
		aprint_normal(": interrupt level not configured; using");
		intlevel = 0;
#endif
	}
	aprint_normal(" level %d\n", intlevel);

	/*
	 * NOTE(review): bit 3 of the ID register appears to identify the
	 * 1185AQ variant -- confirm against the CXD1185 data sheet.
	 */
	if (sc_idenr & 0x08)
		sc->scsi_1185AQ = 1;
	else
		sc->scsi_1185AQ = 0;

	sc->sc_adapter.adapt_dev = self;
	sc->sc_adapter.adapt_nchannels = 1;
	sc->sc_adapter.adapt_openings = 7;
	sc->sc_adapter.adapt_max_periph = 1;
	sc->sc_adapter.adapt_ioctl = NULL;
	sc->sc_adapter.adapt_minphys = minphys;
	sc->sc_adapter.adapt_request = sc_scsipi_request;

	memset(&sc->sc_channel, 0, sizeof(sc->sc_channel));
	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = 8;
	sc->sc_channel.chan_nluns = 8;
	sc->sc_channel.chan_id = 7;	/* host adapter's own SCSI ID */

	TAILQ_INIT(&sc->ready_list);
	TAILQ_INIT(&sc->free_list);

	/* Put every statically allocated scb on the free list. */
	scb = sc->sc_scb;
	for (i = 0; i < 24; i++) {	/* XXX 24 */
		TAILQ_INSERT_TAIL(&sc->free_list, scb, chain);
		scb++;
	}

	cxd1185_init(sc);
	DELAY(100000);	/* let the bus settle after the hard reset */

	hb_intr_establish(intlevel, INTEN1_DMA, IPL_BIO, sc_intr, sc);

	config_found(self, &sc->sc_channel, scsiprint, CFARGS_NONE);
}
131 
132 void
cxd1185_init(struct sc_softc * sc)133 cxd1185_init(struct sc_softc *sc)
134 {
135 	int i;
136 
137 	for (i = 0; i < 8; i++)
138 		sc->inuse[i] = 0;
139 
140 	scsi_hardreset();
141 }
142 
143 void
free_scb(struct sc_softc * sc,struct sc_scb * scb)144 free_scb(struct sc_softc *sc, struct sc_scb *scb)
145 {
146 	int s;
147 
148 	s = splbio();
149 
150 	TAILQ_INSERT_HEAD(&sc->free_list, scb, chain);
151 
152 	/*
153 	 * If there were none, wake anybody waiting for one to come free,
154 	 * starting with queued entries.
155 	 */
156 	if (scb->chain.tqe_next == 0)
157 		wakeup(&sc->free_list);
158 
159 	splx(s);
160 }
161 
162 struct sc_scb *
get_scb(struct sc_softc * sc,int flags)163 get_scb(struct sc_softc *sc, int flags)
164 {
165 	int s;
166 	struct sc_scb *scb;
167 
168 	s = splbio();
169 
170 	while ((scb = sc->free_list.tqh_first) == NULL &&
171 		(flags & XS_CTL_NOSLEEP) == 0)
172 		tsleep(&sc->free_list, PRIBIO, "sc_scb", 0);
173 	if (scb) {
174 		TAILQ_REMOVE(&sc->free_list, scb, chain);
175 	}
176 
177 	splx(s);
178 	return scb;
179 }
180 
/*
 * scsipi adapter request entry point.
 *
 * ADAPTER_REQ_RUN_XFER: allocate an scb, queue it on the ready list
 * and kick the scheduler.  When XS_CTL_POLL is set (autoconfiguration)
 * the transfer is polled to completion here, with one retry on
 * timeout.  The other two request types are not supported.
 */
void
sc_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct sc_softc *sc = device_private(chan->chan_adapter->adapt_dev);
	struct sc_scb *scb;
	int flags, s;
	int target;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;

		flags = xs->xs_control;
		/* get_scb() can only return NULL for XS_CTL_NOSLEEP. */
		if ((scb = get_scb(sc, flags)) == NULL)
			panic("%s: no scb", __func__);

		scb->xs = xs;
		scb->flags = 0;
		scb->sc_ctag = 0;
		scb->sc_coffset = 0;
		scb->istatus = 0;
		scb->tstatus = 0;
		scb->message = 0;
		memset(scb->msgbuf, 0, sizeof(scb->msgbuf));

		s = splbio();

		TAILQ_INSERT_TAIL(&sc->ready_list, scb, chain);
		sc_sched(sc);
		splx(s);

		if (flags & XS_CTL_POLL) {
			target = periph->periph_target;
			/* Retry the poll once before giving up. */
			if (sc_poll(sc, target, xs->timeout)) {
				printf("sc: timeout (retry)\n");
				if (sc_poll(sc, target, xs->timeout)) {
					printf("sc: timeout\n");
				}
			}
			/* called during autoconfig only... */
			mips_dcache_wbinv_all();	/* Flush DCache */
		}
		return;
	case ADAPTER_REQ_GROW_RESOURCES:
		/* XXX Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* XXX Not supported. */
		return;
	}
}
236 
/*
 * Used when interrupt driven I/O isn't allowed, e.g. during boot.
 *
 * Busy-wait until target `chan` goes idle, servicing DMA-completion
 * status by hand; `count` is the timeout in ~1 ms polling steps.
 * Returns 1 on timeout, 0 once the target is no longer busy.
 */
int
sc_poll(struct sc_softc *sc, int chan, int count)
{
	volatile uint8_t *int_stat = (void *)INTST1;
	volatile uint8_t *int_clear = (void *)INTCLR1;

	while (sc_busy(sc, chan)) {
		if (*int_stat & INTST1_DMA) {
		    *int_clear = INTST1_DMA;
		    if (dmac_gstat & CH_INT(CH_SCSI)) {
			if (dmac_gstat & CH_MRQ(CH_SCSI)) {
			    /* Memory request still pending; give it time. */
			    DELAY(50);
			    if (dmac_gstat & CH_MRQ(CH_SCSI))
				printf("dma_poll\n");
			}
			DELAY(10);
			scintr();
		    }
		}
		DELAY(1000);
		count--;
		if (count <= 0)
			return 1;
	}
	return 0;
}
266 
/*
 * Walk the ready list and issue every scb whose target is idle.
 * For data transfers, a va->pa page table is built for the DMA
 * engine first; the command itself is sent via sc_send() under a
 * 10-second watchdog.
 */
void
sc_sched(struct sc_softc *sc)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int ie = 0;
	int flags;
	int chan, lun;
	struct sc_scb *scb, *nextscb;

	scb = sc->ready_list.tqh_first;
start:
	if (scb == NULL)
		return;

	xs = scb->xs;
	periph = xs->xs_periph;
	chan = periph->periph_target;
	flags = xs->xs_control;

	/* Target already has a command outstanding; try the next scb. */
	if (sc->inuse[chan]) {
		scb = scb->chain.tqe_next;
		goto start;
	}
	sc->inuse[chan] = 1;

	if (flags & XS_CTL_RESET)
		printf("SCSI RESET\n");

	lun = periph->periph_lun;

	scb->identify = MSG_IDENT | sc_disconnect | (lun & IDT_DRMASK);
	scb->sc_ctrnscnt = xs->datalen;

	/* make va->pa mapping table for DMA */
	if (xs->datalen > 0) {
		uint32_t pn, pages, offset;
		int i;
		vaddr_t va;

#if 0
		memset(&sc->sc_map[chan], 0, sizeof(struct sc_map));
#endif

		va = (vaddr_t)xs->data;

		offset = va & PGOFSET;
		/* Pages spanned by the (possibly unaligned) buffer. */
		pages = (offset + xs->datalen + PAGE_SIZE -1 ) >> PGSHIFT;
		if (pages >= NSCMAP)
			panic("sc_map: Too many pages");

		for (i = 0; i < pages; i++) {
			pn = kvtophys(va) >> PGSHIFT;
			sc->sc_map[chan].mp_addr[i] = pn;
			va += PAGE_SIZE;
		}

		sc->sc_map[chan].mp_offset = offset;
		sc->sc_map[chan].mp_pages = pages;
		scb->sc_map = &sc->sc_map[chan];
	}

	/* Enable the SCSI interrupt unless the caller is polling. */
	if ((flags & XS_CTL_POLL) == 0)
		ie = SCSI_INTEN;

	/* With no data phase, point the controller at the message buffer. */
	if (xs->data)
		scb->sc_cpoint = (void *)xs->data;
	else
		scb->sc_cpoint = scb->msgbuf;
	scb->scb_softc = sc;

	/*
	 * NOTE(review): the callout is stopped right after sc_send()
	 * returns, which suggests sc_send() runs the command to
	 * completion synchronously -- confirm in the MD SCSI code.
	 */
	callout_reset(&scb->xs->xs_callout, hz * 10, cxd1185_timeout, scb);
	sc_send(scb, chan, ie);
	callout_stop(&scb->xs->xs_callout);

	/* Remember the successor before unlinking this scb. */
	nextscb = scb->chain.tqe_next;

	TAILQ_REMOVE(&sc->ready_list, scb, chain);

	scb = nextscb;

	goto start;
}
350 
351 void
sc_done(struct sc_scb * scb)352 sc_done(struct sc_scb *scb)
353 {
354 	struct scsipi_xfer *xs = scb->xs;
355 	struct scsipi_periph *periph = xs->xs_periph;
356 	struct sc_softc *sc;
357 
358 	sc = device_private(periph->periph_channel->chan_adapter->adapt_dev);
359 	xs->resid = 0;
360 	xs->status = 0;
361 
362 	if (scb->istatus != INST_EP) {
363 		if (scb->istatus == (INST_EP|INST_TO))
364 			xs->error = XS_SELTIMEOUT;
365 		else {
366 			printf("SC(i): [istatus=0x%x, tstatus=0x%x]\n",
367 				scb->istatus, scb->tstatus);
368 			xs->error = XS_DRIVER_STUFFUP;
369 		}
370 	}
371 
372 	switch (scb->tstatus) {
373 
374 	case TGST_GOOD:
375 		break;
376 
377 	case TGST_CC:
378 		xs->status = SCSI_CHECK;
379 		if (xs->error == 0)
380 			xs->error = XS_BUSY;
381 		break;
382 
383 	default:
384 		printf("SC(t): [istatus=0x%x, tstatus=0x%x]\n",
385 			scb->istatus, scb->tstatus);
386 		break;
387 	}
388 
389 	scsipi_done(xs);
390 	free_scb(sc, scb);
391 	sc->inuse[periph->periph_target] = 0;
392 	sc_sched(sc);
393 }
394 
395 int
sc_intr(void * v)396 sc_intr(void *v)
397 {
398 	/* struct sc_softc *sc = v; */
399 	volatile uint8_t *gsp = (uint8_t *)DMAC_GSTAT;
400 	u_int gstat = *gsp;
401 	int mrqb, i;
402 
403 	if ((gstat & CH_INT(CH_SCSI)) == 0)
404 		return 0;
405 
406 	/*
407 	 * when DMA interrupt occurs there remain some untransferred data.
408 	 * wait data transfer completion.
409 	 */
410 	mrqb = (gstat & CH_INT(CH_SCSI)) << 1;
411 	if (gstat & mrqb) {
412 		/*
413 		 * XXX SHOULD USE DELAY()
414 		 */
415 		for (i = 0; i < 50; i++)
416 			;
417 		if (*gsp & mrqb)
418 			printf("%s: MRQ\n", __func__);
419 	}
420 	scintr();
421 
422 	return 1;
423 }
424 
425 
#if 0
/*
 * SCOP_RSENSE request
 *
 * Build and issue a REQUEST SENSE command for `lun`, reading `count`
 * bytes into `param`.  (Disabled -- apparently retained from the
 * pre-scsipi driver for reference.)
 */
void
scop_rsense(int intr, struct scsi *sc_param, int lun, int ie, int count,
    void *param)
{

	memset(sc_param, 0, sizeof(struct scsi));
	sc_param->identify = MSG_IDENT | sc_disconnect | (lun & IDT_DRMASK);
	sc_param->sc_lun = lun;

	sc_param->sc_cpoint = (uint8_t *)param;
	sc_param->sc_ctrnscnt = count;

	/* sc_cdb */
	sc_param->sc_opcode = SCOP_RSENSE;
	sc_param->sc_count = count;

	sc_go(intr, sc_param, ie, sc_param);
}
#endif
449 
450 void
cxd1185_timeout(void * arg)451 cxd1185_timeout(void *arg)
452 {
453 	struct sc_scb *scb = arg;
454 	struct scsipi_xfer *xs = scb->xs;
455 	struct scsipi_periph *periph = xs->xs_periph;
456 	int chan;
457 
458 	chan = periph->periph_target;
459 
460 	printf("sc: timeout ch=%d\n", chan);
461 
462 	/* XXX abort transfer and ... */
463 }
464