xref: /netbsd-src/sys/dev/pci/isp_pci.c (revision 1394f01b4a9e99092957ca5d824d67219565d9b5)
1 /*	$NetBSD: isp_pci.c,v 1.14 1997/06/08 22:15:34 thorpej Exp $	*/
2 
3 /*
4  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
5  *
6  * Copyright (c) 1997 by Matthew Jacob
7  * NASA AMES Research Center
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice immediately at the beginning of the file, without modification,
15  *    this list of conditions, and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
26  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #include <sys/kernel.h>
39 #include <sys/queue.h>
40 #include <sys/device.h>
41 #include <machine/bus.h>
42 #include <machine/intr.h>
43 #include <scsi/scsi_all.h>
44 #include <scsi/scsiconf.h>
45 #include <dev/pci/pcireg.h>
46 #include <dev/pci/pcivar.h>
47 #include <dev/pci/pcidevs.h>
48 #include <vm/vm.h>
49 
50 #include <dev/ic/ispreg.h>
51 #include <dev/ic/ispvar.h>
52 #include <dev/ic/ispmbox.h>
53 #include <dev/microcode/isp/asm_pci.h>
54 
/*
 * Bus-specific hooks handed to the machine-independent ISP driver
 * through the mdvec table below: register access, mailbox/command
 * DMA setup and teardown, and post-reset fixup.
 */
static u_int16_t isp_pci_rd_reg __P((struct ispsoftc *, int));
static void isp_pci_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_pci_mbxdma __P((struct ispsoftc *));
static int isp_pci_dmasetup __P((struct ispsoftc *, struct scsi_xfer *,
	ispreq_t *, u_int8_t *, u_int8_t));
static void isp_pci_dmateardown __P((struct ispsoftc *, struct scsi_xfer *,
	u_int32_t));

static void isp_pci_reset1 __P((struct ispsoftc *));
64 
/*
 * PCI personality vector for the common ISP driver.  The function
 * slots are the hooks declared above; the tail entries are the RISC
 * firmware image and its load parameters from asm_pci.h, followed by
 * a BIU configuration value and the clock rate.
 */
static struct ispmdvec mdvec = {
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_pci_dmateardown,
	NULL,			/* unused hook in this bus attachment */
	isp_pci_reset1,
	ISP_RISC_CODE,		/* firmware image (asm_pci.h) */
	ISP_CODE_LENGTH,	/* firmware length (asm_pci.h) */
	ISP_CODE_ORG,		/* firmware load origin (asm_pci.h) */
	BIU_PCI_CONF1_FIFO_16 | BIU_BURST_ENABLE,
	60	/* MAGIC- all known PCI card implementations are 60MHz */
};
79 
/*
 * Combined PCI ID word (product in the high 16 bits, vendor in the
 * low 16) that the probe routine compares against pa->pa_id.
 */
#define	PCI_QLOGIC_ISP	\
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define IO_MAP_REG	0x10	/* BAR for I/O-space register access */
#define MEM_MAP_REG	0x14	/* BAR for memory-space register access */
85 
86 
#ifdef	__BROKEN_INDIRECT_CONFIG
static int isp_pci_probe __P((struct device *, void *, void *));
#else
static int isp_pci_probe __P((struct device *, struct cfdata *, void *));
#endif
static void isp_pci_attach __P((struct device *, struct device *, void *));

/*
 * PCI-specific softc.  The common ISP softc MUST be the first member:
 * the core driver and the functions below cast freely between
 * (struct ispsoftc *) and (struct isp_pcisoftc *).
 */
struct isp_pcisoftc {
	struct ispsoftc		pci_isp;	/* common part -- must be first */
	bus_space_tag_t		pci_st;		/* register space tag */
	bus_space_handle_t	pci_sh;		/* register space handle */
	bus_dma_tag_t		pci_dmat;	/* DMA tag from autoconf */
	bus_dmamap_t		pci_rquest_dmap;	/* request queue map */
	bus_dmamap_t		pci_result_dmap;	/* result queue map */
	bus_dmamap_t		pci_xfer_dmap[RQUEST_QUEUE_LEN]; /* per-command maps */
	void *			pci_ih;		/* established interrupt handle */
};

/* Autoconfiguration glue. */
struct cfattach isp_pci_ca = {
	sizeof (struct isp_pcisoftc), isp_pci_probe, isp_pci_attach
};
108 
109 static int
110 isp_pci_probe(parent, match, aux)
111         struct device *parent;
112 #ifdef	__BROKEN_INDIRECT_CONFIG
113         void *match, *aux;
114 #else
115         struct cfdata *match;
116 	void *aux;
117 #endif
118 {
119         struct pci_attach_args *pa = aux;
120 
121 	if (pa->pa_id == PCI_QLOGIC_ISP) {
122 		return (1);
123 	} else {
124 		return (0);
125 	}
126 }
127 
128 
/*
 * Attach: map the chip registers (preferring memory space over I/O
 * space), reset and initialize the chip, hook up the interrupt,
 * create the per-command DMA maps, then hand off to the generic
 * isp_attach().  Failures after isp_init() are unwound with
 * isp_uninit().
 */
static void
isp_pci_attach(parent, self, aux)
        struct device *parent, *self;
        void *aux;
{
	struct pci_attach_args *pa = aux;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) self;
	bus_space_tag_t st, iot, memt;
	bus_space_handle_t sh, ioh, memh;
	pci_intr_handle_t ih;
	const char *intrstr;
	int ioh_valid, memh_valid;
	int i;

	/* Try both BARs; either mapping may legitimately fail. */
	ioh_valid = (pci_mapreg_map(pa, IO_MAP_REG,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, MEM_MAP_REG,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	/* Prefer the memory mapping if both succeeded. */
	if (memh_valid) {
		st = memt;
		sh = memh;
	} else if (ioh_valid) {
		st = iot;
		sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}
	printf("\n");

	pcs->pci_st = st;
	pcs->pci_sh = sh;
	pcs->pci_dmat = pa->pa_dmat;
	pcs->pci_isp.isp_mdvec = &mdvec;
	/* isp_reset()/isp_init() record their progress in isp_state. */
	isp_reset(&pcs->pci_isp);
	if (pcs->pci_isp.isp_state != ISP_RESETSTATE) {
		return;
	}
	isp_init(&pcs->pci_isp);
	if (pcs->pci_isp.isp_state != ISP_INITSTATE) {
		isp_uninit(&pcs->pci_isp);
		return;
	}

	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
			 pa->pa_intrline, &ih)) {
		printf("%s: couldn't map interrupt\n", pcs->pci_isp.isp_name);
		isp_uninit(&pcs->pci_isp);
		return;
	}

	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (intrstr == NULL)
		intrstr = "<I dunno>";
	pcs->pci_ih =
	  pci_intr_establish(pa->pa_pc, ih, IPL_BIO, isp_intr, &pcs->pci_isp);
	if (pcs->pci_ih == NULL) {
		printf("%s: couldn't establish interrupt at %s\n",
			pcs->pci_isp.isp_name, intrstr);
		isp_uninit(&pcs->pci_isp);
		return;
	}
	printf("%s: interrupting at %s\n", pcs->pci_isp.isp_name, intrstr);

	/*
	 * Create the DMA maps for the data transfers.  One map per
	 * request-queue slot; isp_pci_dmasetup() indexes them by
	 * request handle.
	 */
	for (i = 0; i < RQUEST_QUEUE_LEN; i++) {
		if (bus_dmamap_create(pcs->pci_dmat, MAXPHYS,
		    (MAXPHYS / NBPG) + 1, MAXPHYS, 0, BUS_DMA_NOWAIT,
		    &pcs->pci_xfer_dmap[i])) {
			printf("%s: can't create dma maps\n",
			    pcs->pci_isp.isp_name);
			isp_uninit(&pcs->pci_isp);
			return;
		}
	}

	/*
	 * Do Generic attach now.
	 */
	isp_attach(&pcs->pci_isp);
	if (pcs->pci_isp.isp_state != ISP_RUNSTATE) {
		isp_uninit(&pcs->pci_isp);
	}
}
218 
/*
 * Offsets of the chip's register blocks within the mapped PCI
 * register window.  Note that SXP and RISC share offset 0x80; the
 * SXP block is not accessible yet (see the panics below).
 */
#define  PCI_BIU_REGS_OFF		0x00
#define	 PCI_MBOX_REGS_OFF		0x70
#define	 PCI_SXP_REGS_OFF		0x80
#define	 PCI_RISC_REGS_OFF		0x80
223 
224 static u_int16_t
225 isp_pci_rd_reg(isp, regoff)
226 	struct ispsoftc *isp;
227 	int regoff;
228 {
229 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
230 	int offset;
231 	if ((regoff & BIU_BLOCK) != 0) {
232 		offset = PCI_BIU_REGS_OFF;
233 	} else if ((regoff & MBOX_BLOCK) != 0) {
234 		offset = PCI_MBOX_REGS_OFF;
235 	} else if ((regoff & SXP_BLOCK) != 0) {
236 		offset = PCI_SXP_REGS_OFF;
237 		/*
238 		 * XXX
239 		 */
240 		panic("SXP Registers not accessible yet!");
241 	} else {
242 		offset = PCI_RISC_REGS_OFF;
243 	}
244 	regoff &= 0xff;
245 	offset += regoff;
246 	return bus_space_read_2(pcs->pci_st, pcs->pci_sh, offset);
247 }
248 
249 static void
250 isp_pci_wr_reg(isp, regoff, val)
251 	struct ispsoftc *isp;
252 	int regoff;
253 	u_int16_t val;
254 {
255 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
256 	int offset;
257 	if ((regoff & BIU_BLOCK) != 0) {
258 		offset = PCI_BIU_REGS_OFF;
259 	} else if ((regoff & MBOX_BLOCK) != 0) {
260 		offset = PCI_MBOX_REGS_OFF;
261 	} else if ((regoff & SXP_BLOCK) != 0) {
262 		offset = PCI_SXP_REGS_OFF;
263 		/*
264 		 * XXX
265 		 */
266 		panic("SXP Registers not accessible yet!");
267 	} else {
268 		offset = PCI_RISC_REGS_OFF;
269 	}
270 	regoff &= 0xff;
271 	offset += regoff;
272 	bus_space_write_2(pcs->pci_st, pcs->pci_sh, offset, val);
273 }
274 
275 static int
276 isp_pci_mbxdma(isp)
277 	struct ispsoftc *isp;
278 {
279 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
280 	bus_dma_segment_t seg;
281 	bus_size_t len;
282 	int rseg;
283 
284 	/*
285 	 * Allocate and map the request queue.
286 	 */
287 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
288 	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
289 	      BUS_DMA_NOWAIT) ||
290 	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
291 	      (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMAMEM_NOSYNC))
292 		return (1);
293 	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
294 	      &pci->pci_rquest_dmap) ||
295 	    bus_dmamap_load(pci->pci_dmat, pci->pci_rquest_dmap,
296 	      (caddr_t)isp->isp_rquest, len, NULL, BUS_DMA_NOWAIT))
297 		return (1);
298 
299 	isp->isp_rquest_dma = pci->pci_rquest_dmap->dm_segs[0].ds_addr;
300 
301 	/*
302 	 * Allocate and map the result queue.
303 	 */
304 	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
305 	if (bus_dmamem_alloc(pci->pci_dmat, len, NBPG, 0, &seg, 1, &rseg,
306 	      BUS_DMA_NOWAIT) ||
307 	    bus_dmamem_map(pci->pci_dmat, &seg, rseg, len,
308 	      (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMAMEM_NOSYNC))
309 		return (1);
310 	if (bus_dmamap_create(pci->pci_dmat, len, 1, len, 0, BUS_DMA_NOWAIT,
311 	      &pci->pci_result_dmap) ||
312 	    bus_dmamap_load(pci->pci_dmat, pci->pci_result_dmap,
313 	      (caddr_t)isp->isp_result, len, NULL, BUS_DMA_NOWAIT))
314 		return (1);
315 
316 	isp->isp_result_dma = pci->pci_result_dmap->dm_segs[0].ds_addr;
317 
318 	return (0);
319 }
320 
/*
 * Set up DMA for one command: load the transfer into the per-handle
 * DMA map, describe the first ISP_RQDSEG segments in the request
 * entry itself, and spill any remaining segments into continuation
 * entries on the request queue.  *iptrp is the queue input pointer,
 * advanced for each continuation entry consumed; optr is the current
 * output pointer, used to detect queue overflow.
 *
 * Returns 0 on success, or an errno (the bus_dmamap_load() error, or
 * EFBIG when the request queue fills up).
 */
static int
isp_pci_dmasetup(isp, xs, rq, iptrp, optr)
	struct ispsoftc *isp;
	struct scsi_xfer *xs;
	ispreq_t *rq;
	u_int8_t *iptrp;
	u_int8_t optr;
{
	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
	bus_dmamap_t dmap = pci->pci_xfer_dmap[rq->req_handle];
	ispcontreq_t *crq;
	int segcnt, seg, error, ovseg;

	/* No data phase: nothing to map. */
	if (xs->datalen == 0) {
		rq->req_seg_count = 1;
		rq->req_flags |= REQFLAG_DATA_IN;
		return (0);
	}

	/* The handle indexes pci_xfer_dmap[]; a bad one is a driver bug. */
	if (rq->req_handle >= RQUEST_QUEUE_LEN) {
		panic("%s: bad handle (%d) in isp_pci_dmasetup\n",
		    isp->isp_name, rq->req_handle);
		/* NOTREACHED */
	}

	if (xs->flags & SCSI_DATA_IN) {
		rq->req_flags |= REQFLAG_DATA_IN;
	} else {
		rq->req_flags |= REQFLAG_DATA_OUT;
	}

	/* May sleep unless the caller asked for SCSI_NOSLEEP. */
	error = bus_dmamap_load(pci->pci_dmat, dmap, xs->data, xs->datalen,
	    NULL, xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error)
		return (error);

	segcnt = dmap->dm_nsegs;

	/* First ISP_RQDSEG segments fit in the request entry itself. */
	for (seg = 0, rq->req_seg_count = 0;
	    seg < segcnt && rq->req_seg_count < ISP_RQDSEG;
	    seg++, rq->req_seg_count++) {
		rq->req_dataseg[rq->req_seg_count].ds_count =
		    dmap->dm_segs[seg].ds_len;
		rq->req_dataseg[rq->req_seg_count].ds_base =
		    dmap->dm_segs[seg].ds_addr;
	}

	if (seg == segcnt)
		goto mapsync;

	/*
	 * Overflow segments go into continuation entries, up to
	 * ISP_CDSEG per entry, claiming queue slots as we go.
	 */
	do {
		crq = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest,
		    *iptrp);
		*iptrp = (*iptrp + 1) & (RQUEST_QUEUE_LEN - 1);
		if (*iptrp == optr) {
			/* Queue full: undo the mapping and bail. */
			printf("%s: Request Queue Overflow++\n",
			       isp->isp_name);
			bus_dmamap_unload(pci->pci_dmat, dmap);
			return (EFBIG);
		}
		rq->req_header.rqs_entry_count++;
		bzero((void *)crq, sizeof (*crq));
		crq->req_header.rqs_entry_count = 1;
		crq->req_header.rqs_entry_type = RQSTYPE_DATASEG;

		for (ovseg = 0; seg < segcnt && ovseg < ISP_CDSEG;
		    rq->req_seg_count++, seg++, ovseg++) {
			crq->req_dataseg[ovseg].ds_count =
			    dmap->dm_segs[seg].ds_len;
			crq->req_dataseg[ovseg].ds_base =
			    dmap->dm_segs[seg].ds_addr;
		}
	} while (seg < segcnt);

 mapsync:
	/* Make the buffer visible to the device before it starts DMA. */
	bus_dmamap_sync(pci->pci_dmat, dmap, xs->flags & SCSI_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	return (0);
}
400 
401 static void
402 isp_pci_dmateardown(isp, xs, handle)
403 	struct ispsoftc *isp;
404 	struct scsi_xfer *xs;
405 	u_int32_t handle;
406 {
407 	struct isp_pcisoftc *pci = (struct isp_pcisoftc *)isp;
408 	bus_dmamap_t dmap = pci->pci_xfer_dmap[handle];
409 
410 	bus_dmamap_sync(pci->pci_dmat, dmap, xs->flags & SCSI_DATA_IN ?
411 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
412 
413 	bus_dmamap_unload(pci->pci_dmat, dmap);
414 }
415 
/*
 * Post-reset fixup hook: write the disable-BIOS command to the HCCR
 * register so the board's BIOS stays out of the way after a reset.
 */
static void
isp_pci_reset1(isp)
	struct ispsoftc *isp;
{
	/* Make sure the BIOS is disabled */
	isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS);
}
423