/*	$NetBSD: dpt.c,v 1.39 2003/01/31 00:26:29 thorpej Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
 * Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
 * Copyright (c) 2000 Adaptec Corporation
 * All rights reserved.
 *
 * TERMS AND CONDITIONS OF USE
 *
 * Redistribution and use in source form, with or without modification, are
 * permitted provided that redistributions of source code must retain the
 * above copyright notice, this list of conditions and the following disclaimer.
 *
 * This software is provided `as is' by Adaptec and any express or implied
 * warranties, including, but not limited to, the implied warranties of
 * merchantability and fitness for a particular purpose, are disclaimed. In no
 * event shall Adaptec be liable for any direct, indirect, incidental, special,
 * exemplary or consequential damages (including, but not limited to,
 * procurement of substitute goods or services; loss of use, data, or profits;
 * or business interruptions) however caused and on any theory of liability,
 * whether in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this driver software, even
 * if advised of the possibility of such damage.
 */

/*
 * Portions of this code fall under the following copyright:
 *
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.39 2003/01/31 00:26:29 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#ifdef i386
#include <machine/pio.h>
#endif

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/dptreg.h>
#include <dev/ic/dptvar.h>

#include <dev/i2o/dptivar.h>

#ifdef DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	dpt_inb(x, o)		\
    bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
#define	dpt_outb(x, o, d)	\
    bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))

static const char * const dpt_cname[] = {
	"3334", "SmartRAID IV",
	"3332", "SmartRAID IV",
	"2144", "SmartCache IV",
	"2044", "SmartCache IV",
	"2142", "SmartCache IV",
	"2042", "SmartCache IV",
	"2041", "SmartCache IV",
	"3224", "SmartRAID III",
	"3222", "SmartRAID III",
	"3021", "SmartRAID III",
	"2124", "SmartCache III",
	"2024", "SmartCache III",
	"2122", "SmartCache III",
	"2022", "SmartCache III",
	"2021", "SmartCache III",
	"2012", "SmartCache Plus",
	"2011", "SmartCache Plus",
	NULL,   "<unknown>",
};

static void	*dpt_sdh;

dev_type_open(dptopen);
dev_type_ioctl(dptioctl);

const struct cdevsw dpt_cdevsw = {
	dptopen, nullclose, noread, nowrite, dptioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

extern struct cfdriver dpt_cd;

static struct dpt_sig dpt_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'},
	SIG_VERSION,
#if defined(i386)
	PROC_INTEL,
#elif defined(powerpc)
	PROC_POWERPC,
#elif defined(alpha)
	PROC_ALPHA,
#elif defined(__mips__)
	PROC_MIPS,
#elif defined(sparc64)
	PROC_ULTRASPARC,
#else
	0xff,
#endif
#if defined(i386)
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#else
	0,
#endif
	FT_HBADRVR,
	0,
	OEM_DPT,
	OS_FREE_BSD,	/* XXX */
	CAP_ABOVE16MB,
	DEV_ALL,
	ADF_ALL_EATA,
	0,
	0,
	DPT_VERSION,
	DPT_REVISION,
	DPT_SUBREVISION,
	DPT_MONTH,
	DPT_DAY,
	DPT_YEAR,
	""		/* Will be filled later */
};

static void	dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
static void	dpt_ctlrinfo(struct dpt_softc *, struct dpt_eata_ctlrinfo *);
static void	dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
static void	dpt_minphys(struct buf *);
static int	dpt_passthrough(struct dpt_softc *, struct eata_ucp *,
				struct proc *);
static void	dpt_scsipi_request(struct scsipi_channel *,
				   scsipi_adapter_req_t, void *);
static void	dpt_shutdown(void *);
static void	dpt_sysinfo(struct dpt_softc *, struct dpt_sysinfo *);
static int	dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);

static __inline__ struct dpt_ccb	*dpt_ccb_alloc(struct dpt_softc *);
static __inline__ void	dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);

static __inline__ struct dpt_ccb *
dpt_ccb_alloc(struct dpt_softc *sc)
{
	struct dpt_ccb *ccb;
	int s;

	s = splbio();
	ccb = SLIST_FIRST(&sc->sc_ccb_free);
	SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
	splx(s);

	return (ccb);
}

static __inline__ void
dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int s;

	ccb->ccb_flg = 0;
	ccb->ccb_savesp = NULL;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	splx(s);
}
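/*
 * Note that dpt_ccb_alloc() above does not handle an empty free list:
 * adapt_openings is set to sc_nccbs - 1 in dpt_init(), which leaves one
 * CCB spare for the driver's own private commands, so a free CCB should
 * always be available when a request arrives.
 */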
/*
 * Handle an interrupt from the HBA.
 */
int
dpt_intr(void *cookie)
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	volatile int junk;
	int forus;

	sc = cookie;
	sp = sc->sc_stp;
	forus = 0;

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
			break;
		forus = 1;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before HBA can reset HBA_AUX_INTR. */
		if (sp->sp_ccbid == -1) {
			DELAY(50);

			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);

			printf("%s: no status\n", sc->sc_dv.dv_xname);

			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    sc->sc_stpoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic. */
		if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
			printf("%s: bogus status (returned CCB id %d)\n",
			    sc->sc_dv.dv_xname, sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			junk = dpt_inb(sc, HA_STATUS);
			continue;
		}

		/* Sync up DMA map and cache cmd status. */
		ccb = sc->sc_ccbs + sp->sp_ccbid;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
		    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

		ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
		ccb->ccb_scsi_status = sp->sp_scsi_status;
		if (ccb->ccb_savesp != NULL)
			memcpy(ccb->ccb_savesp, sp, sizeof(*sp));

		/*
		 * Ack the interrupt and process the CCB.  If this
		 * is a private CCB it's up to dpt_ccb_poll() to
		 * notice.
		 */
		sp->sp_ccbid = -1;
		ccb->ccb_flg |= CCB_INTR;
		junk = dpt_inb(sc, HA_STATUS);
		if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
			dpt_ccb_done(sc, ccb);
		else if ((ccb->ccb_flg & CCB_WAIT) != 0)
			wakeup(ccb);
	}

	return (forus);
}
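/*
 * A bus front-end is expected to map the board's registers, read the EATA
 * configuration and hook dpt_intr() before calling dpt_init().  A rough
 * PCI attach sketch (hypothetical; the real glue lives in the bus-specific
 * front-ends such as dpt_pci.c and does considerably more error checking):
 *
 *	if (dpt_readcfg(sc) != 0)
 *		return;
 *	intrcookie = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
 *	    dpt_intr, sc);
 *	dpt_init(sc, intrstr);
 */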
/*
 * Initialize and attach the HBA.  This is the entry point from bus
 * specific probe-and-attach code.
 */
void
dpt_init(struct dpt_softc *sc, const char *intrstr)
{
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;
	struct eata_inquiry_data *ei;
	int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
	bus_dma_segment_t seg;
	struct eata_cfg *ec;
	struct dpt_ccb *ccb;
	char model[16];

	ec = &sc->sc_ec;
	sprintf(dpt_sig.dsDescription, "NetBSD %s DPT driver", osrelease);

	/*
	 * Allocate the CCB/status packet/scratch DMA map and load.
	 */
	sc->sc_nccbs =
	    min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
	    DPT_SCRATCH_SIZE + sizeof(struct eata_sp);

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to allocate CCBs, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
	    (caddr_t *)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error("%s: unable to map CCBs, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	/*
	 * One physically contiguous segment covering the whole control
	 * area (nsegments = 1, maxsegsz = mapsize).
	 */
	if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
	    1, mapsize, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error("%s: unable to create CCB DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to load CCB DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	sc->sc_stp = (struct eata_sp *)((caddr_t)sc->sc_ccbs + sc->sc_stpoff);
	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
	sc->sc_scr = (caddr_t)sc->sc_ccbs + sc->sc_scroff;
	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
	sc->sc_stp->sp_ccbid = -1;
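	/*
	 * The single allocation loaded above is carved up as follows;
	 * sc_stppa and sc_scrpa are the corresponding bus addresses:
	 *
	 *	offset 0           sc_stpoff         sc_scroff
	 *	+------------------+-----------------+-------------------+
	 *	| sc_nccbs CCBs    | struct eata_sp  | scratch area      |
	 *	| (struct dpt_ccb) | (status packet) | (DPT_SCRATCH_SIZE)|
	 *	+------------------+-----------------+-------------------+
	 */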
	/*
	 * Create the CCBs.
	 */
	SLIST_INIT(&sc->sc_ccb_free);
	memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);

	for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
		rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
		    DPT_SG_SIZE, DPT_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv) {
			aprint_error("%s: can't create ccb dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			break;
		}

		ccb->ccb_id = i;
		ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb);
		SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	}

	if (i == 0) {
		aprint_error("%s: unable to create CCBs\n",
		    sc->sc_dv.dv_xname);
		return;
	} else if (i != sc->sc_nccbs) {
		aprint_error("%s: %d/%d CCBs created!\n",
		    sc->sc_dv.dv_xname, i, sc->sc_nccbs);
		sc->sc_nccbs = i;
	}

	/* Set shutdownhook before we start any device activity. */
	if (dpt_sdh == NULL)
		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);

	/* Get the inquiry data from the HBA. */
	dpt_hba_inquire(sc, &ei);

	/*
	 * The autoconfiguration output resembles:
	 *
	 *	dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
	 *	dpt0: interrupting at irq 10
	 *	dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
	 */

	/* NUL-terminate the vendor field at the first space. */
	for (i = 0; i < 8 && ei->ei_vendor[i] != ' '; i++)
		;
	ei->ei_vendor[i] = '\0';

	/* Assemble the model number from the model and suffix fields. */
	for (i = 0; i < 7 && ei->ei_model[i] != ' '; i++)
		model[i] = ei->ei_model[i];
	for (j = 0; j < 7 && ei->ei_suffix[j] != ' '; i++, j++)
		model[i] = ei->ei_suffix[j];
	model[i] = '\0';

	/* Find the marketing name for the board. */
	for (i = 0; dpt_cname[i] != NULL; i += 2)
		if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
			break;

	aprint_normal("%s %s (%s)\n", ei->ei_vendor, dpt_cname[i + 1], model);

	if (intrstr != NULL)
		aprint_normal("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
	    EC_F3_MAX_CHANNEL_SHIFT;
	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
	    EC_F3_MAX_TARGET_SHIFT;

	aprint_normal("%s: %d queued commands, %d channel(s), adapter on ID(s)",
	    sc->sc_dv.dv_xname, sc->sc_nccbs, maxchannel + 1);

	for (i = 0; i <= maxchannel; i++) {
		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
		aprint_normal(" %d", sc->sc_hbaid[i]);
	}
	aprint_normal("\n");

	/*
	 * Reset the SCSI controller chip(s) and bus.  XXX Do we need to do
	 * this for each bus?
	 */
	if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);

	/* Fill in the scsipi_adapter. */
	adapt = &sc->sc_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dv;
	adapt->adapt_nchannels = maxchannel + 1;
	adapt->adapt_openings = sc->sc_nccbs - 1;
	adapt->adapt_max_periph = sc->sc_nccbs - 1;
	adapt->adapt_request = dpt_scsipi_request;
	adapt->adapt_minphys = dpt_minphys;

	for (i = 0; i <= maxchannel; i++) {
		/* Fill in the scsipi_channel. */
		chan = &sc->sc_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = maxtarget + 1;
		chan->chan_nluns = ec->ec_maxlun + 1;
		chan->chan_id = sc->sc_hbaid[i];
		config_found(&sc->sc_dv, chan, scsiprint);
	}
}
/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 */
int
dpt_readcfg(struct dpt_softc *sc)
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset. */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset (hba status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	while ((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
		/* RAID drives still spinning up? */
		if (dpt_inb(sc, HA_ERROR) != 'D' ||
		    dpt_inb(sc, HA_ERROR + 1) != 'P' ||
		    dpt_inb(sc, HA_ERROR + 2) != 'T') {
			printf("%s: HBA not ready\n", sc->sc_dv.dv_xname);
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 *
	 * Apparently certain firmware revisions won't DMA later on if we
	 * request the config data using PIO, but it makes it a lot easier
	 * as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
	i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear (hba status:%02x)\n",
		    sc->sc_dv.dv_xname, dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/* Begin reading. */
	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
		    - sizeof(ec->ec_cfglen);

	j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;

	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Defaults for older firmware... */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		printf("%s: HBA error\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		printf("%s: EATA signature mismatch\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
		printf("%s: ec_hba field invalid\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
		printf("%s: DMA not supported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	return (0);
}
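/*
 * The bare pointer arithmetic in dpt_readcfg() is an open-coded
 * offsetof().  The initial read length, in 16-bit words, is equivalent to:
 *
 *	i = (offsetof(struct eata_cfg, ec_cfglen) +
 *	    sizeof(ec->ec_cfglen)) >> 1;
 */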
/*
 * Our `shutdownhook' to cleanly shut down the HBA.  The HBA must flush all
 * data from its cache and mark array groups as clean.
 *
 * XXX This doesn't always work (i.e., the HBA may still be flushing after
 * we tell root that it's safe to power off).
 */
static void
dpt_shutdown(void *cookie)
{
	extern struct cfdriver dpt_cd;
	struct dpt_softc *sc;
	int i;

	printf("shutting down dpt devices...");

	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&dpt_cd, i)) == NULL)
			continue;
		dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
	}

	delay(10000*1000);
	printf(" done\n");
}

/*
 * Send an EATA command to the HBA.
 */
static int
dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
{
	u_int32_t pa;
	int i, s;

	s = splbio();

	for (i = 20000; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == 0) {
		splx(s);
		return (-1);
	}

	pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
	dpt_outb(sc, HA_DMA_BASE + 0, (pa      ) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 1, (pa >>  8) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);

	if (eatacmd == CP_IMMEDIATE)
		dpt_outb(sc, HA_ICMD, icmd);

	dpt_outb(sc, HA_COMMAND, eatacmd);

	splx(s);
	return (0);
}

/*
 * Wait for the HBA status register to reach a specific state.
 */
static int
dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
{

	for (ms *= 10; ms != 0; ms--) {
		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
			return (0);
		DELAY(100);
	}

	return (-1);
}

/*
 * Spin waiting for a command to finish.  The timeout value from the CCB is
 * used.  The CCB must be marked with CCB_PRIVATE, otherwise it will get
 * recycled before we get a look at it.
 */
static int
dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int i, s;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
#endif

	s = splbio();

	if ((ccb->ccb_flg & CCB_INTR) != 0) {
		splx(s);
		return (0);
	}

	/* 20 iterations of 50us each make up one millisecond. */
	for (i = ccb->ccb_timeout * 20; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
			dpt_intr(sc);
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			break;
		DELAY(50);
	}

	splx(s);
	return (i == 0);
}

/*
 * We have a command which has been processed by the HBA, so now we look to
 * see how the operation went.  CCBs marked CCB_PRIVATE are not passed here
 * by dpt_intr().
 */
static void
dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;

	xs = ccb->ccb_xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen != 0)
		dpt_ccb_unmap(sc, ccb);

	if (xs->error == XS_NOERROR) {
		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case SP_HBA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case SP_HBA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:
				printf("%s: HBA status %x\n",
				    sc->sc_dv.dv_xname, ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
			case SCSI_QUEUE_FULL:
				xs->error = XS_BUSY;
				break;
			default:
				scsipi_printaddr(xs->xs_periph);
				printf("SCSI status %x\n",
				    ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done. */
	dpt_ccb_free(sc, ccb);
	scsipi_done(xs);
}
/*
 * Specified CCB has timed out, abort it.
 */
static void
dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	int s;

	xs = ccb->ccb_xs;
	periph = xs->xs_periph;

	scsipi_printaddr(periph);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
	}

	splx(s);
}

/*
 * Map a data transfer.
 */
static int
dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;
	bus_dmamap_t xfer;
	bus_dma_segment_t *ds;
	struct eata_sg *sg;
	struct eata_cp *cp;
	int rv, i;

	xs = ccb->ccb_xs;
	xfer = ccb->ccb_dmamap_xfer;
	cp = &ccb->ccb_eata_cp;

	rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
	    ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));

	switch (rv) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		printf("%s: error %d loading map\n", sc->sc_dv.dv_xname, rv);
		break;
	}

	if (xs->error != XS_NOERROR) {
		dpt_ccb_free(sc, ccb);
		scsipi_done(xs);
		return (-1);
	}

	bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	/* Don't bother using scatter/gather for just 1 seg */
	if (xfer->dm_nsegs == 1) {
		cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
		cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
	} else {
		/*
		 * Load the hardware scatter/gather map with
		 * the contents of the DMA map.
		 */
		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	}

	return (0);
}

/*
 * Unmap a transfer.
 */
static void
dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
{

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_dmamap_xfer->dm_mapsize,
	    (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
dpt_minphys(struct buf *bp)
{

	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}
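/*
 * The scsipi layer calls adapt_minphys for every transfer it builds, so
 * the clamp above ensures that no request exceeds what one EATA CCB can
 * describe (at most DPT_SG_SIZE scatter/gather segments totalling
 * DPT_MAX_XFER bytes) before the machine-dependent minphys() applies its
 * own limit.
 */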
/*
 * Start a SCSI command.
 */
static void
dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		   void *arg)
{
	struct dpt_softc *sc;
	struct scsipi_xfer *xs;
	int flags;
	struct scsipi_periph *periph;
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	sc = (struct dpt_softc *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

#ifdef DIAGNOSTIC
		/* Cmds must be no more than 12 bytes for us. */
		if (xs->cmdlen > 12) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif
		/*
		 * XXX We can't reset devices just yet.  Apparently some
		 * older firmware revisions don't even support it.
		 */
		if ((flags & XS_CTL_RESET) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}

		/*
		 * Get a CCB and fill it.
		 */
		ccb = dpt_ccb_alloc(sc);
		ccb->ccb_xs = xs;
		ccb->ccb_timeout = xs->timeout;

		cp = &ccb->ccb_eata_cp;
		memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
		cp->cp_ccbid = ccb->ccb_id;
		cp->cp_senselen = sizeof(ccb->ccb_sense);
		cp->cp_stataddr = htobe32(sc->sc_stppa);
		cp->cp_ctl0 = CP_C0_AUTO_SENSE;
		cp->cp_ctl1 = 0;
		cp->cp_ctl2 = 0;
		cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
		cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
		cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
		cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;

		if ((flags & XS_CTL_DATA_IN) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_IN;
		if ((flags & XS_CTL_DATA_OUT) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_OUT;
		if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
			cp->cp_ctl0 |= CP_C0_INTERPRET;

		/* Synchronous xfers mustn't write-back through the cache. */
		if (xs->bp != NULL)
			if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
				cp->cp_ctl2 |= CP_C2_NO_CACHE;

		cp->cp_senseaddr =
		    htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

		if (xs->datalen != 0) {
			if (dpt_ccb_map(sc, ccb))
				break;
		} else {
			cp->cp_dataaddr = 0;
			cp->cp_datalen = 0;
		}

		/* Sync up CCB and status packet. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

		/*
		 * Start the command.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0)
			ccb->ccb_flg |= CCB_PRIVATE;

		if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
			printf("%s: dpt_cmd failed\n", sc->sc_dv.dv_xname);
			xs->error = XS_DRIVER_STUFFUP;
			if (xs->datalen != 0)
				dpt_ccb_unmap(sc, ccb);
			dpt_ccb_free(sc, ccb);
			break;
		}

		if ((xs->xs_control & XS_CTL_POLL) == 0)
			break;

		if (dpt_ccb_poll(sc, ccb)) {
			dpt_ccb_abort(sc, ccb);
			/* Wait for abort to complete... */
			if (dpt_ccb_poll(sc, ccb))
				dpt_ccb_abort(sc, ccb);
		}

		dpt_ccb_done(sc, ccb);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported, since we allocate the maximum number of
		 * CCBs up front.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * This will be handled by the HBA itself, and we can't
		 * modify that (ditto for tagged queueing).
		 */
		break;
	}
}
/*
 * Get inquiry data from the adapter.
 */
static void
dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	*ei = (struct eata_inquiry_data *)sc->sc_scr;

	/* Get a CCB and mark as private */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;

	/* Put all the arguments into the CCB. */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = 0;
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	/* Put together the SCSI inquiry command. */
	memset(&cp->cp_cdb_cmd, 0, 12);
	cp->cp_cdb_cmd = INQUIRY;
	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion. */
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);

	if (dpt_ccb_poll(sc, ccb))
		panic("%s: inquiry timed out", sc->sc_dv.dv_xname);

	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    sc->sc_dv.dv_xname, ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free CCB, returning. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_ccb_free(sc, ccb);
}

int
dptopen(dev_t dev, int flag, int mode, struct proc *p)
{

	if (securelevel > 1)
		return (EPERM);
	if (device_lookup(&dpt_cd, minor(dev)) == NULL)
		return (ENXIO);

	return (0);
}

int
dptioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct dpt_softc *sc;
	int rv;

	sc = device_lookup(&dpt_cd, minor(dev));

	switch (cmd & 0xffff) {
	case DPT_SIGNATURE:
		memcpy(data, &dpt_sig, min(IOCPARM_LEN(cmd), sizeof(dpt_sig)));
		break;

	case DPT_CTRLINFO:
		dpt_ctlrinfo(sc, (struct dpt_eata_ctlrinfo *)data);
		break;

	case DPT_SYSINFO:
		dpt_sysinfo(sc, (struct dpt_sysinfo *)data);
		break;

	case DPT_BLINKLED:
		/*
		 * XXX Don't know how to get this from EATA boards.  I think
		 * it involves waiting for a "DPT" sequence from HA_ERROR
		 * and then reading one of the HA_ICMD registers.
		 */
		*(int *)data = 0;
		break;

	case DPT_EATAUSRCMD:
		if (IOCPARM_LEN(cmd) < sizeof(struct eata_ucp)) {
			DPRINTF(("%s: ucp %lu vs %lu bytes\n",
			    sc->sc_dv.dv_xname, IOCPARM_LEN(cmd),
			    (unsigned long int)sizeof(struct eata_ucp)));
			return (EINVAL);
		}

		if (sc->sc_uactive++)
			tsleep(&sc->sc_uactive, PRIBIO, "dptslp", 0);

		rv = dpt_passthrough(sc, (struct eata_ucp *)data, p);

		sc->sc_uactive--;
		wakeup_one(&sc->sc_uactive);
		return (rv);

	default:
		DPRINTF(("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd));
		return (ENOTTY);
	}

	return (0);
}
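/*
 * Userland management tools talk to the driver through the entry points
 * above.  A hypothetical sketch of querying the driver signature (the
 * device node name is illustrative only, and error handling is omitted):
 *
 *	struct dpt_sig sig;
 *	int fd;
 *
 *	fd = open("/dev/dpt0", O_RDWR);
 *	ioctl(fd, DPT_SIGNATURE, &sig);
 *	printf("DPT driver %d.%d.%d\n", sig.dsVersion, sig.dsRevision,
 *	    sig.dsSubRevision);
 *	close(fd);
 */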
void
dpt_ctlrinfo(struct dpt_softc *sc, struct dpt_eata_ctlrinfo *info)
{

	memset(info, 0, sizeof(*info));
	info->id = sc->sc_hbaid[0];
	info->vect = sc->sc_isairq;
	info->base = sc->sc_isaport;
	info->qdepth = sc->sc_nccbs;
	info->sgsize = DPT_SG_SIZE * sizeof(struct eata_sg);
	info->heads = 16;
	info->sectors = 63;
	info->do_drive32 = 1;
	info->primary = 1;
	info->cpLength = sizeof(struct eata_cp);
	info->spLength = sizeof(struct eata_sp);
	info->drqNum = sc->sc_isadrq;
}

void
dpt_sysinfo(struct dpt_softc *sc, struct dpt_sysinfo *info)
{
#ifdef i386
	int i, j;
#endif

	memset(info, 0, sizeof(*info));

#ifdef i386
	/* Get the hard drive types from CMOS register 0x12. */
	outb(0x70, 0x12);
	i = inb(0x71);
	j = i >> 4;
	if (i == 0x0f) {
		outb(0x70, 0x19);
		j = inb(0x71);
	}
	info->drive0CMOS = j;

	j = i & 0x0f;
	if (i == 0x0f) {
		outb(0x70, 0x1a);
		j = inb(0x71);
	}
	info->drive1CMOS = j;
	info->processorFamily = dpt_sig.dsProcessorFamily;

	/*
	 * Get the conventional memory size from CMOS.
	 */
	outb(0x70, 0x16);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x15);
	j |= inb(0x71);
	info->conventionalMemSize = j;

	/*
	 * Get the extended memory size from CMOS.
	 */
	outb(0x70, 0x31);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x30);
	j |= inb(0x71);
	info->extendedMemSize = j;

	switch (cpu_class) {
	case CPUCLASS_386:
		info->processorType = PROC_386;
		break;
	case CPUCLASS_486:
		info->processorType = PROC_486;
		break;
	case CPUCLASS_586:
		info->processorType = PROC_PENTIUM;
		break;
	case CPUCLASS_686:
	default:
		info->processorType = PROC_SEXIUM;
		break;
	}

	info->flags = SI_CMOS_Valid | SI_BusTypeValid |
	    SI_MemorySizeValid | SI_NO_SmartROM;
#else
	info->flags = SI_BusTypeValid | SI_NO_SmartROM;
#endif

	info->busType = sc->sc_bustype;
}
int
dpt_passthrough(struct dpt_softc *sc, struct eata_ucp *ucp, struct proc *proc)
{
	struct dpt_ccb *ccb;
	struct eata_sp sp;
	struct eata_cp *cp;
	struct eata_sg *sg;
	bus_dmamap_t xfer;
	bus_dma_segment_t *ds;
	int datain, s, rv, i, uslen;

	/*
	 * Get a CCB and fill it.
	 */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE | CCB_WAIT;
	ccb->ccb_timeout = 0;
	ccb->ccb_savesp = &sp;

	cp = &ccb->ccb_eata_cp;
	memcpy(cp, ucp->ucp_cp, sizeof(ucp->ucp_cp));
	uslen = cp->cp_senselen;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
	cp->cp_stataddr = htobe32(sc->sc_stppa);

	/*
	 * Map data transfers.
	 */
	if (ucp->ucp_dataaddr && ucp->ucp_datalen) {
		xfer = ccb->ccb_dmamap_xfer;
		datain = ((cp->cp_ctl0 & CP_C0_DATA_IN) != 0);

		if (ucp->ucp_datalen > DPT_MAX_XFER) {
			DPRINTF(("%s: xfer too big\n", sc->sc_dv.dv_xname));
			dpt_ccb_free(sc, ccb);
			return (EFBIG);
		}
		rv = bus_dmamap_load(sc->sc_dmat, xfer,
		    ucp->ucp_dataaddr, ucp->ucp_datalen, proc,
		    BUS_DMA_WAITOK | BUS_DMA_STREAMING |
		    (datain ? BUS_DMA_READ : BUS_DMA_WRITE));
		if (rv != 0) {
			DPRINTF(("%s: map failed; %d\n", sc->sc_dv.dv_xname,
			    rv));
			dpt_ccb_free(sc, ccb);
			return (rv);
		}

		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));

		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	} else {
		cp->cp_dataaddr = 0;
		cp->cp_datalen = 0;
	}

	/*
	 * Start the command and sleep on completion.
	 */
	PHOLD(curlwp);	/* XXXJRT curlwp */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	s = splbio();
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", sc->sc_dv.dv_xname);
	tsleep(ccb, PWAIT, "dptucmd", 0);
	splx(s);
	PRELE(curlwp);	/* XXXJRT curlwp */

	/*
	 * Sync up the DMA map and copy out results.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

	if (cp->cp_datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(sc->sc_dmat, xfer);
	}

	/* rv may otherwise be uninitialized if no data was mapped. */
	rv = 0;

	if (ucp->ucp_stataddr != NULL) {
		rv = copyout(&sp, ucp->ucp_stataddr, sizeof(sp));
		if (rv != 0)
			DPRINTF(("%s: sp copyout() failed\n",
			    sc->sc_dv.dv_xname));
	}
	if (rv == 0 && ucp->ucp_senseaddr != NULL) {
		i = min(uslen, sizeof(ccb->ccb_sense));
		rv = copyout(&ccb->ccb_sense, ucp->ucp_senseaddr, i);
		if (rv != 0)
			DPRINTF(("%s: sense copyout() failed\n",
			    sc->sc_dv.dv_xname));
	}

	ucp->ucp_hstatus = (u_int8_t)ccb->ccb_hba_status;
	ucp->ucp_tstatus = (u_int8_t)ccb->ccb_scsi_status;
	dpt_ccb_free(sc, ccb);
	return (rv);
}