/*	$NetBSD: dpt.c,v 1.64 2010/11/13 13:52:01 uebayasi Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
 * Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
 * Copyright (c) 2000 Adaptec Corporation
 * All rights reserved.
 *
 * TERMS AND CONDITIONS OF USE
 *
 * Redistribution and use in source form, with or without modification, are
 * permitted provided that redistributions of source code must retain the
 * above copyright notice, this list of conditions and the following disclaimer.
 *
 * This software is provided `as is' by Adaptec and any express or implied
 * warranties, including, but not limited to, the implied warranties of
 * merchantability and fitness for a particular purpose, are disclaimed. In no
 * event shall Adaptec be liable for any direct, indirect, incidental, special,
 * exemplary or consequential damages (including, but not limited to,
 * procurement of substitute goods or services; loss of use, data, or profits;
 * or business interruptions) however caused and on any theory of liability,
 * whether in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this driver software, even
 * if advised of the possibility of such damage.
 */

/*
 * Portions of this code fall under the following copyright:
 *
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.64 2010/11/13 13:52:01 uebayasi Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/proc.h>

#include <sys/bus.h>
#ifdef i386
#include <machine/pio.h>
#include <machine/cputypes.h>
#endif

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/dptreg.h>
#include <dev/ic/dptvar.h>

#include <dev/i2o/dptivar.h>

#ifdef DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	dpt_inb(x, o)		\
    bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
#define	dpt_outb(x, o, d)	\
    bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))

static const char * const dpt_cname[] = {
	"3334", "SmartRAID IV",
	"3332", "SmartRAID IV",
	"2144", "SmartCache IV",
	"2044", "SmartCache IV",
	"2142", "SmartCache IV",
	"2042", "SmartCache IV",
	"2041", "SmartCache IV",
	"3224", "SmartRAID III",
	"3222", "SmartRAID III",
	"3021", "SmartRAID III",
	"2124", "SmartCache III",
	"2024", "SmartCache III",
	"2122", "SmartCache III",
	"2022", "SmartCache III",
	"2021", "SmartCache III",
	"2012", "SmartCache Plus",
	"2011", "SmartCache Plus",
	NULL,   "<unknown>",
};

static void	*dpt_sdh;

dev_type_open(dptopen);
dev_type_ioctl(dptioctl);

const struct cdevsw dpt_cdevsw = {
	dptopen, nullclose, noread, nowrite, dptioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

extern struct cfdriver dpt_cd;

static struct dpt_sig dpt_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G' },
	SIG_VERSION,
#if defined(i386)
	PROC_INTEL,
#elif defined(powerpc)
	PROC_POWERPC,
#elif defined(alpha)
	PROC_ALPHA,
#elif defined(__mips__)
	PROC_MIPS,
#elif defined(sparc64)
	PROC_ULTRASPARC,
#else
	0xff,
#endif
#if defined(i386)
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#else
	0,
#endif
	FT_HBADRVR,
	0,
	OEM_DPT,
	OS_FREE_BSD,	/* XXX */
	CAP_ABOVE16MB,
	DEV_ALL,
	ADF_ALL_EATA,
	0,
	0,
	DPT_VERSION,
	DPT_REVISION,
	DPT_SUBREVISION,
	DPT_MONTH,
	DPT_DAY,
	DPT_YEAR,
	""		/* Will be filled later */
};

static void	dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
static void	dpt_ctlrinfo(struct dpt_softc *, struct dpt_eata_ctlrinfo *);
static void	dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
static void	dpt_minphys(struct buf *);
static int	dpt_passthrough(struct dpt_softc *, struct eata_ucp *,
		    struct lwp *);
static void	dpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	dpt_shutdown(void *);
static void	dpt_sysinfo(struct dpt_softc *, struct dpt_sysinfo *);
static int	dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);

static inline struct dpt_ccb	*dpt_ccb_alloc(struct dpt_softc *);
static inline void	dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);
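
/*
 * CCB allocation and freeing.  The free list is protected by splbio().
 * dpt_ccb_alloc() assumes that a free CCB is always available; this holds
 * because the mid-layer is given one fewer opening than we have CCBs (see
 * dpt_init()), which presumably keeps one CCB in reserve for internal and
 * passthrough commands.
 */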

static inline struct dpt_ccb *
dpt_ccb_alloc(struct dpt_softc *sc)
{
	struct dpt_ccb *ccb;
	int s;

	s = splbio();
	ccb = SLIST_FIRST(&sc->sc_ccb_free);
	SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
	splx(s);

	return (ccb);
}

static inline void
dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int s;

	ccb->ccb_flg = 0;
	ccb->ccb_savesp = NULL;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	splx(s);
}

/*
 * Handle an interrupt from the HBA.
 */
int
dpt_intr(void *cookie)
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	volatile int junk;
	int forus;

	sc = cookie;
	sp = sc->sc_stp;
	forus = 0;

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
			break;
		forus = 1;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before HBA can reset HBA_AUX_INTR. */
		if (sp->sp_ccbid == -1) {
			DELAY(50);

			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);

			printf("%s: no status\n", device_xname(&sc->sc_dv));

			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    sc->sc_stpoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic. */
		if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
			printf("%s: bogus status (returned CCB id %d)\n",
			    device_xname(&sc->sc_dv), sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			junk = dpt_inb(sc, HA_STATUS);
			continue;
		}

		/* Sync up DMA map and cache cmd status. */
		ccb = sc->sc_ccbs + sp->sp_ccbid;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
		    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

		ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
		ccb->ccb_scsi_status = sp->sp_scsi_status;
		if (ccb->ccb_savesp != NULL)
			memcpy(ccb->ccb_savesp, sp, sizeof(*sp));

		/*
		 * Ack the interrupt and process the CCB.  If this
		 * is a private CCB it's up to dpt_ccb_poll() to
		 * notice.
		 */
		sp->sp_ccbid = -1;
		ccb->ccb_flg |= CCB_INTR;
		junk = dpt_inb(sc, HA_STATUS);
		if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
			dpt_ccb_done(sc, ccb);
		else if ((ccb->ccb_flg & CCB_WAIT) != 0)
			wakeup(ccb);
	}

	return (forus);
}
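
/*
 * A note on the completion protocol implemented above: the HBA DMAs a
 * status packet (struct eata_sp) into host memory and raises HA_AUX_INTR;
 * reading HA_STATUS acknowledges the interrupt.  sp_ccbid is reset to -1
 * by the driver, not the HBA, so that a stale status packet can be
 * recognized on the next pass.  The nonzero return value when the
 * interrupt was ours lets bus front-ends share interrupt lines.
 */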

/*
 * Initialize and attach the HBA.  This is the entry point from bus
 * specific probe-and-attach code.
 */
void
dpt_init(struct dpt_softc *sc, const char *intrstr)
{
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;
	struct eata_inquiry_data *ei;
	int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
	bus_dma_segment_t seg;
	struct eata_cfg *ec;
	struct dpt_ccb *ccb;
	char model[__arraycount(ei->ei_model) + __arraycount(ei->ei_suffix) + 1];
	char vendor[__arraycount(ei->ei_vendor) + 1];

	ec = &sc->sc_ec;
	snprintf(dpt_sig.dsDescription, sizeof(dpt_sig.dsDescription),
	    "NetBSD %s DPT driver", osrelease);

	/*
	 * Allocate the CCB/status packet/scratch DMA map and load.
	 */
	sc->sc_nccbs =
	    min(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
	    DPT_SCRATCH_SIZE + sizeof(struct eata_sp);

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dv,
		    "unable to allocate CCBs, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
	    (void **)&sc->sc_ccbs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to map CCBs, rv = %d\n",
		    rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
	    mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(&sc->sc_dv,
		    "unable to create CCB DMA map, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dv,
		    "unable to load CCB DMA map, rv = %d\n", rv);
		return;
	}

	sc->sc_stp = (struct eata_sp *)((char *)sc->sc_ccbs + sc->sc_stpoff);
	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
	sc->sc_scr = (char *)sc->sc_ccbs + sc->sc_scroff;
	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
	sc->sc_stp->sp_ccbid = -1;

	/*
	 * Create the CCBs.
	 */
	SLIST_INIT(&sc->sc_ccb_free);
	memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);

	for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
		rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
		    DPT_SG_SIZE, DPT_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv) {
			aprint_error_dev(&sc->sc_dv,
			    "can't create ccb dmamap (%d)\n", rv);
			break;
		}

		ccb->ccb_id = i;
		ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb);
		SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	}

	if (i == 0) {
		aprint_error_dev(&sc->sc_dv, "unable to create CCBs\n");
		return;
	} else if (i != sc->sc_nccbs) {
		aprint_error_dev(&sc->sc_dv, "%d/%d CCBs created!\n",
		    i, sc->sc_nccbs);
		sc->sc_nccbs = i;
	}

	/* Set shutdownhook before we start any device activity. */
	if (dpt_sdh == NULL)
		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);
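
	/*
	 * The single DMA area allocated above is laid out as follows:
	 *
	 *	offset 0	the CCBs (sc_nccbs * sizeof(struct dpt_ccb))
	 *	sc_stpoff	the status packet (struct eata_sp)
	 *	sc_scroff	the scratch area (DPT_SCRATCH_SIZE bytes)
	 */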

	/* Get the inquiry data from the HBA. */
	dpt_hba_inquire(sc, &ei);

	/*
	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
	 * dpt0: interrupting at irq 10
	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
	 */
	for (i = 0; i < __arraycount(ei->ei_vendor) && ei->ei_vendor[i] != ' ';
	    i++)
		vendor[i] = ei->ei_vendor[i];
	vendor[i] = '\0';

	for (i = 0; i < __arraycount(ei->ei_model) && ei->ei_model[i] != ' ';
	    i++)
		model[i] = ei->ei_model[i];
	for (j = 0; j < __arraycount(ei->ei_suffix) && ei->ei_suffix[j] != ' ';
	    i++, j++)
		model[i] = ei->ei_suffix[j];
	model[i] = '\0';

	/* Find the marketing name for the board. */
	for (i = 0; dpt_cname[i] != NULL; i += 2)
		if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
			break;

	aprint_normal("%s %s (%s)\n", vendor, dpt_cname[i + 1], model);

	if (intrstr != NULL)
		aprint_normal_dev(&sc->sc_dv, "interrupting at %s\n",
		    intrstr);

	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
	    EC_F3_MAX_CHANNEL_SHIFT;
	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
	    EC_F3_MAX_TARGET_SHIFT;

	aprint_normal_dev(&sc->sc_dv,
	    "%d queued commands, %d channel(s), adapter on ID(s)",
	    sc->sc_nccbs, maxchannel + 1);

	for (i = 0; i <= maxchannel; i++) {
		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
		aprint_normal(" %d", sc->sc_hbaid[i]);
	}
	aprint_normal("\n");

	/*
	 * Reset the SCSI controller chip(s) and bus.  XXX Do we need to do
	 * this for each bus?
	 */
	if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
		panic("%s: dpt_cmd failed", device_xname(&sc->sc_dv));

	/* Fill in the scsipi_adapter. */
	adapt = &sc->sc_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dv;
	adapt->adapt_nchannels = maxchannel + 1;
	adapt->adapt_openings = sc->sc_nccbs - 1;
	adapt->adapt_max_periph = sc->sc_nccbs - 1;
	adapt->adapt_request = dpt_scsipi_request;
	adapt->adapt_minphys = dpt_minphys;

	for (i = 0; i <= maxchannel; i++) {
		/* Fill in the scsipi_channel. */
		chan = &sc->sc_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = maxtarget + 1;
		chan->chan_nluns = ec->ec_maxlun + 1;
		chan->chan_id = sc->sc_hbaid[i];
		config_found(&sc->sc_dv, chan, scsiprint);
	}
}
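
/*
 * Note that the configuration below is fetched with PIO (CP_PIO_GETCFG)
 * rather than DMA: the reply appears 16 bits at a time through the
 * HA_DATA window, and the full 512-byte reply must be drained even when
 * struct eata_cfg is smaller.
 */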

/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 */
int
dpt_readcfg(struct dpt_softc *sc)
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset. */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset (hba status:%02x)\n",
		    device_xname(&sc->sc_dv), dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	while ((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
		/* RAID drives still spinning up? */
		if (dpt_inb(sc, HA_ERROR) != 'D' ||
		    dpt_inb(sc, HA_ERROR + 1) != 'P' ||
		    dpt_inb(sc, HA_ERROR + 2) != 'T') {
			printf("%s: HBA not ready\n",
			    device_xname(&sc->sc_dv));
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 *
	 * Apparently certain firmware revisions won't DMA later on if we
	 * request the config data using PIO, but it makes it a lot easier
	 * as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
	i = ((int)&((struct eata_cfg *)0)->ec_cfglen +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear (hba status:%02x)\n",
		    device_xname(&sc->sc_dv), dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/* Begin reading. */
	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh,
		    HA_DATA);

	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - (int)(&(((struct eata_cfg *)0L)->ec_cfglen))
		    - sizeof(ec->ec_cfglen);

	j = i + (int)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;

	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh,
		    HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		(void)bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh,
		    HA_DATA);

	/* Defaults for older firmware... */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		aprint_error_dev(&sc->sc_dv, "HBA error\n");
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		aprint_error_dev(&sc->sc_dv, "EATA signature mismatch\n");
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
		aprint_error_dev(&sc->sc_dv, "ec_hba field invalid\n");
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
		aprint_error_dev(&sc->sc_dv, "DMA not supported\n");
		return (-1);
	}

	return (0);
}

/*
 * Our `shutdownhook' to cleanly shut down the HBA.  The HBA must flush all
 * data from its cache and mark array groups as clean.
 *
 * XXX This doesn't always work (i.e., the HBA may still be flushing after
 * we tell root that it's safe to power off).
 */
static void
dpt_shutdown(void *cookie)
{
	extern struct cfdriver dpt_cd;
	struct dpt_softc *sc;
	int i;

	printf("shutting down dpt devices...");

	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&dpt_cd, i)) == NULL)
			continue;
		dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
	}

	delay(10000 * 1000);
	printf(" done\n");
}
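
/*
 * Command delivery below uses the EATA DMA mailbox: the physical address
 * of the CCB is written a byte at a time to the HA_DMA_BASE registers,
 * then the EATA command code is written to HA_COMMAND.  Immediate
 * commands carry a sub-code in HA_ICMD.
 */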

/*
 * Send an EATA command to the HBA.
 */
static int
dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
{
	u_int32_t pa;
	int i, s;

	s = splbio();

	for (i = 20000; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == 0) {
		splx(s);
		return (-1);
	}

	pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
	dpt_outb(sc, HA_DMA_BASE + 0, (pa      ) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 1, (pa >>  8) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);

	if (eatacmd == CP_IMMEDIATE)
		dpt_outb(sc, HA_ICMD, icmd);

	dpt_outb(sc, HA_COMMAND, eatacmd);

	splx(s);
	return (0);
}

/*
 * Wait for the HBA status register to reach a specific state.
 */
static int
dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
{

	for (ms *= 10; ms != 0; ms--) {
		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
			return (0);
		DELAY(100);
	}

	return (-1);
}

/*
 * Spin waiting for a command to finish.  The timeout value from the CCB is
 * used.  The CCB must be marked with CCB_PRIVATE, otherwise it'll get
 * recycled before we get a look at it.
 */
static int
dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int i, s;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
#endif

	s = splbio();

	if ((ccb->ccb_flg & CCB_INTR) != 0) {
		splx(s);
		return (0);
	}

	for (i = ccb->ccb_timeout * 20; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
			dpt_intr(sc);
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			break;
		DELAY(50);
	}

	splx(s);
	return (i == 0);
}

/*
 * We have a command which has been processed by the HBA, so now we look to
 * see how the operation went.  CCBs marked CCB_PRIVATE are not passed here
 * by dpt_intr().
 */
static void
dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;

	xs = ccb->ccb_xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen != 0)
		dpt_ccb_unmap(sc, ccb);

	if (xs->error == XS_NOERROR) {
		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case SP_HBA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case SP_HBA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:
				printf("%s: HBA status %x\n",
				    device_xname(&sc->sc_dv),
				    ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				memcpy(&xs->sense.scsi_sense,
				    &ccb->ccb_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
			case SCSI_QUEUE_FULL:
				xs->error = XS_BUSY;
				break;
			default:
				scsipi_printaddr(xs->xs_periph);
				printf("SCSI status %x\n",
				    ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done. */
	dpt_ccb_free(sc, ccb);
	scsipi_done(xs);
}
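
/*
 * Timeout handling, used below: the first timeout on a CCB issues a
 * CPI_SPEC_ABORT immediate command for it; if the abort itself times out
 * (CCB_ABORT already set), the HBA is reset outright.  Callers are
 * expected to poll again afterwards for the abort to complete.
 */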

/*
 * The specified CCB has timed out; abort it.
 */
static void
dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	int s;

	xs = ccb->ccb_xs;
	periph = xs->xs_periph;

	scsipi_printaddr(periph);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
			aprint_error_dev(&sc->sc_dv, "dpt_cmd failed\n");
	}

	splx(s);
}

/*
 * Map a data transfer.
 */
static int
dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;
	bus_dmamap_t xfer;
	bus_dma_segment_t *ds;
	struct eata_sg *sg;
	struct eata_cp *cp;
	int rv, i;

	xs = ccb->ccb_xs;
	xfer = ccb->ccb_dmamap_xfer;
	cp = &ccb->ccb_eata_cp;

	rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
	    ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ :
	    BUS_DMA_WRITE));

	switch (rv) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		printf("%s: error %d loading map\n",
		    device_xname(&sc->sc_dv), rv);
		break;
	}

	if (xs->error != XS_NOERROR) {
		dpt_ccb_free(sc, ccb);
		scsipi_done(xs);
		return (-1);
	}

	bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	/* Don't bother using scatter/gather for just 1 seg */
	if (xfer->dm_nsegs == 1) {
		cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
		cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
	} else {
		/*
		 * Load the hardware scatter/gather map with
		 * the contents of the DMA map.
		 */
		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	}

	return (0);
}

/*
 * Unmap a transfer.
 */
static void
dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
{

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_dmamap_xfer->dm_mapsize,
	    (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
dpt_minphys(struct buf *bp)
{

	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}
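
/*
 * Request lifecycle, for reference: dpt_scsipi_request() below fills in
 * a CCB and hands it to dpt_cmd(); completion arrives in dpt_intr(),
 * which calls dpt_ccb_done() to unmap the transfer and return the xfer
 * to the mid-layer via scsipi_done().  Polled commands short-circuit
 * this path through dpt_ccb_poll().
 */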

/*
 * Start a SCSI command.
 */
static void
dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct dpt_softc *sc;
	struct scsipi_xfer *xs;
	int flags;
	struct scsipi_periph *periph;
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	sc = (struct dpt_softc *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

#ifdef DIAGNOSTIC
		/* Cmds must be no more than 12 bytes for us. */
		if (xs->cmdlen > 12) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif
		/*
		 * XXX We can't reset devices just yet.  Apparently some
		 * older firmware revisions don't even support it.
		 */
		if ((flags & XS_CTL_RESET) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}

		/*
		 * Get a CCB and fill it.
		 */
		ccb = dpt_ccb_alloc(sc);
		ccb->ccb_xs = xs;
		ccb->ccb_timeout = xs->timeout;

		cp = &ccb->ccb_eata_cp;
		memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
		cp->cp_ccbid = ccb->ccb_id;
		cp->cp_senselen = sizeof(ccb->ccb_sense);
		cp->cp_stataddr = htobe32(sc->sc_stppa);
		cp->cp_ctl0 = CP_C0_AUTO_SENSE;
		cp->cp_ctl1 = 0;
		cp->cp_ctl2 = 0;
		cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
		cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
		cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
		cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;

		if ((flags & XS_CTL_DATA_IN) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_IN;
		if ((flags & XS_CTL_DATA_OUT) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_OUT;
		if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
			cp->cp_ctl0 |= CP_C0_INTERPRET;

		/* Synchronous xfers mustn't write-back through the cache. */
		if (xs->bp != NULL)
			if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
				cp->cp_ctl2 |= CP_C2_NO_CACHE;

		cp->cp_senseaddr =
		    htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

		if (xs->datalen != 0) {
			if (dpt_ccb_map(sc, ccb))
				break;
		} else {
			cp->cp_dataaddr = 0;
			cp->cp_datalen = 0;
		}

		/* Sync up CCB and status packet. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

		/*
		 * Start the command.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0)
			ccb->ccb_flg |= CCB_PRIVATE;

		if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
			aprint_error_dev(&sc->sc_dv, "dpt_cmd failed\n");
			xs->error = XS_DRIVER_STUFFUP;
			if (xs->datalen != 0)
				dpt_ccb_unmap(sc, ccb);
			dpt_ccb_free(sc, ccb);
			break;
		}

		if ((xs->xs_control & XS_CTL_POLL) == 0)
			break;

		if (dpt_ccb_poll(sc, ccb)) {
			dpt_ccb_abort(sc, ccb);
			/* Wait for abort to complete... */
			if (dpt_ccb_poll(sc, ccb))
				dpt_ccb_abort(sc, ccb);
		}

		dpt_ccb_done(sc, ccb);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported, since we allocate the maximum number of
		 * CCBs up front.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * This will be handled by the HBA itself, and we can't
		 * modify that (ditto for tagged queueing).
		 */
		break;
	}
}

/*
 * Get inquiry data from the adapter.
 */
static void
dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	*ei = (struct eata_inquiry_data *)sc->sc_scr;

	/* Get a CCB and mark as private */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;

	/* Put all the arguments into the CCB. */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = 0;
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	/* Put together the SCSI inquiry command. */
	memset(&cp->cp_cdb_cmd, 0, 12);
	cp->cp_cdb_cmd = INQUIRY;
	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion. */
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", device_xname(&sc->sc_dv));

	if (dpt_ccb_poll(sc, ccb))
		panic("%s: inquiry timed out", device_xname(&sc->sc_dv));

	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    device_xname(&sc->sc_dv), ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free CCB, returning. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_ccb_free(sc, ccb);
}

int
dptopen(dev_t dev, int flag, int mode, struct lwp *l)
{

	if (device_lookup(&dpt_cd, minor(dev)) == NULL)
		return (ENXIO);

	return (0);
}
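
/*
 * Management ioctls.  For illustration only, a userland consumer might
 * fetch the driver signature roughly like this (the device node name is
 * an assumption; it depends on how the special files were created):
 *
 *	int fd = open("/dev/dpt0", O_RDWR);
 *	struct dpt_sig sig;
 *
 *	if (fd != -1 && ioctl(fd, DPT_SIGNATURE, &sig) == 0)
 *		...inspect sig...
 */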

int
dptioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct dpt_softc *sc;
	int rv;

	sc = device_lookup_private(&dpt_cd, minor(dev));

	switch (cmd & 0xffff) {
	case DPT_SIGNATURE:
		memcpy(data, &dpt_sig, min(IOCPARM_LEN(cmd),
		    sizeof(dpt_sig)));
		break;

	case DPT_CTRLINFO:
		dpt_ctlrinfo(sc, (struct dpt_eata_ctlrinfo *)data);
		break;

	case DPT_SYSINFO:
		dpt_sysinfo(sc, (struct dpt_sysinfo *)data);
		break;

	case DPT_BLINKLED:
		/*
		 * XXX Don't know how to get this from EATA boards.  I think
		 * it involves waiting for a "DPT" sequence from HA_ERROR
		 * and then reading one of the HA_ICMD registers.
		 */
		*(int *)data = 0;
		break;

	case DPT_EATAUSRCMD:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			return (rv);

		if (IOCPARM_LEN(cmd) < sizeof(struct eata_ucp)) {
			DPRINTF(("%s: ucp %lu vs %lu bytes\n",
			    device_xname(&sc->sc_dv), IOCPARM_LEN(cmd),
			    (unsigned long int)sizeof(struct eata_ucp)));
			return (EINVAL);
		}

		if (sc->sc_uactive++)
			tsleep(&sc->sc_uactive, PRIBIO, "dptslp", 0);

		rv = dpt_passthrough(sc, (struct eata_ucp *)data, l);

		sc->sc_uactive--;
		wakeup_one(&sc->sc_uactive);
		return (rv);

	default:
		DPRINTF(("%s: unknown ioctl %lx\n",
		    device_xname(&sc->sc_dv), cmd));
		return (ENOTTY);
	}

	return (0);
}

void
dpt_ctlrinfo(struct dpt_softc *sc, struct dpt_eata_ctlrinfo *info)
{

	memset(info, 0, sizeof(*info));
	info->id = sc->sc_hbaid[0];
	info->vect = sc->sc_isairq;
	info->base = sc->sc_isaport;
	info->qdepth = sc->sc_nccbs;
	info->sgsize = DPT_SG_SIZE * sizeof(struct eata_sg);
	info->heads = 16;
	info->sectors = 63;
	info->do_drive32 = 1;
	info->primary = 1;
	info->cpLength = sizeof(struct eata_cp);
	info->spLength = sizeof(struct eata_sp);
	info->drqNum = sc->sc_isadrq;
}

void
dpt_sysinfo(struct dpt_softc *sc, struct dpt_sysinfo *info)
{
#ifdef i386
	int i, j;
#endif

	memset(info, 0, sizeof(*info));

#ifdef i386
	outb(0x70, 0x12);
	i = inb(0x71);
	j = i >> 4;
	if (i == 0x0f) {
		outb(0x70, 0x19);
		j = inb(0x71);
	}
	info->drive0CMOS = j;

	j = i & 0x0f;
	if (i == 0x0f) {
		outb(0x70, 0x1a);
		j = inb(0x71);
	}
	info->drive1CMOS = j;
	info->processorFamily = dpt_sig.dsProcessorFamily;

	/*
	 * Get the conventional memory size from CMOS.
	 */
	outb(0x70, 0x16);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x15);
	j |= inb(0x71);
	info->conventionalMemSize = j;

	/*
	 * Get the extended memory size from CMOS.
	 */
	outb(0x70, 0x31);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x30);
	j |= inb(0x71);
	info->extendedMemSize = j;

	switch (cpu_class) {
	case CPUCLASS_386:
		info->processorType = PROC_386;
		break;
	case CPUCLASS_486:
		info->processorType = PROC_486;
		break;
	case CPUCLASS_586:
		info->processorType = PROC_PENTIUM;
		break;
	case CPUCLASS_686:
	default:
		info->processorType = PROC_SEXIUM;
		break;
	}

	info->flags = SI_CMOS_Valid | SI_BusTypeValid |
	    SI_MemorySizeValid | SI_NO_SmartROM;
#else
	info->flags = SI_BusTypeValid | SI_NO_SmartROM;
#endif

	info->busType = sc->sc_bustype;
}
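
/*
 * Execute a user-supplied EATA command (from the DPT_EATAUSRCMD ioctl
 * above).  The user's command packet is copied into a private CCB; the
 * data, sense and status pointers are redirected to kernel buffers, and
 * the results are copied back out once the command completes.
 */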
int
dpt_passthrough(struct dpt_softc *sc, struct eata_ucp *ucp, struct lwp *l)
{
	struct dpt_ccb *ccb;
	struct eata_sp sp;
	struct eata_cp *cp;
	struct eata_sg *sg;
	bus_dmamap_t xfer = 0;			/* XXX: gcc */
	bus_dma_segment_t *ds;
	int datain = 0, s, rv = 0, i, uslen;	/* XXX: gcc */

	/*
	 * Get a CCB and fill it.
	 */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE | CCB_WAIT;
	ccb->ccb_timeout = 0;
	ccb->ccb_savesp = &sp;

	cp = &ccb->ccb_eata_cp;
	memcpy(cp, ucp->ucp_cp, sizeof(ucp->ucp_cp));
	uslen = cp->cp_senselen;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
	cp->cp_stataddr = htobe32(sc->sc_stppa);

	/*
	 * Map data transfers.
	 */
	if (ucp->ucp_dataaddr && ucp->ucp_datalen) {
		xfer = ccb->ccb_dmamap_xfer;
		datain = ((cp->cp_ctl0 & CP_C0_DATA_IN) != 0);

		if (ucp->ucp_datalen > DPT_MAX_XFER) {
			DPRINTF(("%s: xfer too big\n",
			    device_xname(&sc->sc_dv)));
			dpt_ccb_free(sc, ccb);
			return (EFBIG);
		}
		rv = bus_dmamap_load(sc->sc_dmat, xfer,
		    ucp->ucp_dataaddr, ucp->ucp_datalen, l->l_proc,
		    BUS_DMA_WAITOK | BUS_DMA_STREAMING |
		    (datain ? BUS_DMA_READ : BUS_DMA_WRITE));
		if (rv != 0) {
			DPRINTF(("%s: map failed; %d\n",
			    device_xname(&sc->sc_dv), rv));
			dpt_ccb_free(sc, ccb);
			return (rv);
		}

		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));

		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	} else {
		cp->cp_dataaddr = 0;
		cp->cp_datalen = 0;
	}

	/*
	 * Start the command and sleep on completion.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	s = splbio();
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", device_xname(&sc->sc_dv));
	tsleep(ccb, PWAIT, "dptucmd", 0);
	splx(s);

	/*
	 * Sync up the DMA map and copy out results.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

	if (cp->cp_datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(sc->sc_dmat, xfer);
	}

	if (ucp->ucp_stataddr != NULL) {
		rv = copyout(&sp, ucp->ucp_stataddr, sizeof(sp));
		if (rv != 0) {
			DPRINTF(("%s: sp copyout() failed\n",
			    device_xname(&sc->sc_dv)));
		}
	}
	if (rv == 0 && ucp->ucp_senseaddr != NULL) {
		i = min(uslen, sizeof(ccb->ccb_sense));
		rv = copyout(&ccb->ccb_sense, ucp->ucp_senseaddr, i);
		if (rv != 0) {
			DPRINTF(("%s: sense copyout() failed\n",
			    device_xname(&sc->sc_dv)));
		}
	}

	ucp->ucp_hstatus = (u_int8_t)ccb->ccb_hba_status;
	ucp->ucp_tstatus = (u_int8_t)ccb->ccb_scsi_status;
	dpt_ccb_free(sc, ccb);
	return (rv);
}