/*	$NetBSD: ciss.c,v 1.23 2010/09/07 18:19:16 mhitch Exp $	*/
/*	$OpenBSD: ciss.c,v 1.14 2006/03/13 16:02:23 mickey Exp $	*/

/*
 * Copyright (c) 2005 Michael Shalayeff
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN
 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ciss.c,v 1.23 2010/09/07 18:19:16 mhitch Exp $");

#include "bio.h"

/* #define CISS_DEBUG */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <sys/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsiconf.h>
#include <dev/scsipi/scsipi_all.h>

#include <dev/ic/cissreg.h>
#include <dev/ic/cissvar.h>

#if NBIO > 0
#include <dev/biovar.h>
#endif /* NBIO > 0 */

#ifdef CISS_DEBUG
#define CISS_DPRINTF(m,a)	if (ciss_debug & (m)) printf a
#define	CISS_D_CMD	0x0001
#define	CISS_D_INTR	0x0002
#define	CISS_D_MISC	0x0004
#define	CISS_D_DMA	0x0008
#define	CISS_D_IOCTL	0x0010
#define	CISS_D_ERR	0x0020
int ciss_debug = 0
	| CISS_D_CMD
	| CISS_D_INTR
	| CISS_D_MISC
	| CISS_D_DMA
	| CISS_D_IOCTL
	| CISS_D_ERR
	;
#else
#define CISS_DPRINTF(m,a)	/* m, a */
#endif

static void	ciss_scsi_cmd(struct scsipi_channel *chan,
			scsipi_adapter_req_t req, void *arg);
static int	ciss_scsi_ioctl(struct scsipi_channel *chan, u_long cmd,
			void *addr, int flag, struct proc *p);
static void	cissminphys(struct buf *bp);

#if 0
static void	ciss_scsi_raw_cmd(struct scsipi_channel *chan,
			scsipi_adapter_req_t req, void *arg);
#endif

static int	ciss_sync(struct ciss_softc *sc);
static void	ciss_heartbeat(void *v);
static void	ciss_shutdown(void *v);

static struct ciss_ccb	*ciss_get_ccb(struct ciss_softc *sc);
static void	ciss_put_ccb(struct ciss_ccb *ccb);
static int	ciss_cmd(struct ciss_ccb *ccb, int flags, int wait);
static int	ciss_done(struct ciss_ccb *ccb);
static int	ciss_error(struct ciss_ccb *ccb);
struct ciss_ld *ciss_pdscan(struct ciss_softc *sc, int ld);
static int	ciss_inq(struct ciss_softc *sc, struct ciss_inquiry *inq);
int	ciss_ldid(struct ciss_softc *, int, struct ciss_ldid *);
int	ciss_ldstat(struct ciss_softc *, int, struct ciss_ldstat *);
static int	ciss_ldmap(struct ciss_softc *sc);
int	ciss_pdid(struct ciss_softc *, u_int8_t, struct ciss_pdid *, int);

#if NBIO > 0
int	ciss_ioctl(device_t, u_long, void *);
int	ciss_ioctl_vol(struct ciss_softc *, struct bioc_vol *);
int	ciss_blink(struct ciss_softc *, int, int, int, struct ciss_blink *);
int	ciss_create_sensors(struct ciss_softc *);
void	ciss_sensor_refresh(struct sysmon_envsys *, envsys_data_t *);
#endif /* NBIO > 0 */

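/*
 * Pull a command control block off the free list; returns NULL when the
 * list is empty.  The free list is protected by sc_mutex.
 */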
static struct ciss_ccb *
ciss_get_ccb(struct ciss_softc *sc)
{
	struct ciss_ccb *ccb;

	mutex_enter(&sc->sc_mutex);
	if ((ccb = TAILQ_LAST(&sc->sc_free_ccb, ciss_queue_head))) {
		TAILQ_REMOVE(&sc->sc_free_ccb, ccb, ccb_link);
		ccb->ccb_state = CISS_CCB_READY;
	}
	mutex_exit(&sc->sc_mutex);
	return ccb;
}

static void
ciss_put_ccb(struct ciss_ccb *ccb)
{
	struct ciss_softc *sc = ccb->ccb_sc;

	ccb->ccb_state = CISS_CCB_FREE;
	mutex_enter(&sc->sc_mutex);
	TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
	mutex_exit(&sc->sc_mutex);
}

int
ciss_attach(struct ciss_softc *sc)
{
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;
	struct ciss_inquiry *inq;
	bus_dma_segment_t seg[1];
	int error, i, total, rseg, maxfer;
	paddr_t pa;

	bus_space_read_region_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff,
	    (u_int32_t *)&sc->cfg, sizeof(sc->cfg) / 4);

	if (sc->cfg.signature != CISS_SIGNATURE) {
		printf(": bad sign 0x%08x\n", sc->cfg.signature);
		return -1;
	}

	if (!(sc->cfg.methods & CISS_METH_SIMPL)) {
		printf(": not simple 0x%08x\n", sc->cfg.methods);
		return -1;
	}

	sc->cfg.rmethod = CISS_METH_SIMPL;
	sc->cfg.paddr_lim = 0;			/* 32bit addrs */
	sc->cfg.int_delay = 0;			/* disable coalescing */
	sc->cfg.int_count = 0;
	strlcpy(sc->cfg.hostname, "HUMPPA", sizeof(sc->cfg.hostname));
	sc->cfg.driverf |= CISS_DRV_PRF;	/* enable prefetch */
	if (!sc->cfg.maxsg)
		sc->cfg.maxsg = MAXPHYS / PAGE_SIZE + 1;

	bus_space_write_region_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff,
	    (u_int32_t *)&sc->cfg, sizeof(sc->cfg) / 4);
	bus_space_barrier(sc->sc_iot, sc->cfg_ioh, sc->cfgoff, sizeof(sc->cfg),
	    BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_IDB, CISS_IDB_CFG);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, CISS_IDB, 4,
	    BUS_SPACE_BARRIER_WRITE);
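	/* Wait for the controller to acknowledge the config change. */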
	for (i = 1000; i--; DELAY(1000)) {
		/* XXX maybe IDB is really 64bit? - hp dl380 needs this */
		(void)bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IDB + 4);
		if (!(bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IDB) & CISS_IDB_CFG))
			break;
		bus_space_barrier(sc->sc_iot, sc->sc_ioh, CISS_IDB, 4,
		    BUS_SPACE_BARRIER_READ);
	}

	if (bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IDB) & CISS_IDB_CFG) {
		printf(": cannot set config\n");
		return -1;
	}

	bus_space_read_region_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff,
	    (u_int32_t *)&sc->cfg, sizeof(sc->cfg) / 4);

	if (!(sc->cfg.amethod & CISS_METH_SIMPL)) {
		printf(": cannot simplify 0x%08x\n", sc->cfg.amethod);
		return -1;
	}

	/* i'm ready for you and i hope you're ready for me */
	for (i = 30000; i--; DELAY(1000)) {
		if (bus_space_read_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff +
		    offsetof(struct ciss_config, amethod)) & CISS_METH_READY)
			break;
		bus_space_barrier(sc->sc_iot, sc->cfg_ioh, sc->cfgoff +
		    offsetof(struct ciss_config, amethod), 4,
		    BUS_SPACE_BARRIER_READ);
	}

	if (!(bus_space_read_4(sc->sc_iot, sc->cfg_ioh, sc->cfgoff +
	    offsetof(struct ciss_config, amethod)) & CISS_METH_READY)) {
		printf(": she never came ready for me 0x%08x\n",
		    sc->cfg.amethod);
		return -1;
	}

	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_mutex_scratch, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_condvar, "ciss_cmd");
	sc->maxcmd = sc->cfg.maxcmd;
	sc->maxsg = sc->cfg.maxsg;
	if (sc->maxsg > MAXPHYS / PAGE_SIZE + 1)
		sc->maxsg = MAXPHYS / PAGE_SIZE + 1;
	i = sizeof(struct ciss_ccb) +
	    sizeof(ccb->ccb_cmd.sgl[0]) * (sc->maxsg - 1);
	for (sc->ccblen = 0x10; sc->ccblen < i; sc->ccblen <<= 1);

	total = sc->ccblen * sc->maxcmd;
	if ((error = bus_dmamem_alloc(sc->sc_dmat, total, PAGE_SIZE, 0,
	    sc->cmdseg, 1, &rseg, BUS_DMA_NOWAIT))) {
		printf(": cannot allocate CCBs (%d)\n", error);
		return -1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, sc->cmdseg, rseg, total,
	    (void **)&sc->ccbs, BUS_DMA_NOWAIT))) {
		printf(": cannot map CCBs (%d)\n", error);
		return -1;
	}
	memset(sc->ccbs, 0, total);

	if ((error = bus_dmamap_create(sc->sc_dmat, total, 1,
	    total, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->cmdmap))) {
		printf(": cannot create CCBs dmamap (%d)\n", error);
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		return -1;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->cmdmap, sc->ccbs, total,
	    NULL, BUS_DMA_NOWAIT))) {
		printf(": cannot load CCBs dmamap (%d)\n", error);
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	TAILQ_INIT(&sc->sc_ccbq);
	TAILQ_INIT(&sc->sc_ccbdone);
	TAILQ_INIT(&sc->sc_free_ccb);

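	/* Carve the command area into CCBs and put them on the free list. */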
	maxfer = sc->maxsg * PAGE_SIZE;
	for (i = 0; total > 0 && i < sc->maxcmd; i++, total -= sc->ccblen) {
		ccb = (struct ciss_ccb *) ((char *)sc->ccbs + i * sc->ccblen);
		cmd = &ccb->ccb_cmd;
		pa = sc->cmdseg[0].ds_addr + i * sc->ccblen;

		ccb->ccb_sc = sc;
		ccb->ccb_cmdpa = pa + offsetof(struct ciss_ccb, ccb_cmd);
		ccb->ccb_state = CISS_CCB_FREE;

		cmd->id = htole32(i << 2);
		cmd->id_hi = htole32(0);
		cmd->sgin = sc->maxsg;
		cmd->sglen = htole16((u_int16_t)cmd->sgin);
		cmd->err_len = htole32(sizeof(ccb->ccb_err));
		pa += offsetof(struct ciss_ccb, ccb_err);
		cmd->err_pa = htole64((u_int64_t)pa);

		if ((error = bus_dmamap_create(sc->sc_dmat, maxfer, sc->maxsg,
		    maxfer, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap)))
			break;

		TAILQ_INSERT_TAIL(&sc->sc_free_ccb, ccb, ccb_link);
	}

	if (i < sc->maxcmd) {
		printf(": cannot create ccb#%d dmamap (%d)\n", i, error);
		if (i == 0) {
			/* TODO: also leaking the per-ccb dmamaps created above */
			bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
			bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
			return -1;
		}
	}

	if ((error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT))) {
		printf(": cannot allocate scratch buffer (%d)\n", error);
		return -1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, seg, rseg, PAGE_SIZE,
	    (void **)&sc->scratch, BUS_DMA_NOWAIT))) {
		printf(": cannot map scratch buffer (%d)\n", error);
		return -1;
	}
	memset(sc->scratch, 0, PAGE_SIZE);
	sc->sc_waitflag = XS_CTL_NOSLEEP;	/* can't sleep yet */

	mutex_enter(&sc->sc_mutex_scratch);	/* is this really needed? */
	inq = sc->scratch;
	if (ciss_inq(sc, inq)) {
		printf(": adapter inquiry failed\n");
		mutex_exit(&sc->sc_mutex_scratch);
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	if (!(inq->flags & CISS_INQ_BIGMAP)) {
		printf(": big map is not supported, flags=0x%x\n",
		    inq->flags);
		mutex_exit(&sc->sc_mutex_scratch);
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	sc->maxunits = inq->numld;
	sc->nbus = inq->nscsi_bus;
	sc->ndrives = inq->buswidth ? inq->buswidth : 256;
	printf(": %d LD%s, HW rev %d, FW %4.4s/%4.4s\n",
	    inq->numld, inq->numld == 1? "" : "s",
	    inq->hw_rev, inq->fw_running, inq->fw_stored);

	mutex_exit(&sc->sc_mutex_scratch);

	callout_init(&sc->sc_hb, 0);
	callout_setfunc(&sc->sc_hb, ciss_heartbeat, sc);
	callout_schedule(&sc->sc_hb, hz * 3);

	/* map LDs */
	if (ciss_ldmap(sc)) {
		aprint_error_dev(&sc->sc_dev, "adapter LD map failed\n");
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

	if (!(sc->sc_lds = malloc(sc->maxunits * sizeof(*sc->sc_lds),
	    M_DEVBUF, M_NOWAIT))) {
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}
	memset(sc->sc_lds, 0, sc->maxunits * sizeof(*sc->sc_lds));

	sc->sc_flush = CISS_FLUSH_ENABLE;
	if (!(sc->sc_sh = shutdownhook_establish(ciss_shutdown, sc))) {
		printf(": unable to establish shutdown hook\n");
		bus_dmamem_free(sc->sc_dmat, sc->cmdseg, 1);
		bus_dmamap_destroy(sc->sc_dmat, sc->cmdmap);
		return -1;
	}

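	/* Attach the logical drives to scsipi as a single channel. */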
	sc->sc_channel.chan_adapter = &sc->sc_adapter;
	sc->sc_channel.chan_bustype = &scsi_bustype;
	sc->sc_channel.chan_channel = 0;
	sc->sc_channel.chan_ntargets = sc->maxunits;
	sc->sc_channel.chan_nluns = 1;	/* ciss doesn't really have SCSI luns */
	sc->sc_channel.chan_openings = sc->maxcmd;
#if NBIO > 0
	/* XXX Reserve some ccb's for sensor and bioctl. */
	if (sc->sc_channel.chan_openings > 2)
		sc->sc_channel.chan_openings -= 2;
#endif
	sc->sc_channel.chan_flags = 0;
	sc->sc_channel.chan_id = sc->maxunits;

	sc->sc_adapter.adapt_dev = (device_t) sc;
	sc->sc_adapter.adapt_openings = sc->sc_channel.chan_openings;
	sc->sc_adapter.adapt_max_periph = sc->sc_channel.chan_openings;
	sc->sc_adapter.adapt_request = ciss_scsi_cmd;
	sc->sc_adapter.adapt_minphys = cissminphys;
	sc->sc_adapter.adapt_ioctl = ciss_scsi_ioctl;
	sc->sc_adapter.adapt_nchannels = 1;
	config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);

#if 0
	sc->sc_link_raw.adapter_softc = sc;
	sc->sc_link.openings = sc->sc_channel.chan_openings;
	sc->sc_link_raw.adapter = &ciss_raw_switch;
	sc->sc_link_raw.adapter_target = sc->ndrives;
	sc->sc_link_raw.adapter_buswidth = sc->ndrives;
	config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
#endif

#if NBIO > 0
	/* now map all the physdevs into their lds */
	/* XXX currently we assign all of them into ld0 */
	for (i = 0; i < sc->maxunits && i < 1; i++)
		if (!(sc->sc_lds[i] = ciss_pdscan(sc, i))) {
			sc->sc_waitflag = 0;	/* we can sleep now */
			return 0;
		}

	if (bio_register(&sc->sc_dev, ciss_ioctl) != 0)
		aprint_error_dev(&sc->sc_dev, "controller registration failed");
	else
		sc->sc_ioctl = ciss_ioctl;
	if (ciss_create_sensors(sc) != 0)
		aprint_error_dev(&sc->sc_dev, "unable to create sensors");
#endif
	sc->sc_waitflag = 0;	/* we can sleep now */

	return 0;
}

static void
ciss_shutdown(void *v)
{
	struct ciss_softc *sc = v;

	sc->sc_flush = CISS_FLUSH_DISABLE;
	/* timeout_del(&sc->sc_hb); */
	ciss_sync(sc);
}

static void
cissminphys(struct buf *bp)
{
#if 0	/* TODO */
#define CISS_MAXFER (PAGE_SIZE * (sc->maxsg + 1))
	if (bp->b_bcount > CISS_MAXFER)
		bp->b_bcount = CISS_MAXFER;
#endif
	minphys(bp);
}

/*
 * Submit a command and optionally wait for completion.
 * The wait arg abuses the XS_CTL_POLL|XS_CTL_NOSLEEP flags to request
 * waiting (XS_CTL_POLL) and to allow sleeping (!XS_CTL_NOSLEEP)
 * instead of busy-wait polling.
 */
static int
ciss_cmd(struct ciss_ccb *ccb, int flags, int wait)
{
	struct ciss_softc *sc = ccb->ccb_sc;
	struct ciss_cmd *cmd = &ccb->ccb_cmd;
	struct ciss_ccb *ccb1;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int32_t id;
	int i, tohz, error = 0;

	if (ccb->ccb_state != CISS_CCB_READY) {
		printf("%s: ccb %d not ready state=0x%x\n", device_xname(&sc->sc_dev),
		    cmd->id, ccb->ccb_state);
		return (EINVAL);
	}

	if (ccb->ccb_data) {
		bus_dma_segment_t *sgd;

		if ((error = bus_dmamap_load(sc->sc_dmat, dmap, ccb->ccb_data,
		    ccb->ccb_len, NULL, flags))) {
			if (error == EFBIG)
				printf("more than %d dma segs\n", sc->maxsg);
			else
				printf("error %d loading dma map\n", error);
			ciss_put_ccb(ccb);
			return (error);
		}
		cmd->sgin = dmap->dm_nsegs;

		sgd = dmap->dm_segs;
		CISS_DPRINTF(CISS_D_DMA, ("data=%p/%zu<%#" PRIxPADDR "/%zu",
		    ccb->ccb_data, ccb->ccb_len, sgd->ds_addr, sgd->ds_len));

		for (i = 0; i < dmap->dm_nsegs; sgd++, i++) {
			cmd->sgl[i].addr_lo = htole32(sgd->ds_addr);
			cmd->sgl[i].addr_hi =
			    htole32((u_int64_t)sgd->ds_addr >> 32);
			cmd->sgl[i].len = htole32(sgd->ds_len);
			cmd->sgl[i].flags = htole32(0);
			if (i) {
				CISS_DPRINTF(CISS_D_DMA,
				    (",%#" PRIxPADDR "/%zu", sgd->ds_addr,
				    sgd->ds_len));
			}
		}

		CISS_DPRINTF(CISS_D_DMA, ("> "));

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else
		cmd->sgin = 0;
	cmd->sglen = htole16((u_int16_t)cmd->sgin);
	memset(&ccb->ccb_err, 0, sizeof(ccb->ccb_err));

	bus_dmamap_sync(sc->sc_dmat, sc->cmdmap, 0, sc->cmdmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if ((wait & (XS_CTL_POLL|XS_CTL_NOSLEEP)) == (XS_CTL_POLL|XS_CTL_NOSLEEP))
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_IMR,
		    bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IMR) | sc->iem);

	mutex_enter(&sc->sc_mutex);
	TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, ccb_link);
	mutex_exit(&sc->sc_mutex);
	ccb->ccb_state = CISS_CCB_ONQ;
	CISS_DPRINTF(CISS_D_CMD, ("submit=0x%x ", cmd->id));
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_INQ, ccb->ccb_cmdpa);

	if (wait & XS_CTL_POLL) {
		int etick;
		CISS_DPRINTF(CISS_D_CMD, ("waiting "));

		i = ccb->ccb_xs? ccb->ccb_xs->timeout : 60000;
		tohz = (i / 1000) * hz + (i % 1000) * (hz / 1000);
		if (tohz == 0)
			tohz = 1;
		for (i *= 100, etick = tick + tohz; i--; ) {
			if (!(wait & XS_CTL_NOSLEEP)) {
				ccb->ccb_state = CISS_CCB_POLL;
				CISS_DPRINTF(CISS_D_CMD, ("cv_timedwait(%d) ", tohz));
				mutex_enter(&sc->sc_mutex);
				if (cv_timedwait(&sc->sc_condvar,
				    &sc->sc_mutex, tohz) == EWOULDBLOCK) {
					mutex_exit(&sc->sc_mutex);
					break;
				}
				mutex_exit(&sc->sc_mutex);
				if (ccb->ccb_state != CISS_CCB_ONQ) {
					tohz = etick - tick;
					if (tohz <= 0)
						break;
					CISS_DPRINTF(CISS_D_CMD, ("T"));
					continue;
				}
				ccb1 = ccb;
			} else {
				DELAY(10);

				if (!(bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				    CISS_ISR) & sc->iem)) {
					CISS_DPRINTF(CISS_D_CMD, ("N"));
					continue;
				}

				if ((id = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
				    CISS_OUTQ)) == 0xffffffff) {
					CISS_DPRINTF(CISS_D_CMD, ("Q"));
					continue;
				}

				CISS_DPRINTF(CISS_D_CMD, ("got=0x%x ", id));
				ccb1 = (struct ciss_ccb *)
				    ((char *)sc->ccbs + (id >> 2) * sc->ccblen);
				ccb1->ccb_cmd.id = htole32(id);
			}

			error = ciss_done(ccb1);
			if (ccb1 == ccb)
				break;
		}

		/* if never got a chance to be done above... */
		if (ccb->ccb_state != CISS_CCB_FREE) {
			ccb->ccb_err.cmd_stat = CISS_ERR_TMO;
			error = ciss_done(ccb);
		}

		CISS_DPRINTF(CISS_D_CMD, ("done %d:%d",
		    ccb->ccb_err.cmd_stat, ccb->ccb_err.scsi_stat));
	}

	if ((wait & (XS_CTL_POLL|XS_CTL_NOSLEEP)) == (XS_CTL_POLL|XS_CTL_NOSLEEP))
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, CISS_IMR,
		    bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_IMR) & ~sc->iem);

	return (error);
}

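/*
 * Complete a command: take it off the submission queue, process any error
 * information, tear down the data DMA mapping and notify scsipi.
 */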
static int
ciss_done(struct ciss_ccb *ccb)
{
	struct ciss_softc *sc = ccb->ccb_sc;
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct ciss_cmd *cmd;
	int error = 0;

	CISS_DPRINTF(CISS_D_CMD, ("ciss_done(%p) ", ccb));

	if (ccb->ccb_state != CISS_CCB_ONQ) {
		printf("%s: unqueued ccb %p ready, state=0x%x\n",
		    device_xname(&sc->sc_dev), ccb, ccb->ccb_state);
		return 1;
	}

	ccb->ccb_state = CISS_CCB_READY;
	mutex_enter(&sc->sc_mutex);
	TAILQ_REMOVE(&sc->sc_ccbq, ccb, ccb_link);
	mutex_exit(&sc->sc_mutex);

	if (ccb->ccb_cmd.id & CISS_CMD_ERR)
		error = ciss_error(ccb);

	cmd = &ccb->ccb_cmd;
	if (ccb->ccb_data) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, (cmd->flags & CISS_CDB_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
		ccb->ccb_xs = NULL;
		ccb->ccb_data = NULL;
	}

	ciss_put_ccb(ccb);

	if (xs) {
		xs->resid = 0;
		CISS_DPRINTF(CISS_D_CMD, ("scsipi_done(%p) ", xs));
		if (xs->cmd->opcode == INQUIRY) {
			struct scsipi_inquiry_data *inq;
			inq = (struct scsipi_inquiry_data *)xs->data;
			if ((inq->version & SID_ANSII) == 0 &&
			    (inq->flags3 & SID_CmdQue) != 0) {
				inq->version |= 2;
			}
		}
		scsipi_done(xs);
	}

	return error;
}

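/*
 * Translate the controller's error descriptor into scsipi error codes
 * and sense data for the transfer, if any.
 */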
static int
ciss_error(struct ciss_ccb *ccb)
{
	struct ciss_softc *sc = ccb->ccb_sc;
	struct ciss_error *err = &ccb->ccb_err;
	struct scsipi_xfer *xs = ccb->ccb_xs;
	int rv;

	switch ((rv = le16toh(err->cmd_stat))) {
	case CISS_ERR_OK:
		break;

	case CISS_ERR_INVCMD:
		if (xs == NULL ||
		    xs->cmd->opcode != SCSI_SYNCHRONIZE_CACHE_10)
			printf("%s: invalid cmd 0x%x: 0x%x is not valid @ 0x%x[%d]\n",
			    device_xname(&sc->sc_dev), ccb->ccb_cmd.id,
			    err->err_info, err->err_type[3], err->err_type[2]);
		if (xs) {
			memset(&xs->sense, 0, sizeof(xs->sense));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_CURRENT | SSD_RCODE_VALID;
			xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
			xs->sense.scsi_sense.asc = 0x24;	/* ill field */
			xs->sense.scsi_sense.ascq = 0x0;
			xs->error = XS_SENSE;
		}
		break;

	case CISS_ERR_TMO:
		xs->error = XS_TIMEOUT;
		break;

	case CISS_ERR_UNRUN:
		/* Underrun */
		xs->resid = le32toh(err->resid);
		CISS_DPRINTF(CISS_D_CMD, (" underrun resid=0x%x ",
		    xs->resid));
		break;
	default:
		if (xs) {
			CISS_DPRINTF(CISS_D_CMD, ("scsi_stat=%x ", err->scsi_stat));
			switch (err->scsi_stat) {
			case SCSI_CHECK:
				xs->error = XS_SENSE;
				memcpy(&xs->sense, &err->sense[0],
				    sizeof(xs->sense));
				CISS_DPRINTF(CISS_D_CMD, (" sense=%02x %02x %02x %02x ",
				    err->sense[0], err->sense[1], err->sense[2], err->sense[3]));
				break;

			case XS_BUSY:
				xs->error = XS_BUSY;
				break;

			default:
				CISS_DPRINTF(CISS_D_ERR, ("%s: "
				    "cmd_stat=%x scsi_stat=0x%x resid=0x%x\n",
				    device_xname(&sc->sc_dev), rv, err->scsi_stat,
				    le32toh(err->resid)));
				printf("ciss driver stuffup in %s:%d: %s()\n",
				    __FILE__, __LINE__, __func__);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
			xs->resid = le32toh(err->resid);
		}
	}
	ccb->ccb_cmd.id &= htole32(~3);

	return rv;
}

static int
ciss_inq(struct ciss_softc *sc, struct ciss_inquiry *inq)
{
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;

	ccb = ciss_get_ccb(sc);
	ccb->ccb_len = sizeof(*inq);
	ccb->ccb_data = inq;
	ccb->ccb_xs = NULL;
	cmd = &ccb->ccb_cmd;
	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
	cmd->tgt2 = 0;
	cmd->cdblen = 10;
	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
	cmd->tmo = htole16(0);
	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
	cmd->cdb[0] = CISS_CMD_CTRL_GET;
	cmd->cdb[6] = CISS_CMS_CTRL_CTRL;
	cmd->cdb[7] = sizeof(*inq) >> 8;	/* biiiig endian */
	cmd->cdb[8] = sizeof(*inq) & 0xff;

	return ciss_cmd(ccb, BUS_DMA_NOWAIT, XS_CTL_POLL|XS_CTL_NOSLEEP);
}

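/*
 * Fetch the logical drive map from the controller into the scratch buffer.
 */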
static int
ciss_ldmap(struct ciss_softc *sc)
{
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;
	struct ciss_ldmap *lmap;
	int total, rv;

	mutex_enter(&sc->sc_mutex_scratch);
	lmap = sc->scratch;
	lmap->size = htobe32(sc->maxunits * sizeof(lmap->map));
	total = sizeof(*lmap) + (sc->maxunits - 1) * sizeof(lmap->map);

	ccb = ciss_get_ccb(sc);
	ccb->ccb_len = total;
	ccb->ccb_data = lmap;
	ccb->ccb_xs = NULL;
	cmd = &ccb->ccb_cmd;
	cmd->tgt = CISS_CMD_MODE_PERIPH;
	cmd->tgt2 = 0;
	cmd->cdblen = 12;
	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
	cmd->tmo = htole16(30);
	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
	cmd->cdb[0] = CISS_CMD_LDMAP;
	cmd->cdb[8] = total >> 8;	/* biiiig endian */
	cmd->cdb[9] = total & 0xff;

	rv = ciss_cmd(ccb, BUS_DMA_NOWAIT, XS_CTL_POLL|XS_CTL_NOSLEEP);

	if (rv) {
		mutex_exit(&sc->sc_mutex_scratch);
		return rv;
	}

	CISS_DPRINTF(CISS_D_MISC, ("lmap %x:%x\n",
	    lmap->map[0].tgt, lmap->map[0].tgt2));

	mutex_exit(&sc->sc_mutex_scratch);
	return 0;
}

static int
ciss_sync(struct ciss_softc *sc)
{
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;
	struct ciss_flush *flush;
	int rv;

	mutex_enter(&sc->sc_mutex_scratch);
	flush = sc->scratch;
	memset(flush, 0, sizeof(*flush));
	flush->flush = sc->sc_flush;

	ccb = ciss_get_ccb(sc);
	ccb->ccb_len = sizeof(*flush);
	ccb->ccb_data = flush;
	ccb->ccb_xs = NULL;
	cmd = &ccb->ccb_cmd;
	cmd->tgt = CISS_CMD_MODE_PERIPH;
	cmd->tgt2 = 0;
	cmd->cdblen = 10;
	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_OUT;
	cmd->tmo = 0;
	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
	cmd->cdb[0] = CISS_CMD_CTRL_SET;
	cmd->cdb[6] = CISS_CMS_CTRL_FLUSH;
	cmd->cdb[7] = sizeof(*flush) >> 8;	/* biiiig endian */
	cmd->cdb[8] = sizeof(*flush) & 0xff;

	rv = ciss_cmd(ccb, BUS_DMA_NOWAIT, XS_CTL_POLL|XS_CTL_NOSLEEP);
	mutex_exit(&sc->sc_mutex_scratch);

	return rv;
}

int
ciss_ldid(struct ciss_softc *sc, int target, struct ciss_ldid *id)
{
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;

	ccb = ciss_get_ccb(sc);
	if (ccb == NULL)
		return ENOMEM;
	ccb->ccb_len = sizeof(*id);
	ccb->ccb_data = id;
	ccb->ccb_xs = NULL;
	cmd = &ccb->ccb_cmd;
	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
	cmd->tgt2 = 0;
	cmd->cdblen = 10;
	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
	cmd->tmo = htole16(0);
	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
	cmd->cdb[0] = CISS_CMD_CTRL_GET;
	cmd->cdb[1] = target;
	cmd->cdb[6] = CISS_CMS_CTRL_LDIDEXT;
	cmd->cdb[7] = sizeof(*id) >> 8;	/* biiiig endian */
	cmd->cdb[8] = sizeof(*id) & 0xff;

	return ciss_cmd(ccb, BUS_DMA_NOWAIT, XS_CTL_POLL | sc->sc_waitflag);
}

int
ciss_ldstat(struct ciss_softc *sc, int target, struct ciss_ldstat *stat)
{
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;

	ccb = ciss_get_ccb(sc);
	if (ccb == NULL)
		return ENOMEM;
	ccb->ccb_len = sizeof(*stat);
	ccb->ccb_data = stat;
	ccb->ccb_xs = NULL;
	cmd = &ccb->ccb_cmd;
	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
	cmd->tgt2 = 0;
	cmd->cdblen = 10;
	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
	cmd->tmo = htole16(0);
	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
	cmd->cdb[0] = CISS_CMD_CTRL_GET;
	cmd->cdb[1] = target;
	cmd->cdb[6] = CISS_CMS_CTRL_LDSTAT;
	cmd->cdb[7] = sizeof(*stat) >> 8;	/* biiiig endian */
	cmd->cdb[8] = sizeof(*stat) & 0xff;

	return ciss_cmd(ccb, BUS_DMA_NOWAIT, XS_CTL_POLL | sc->sc_waitflag);
}

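/*
 * Read the identify page of physical drive 'drv'; 'wait' selects polled
 * versus sleeping command submission.
 */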
int
ciss_pdid(struct ciss_softc *sc, u_int8_t drv, struct ciss_pdid *id, int wait)
{
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;

	ccb = ciss_get_ccb(sc);
	if (ccb == NULL)
		return ENOMEM;
	ccb->ccb_len = sizeof(*id);
	ccb->ccb_data = id;
	ccb->ccb_xs = NULL;
	cmd = &ccb->ccb_cmd;
	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
	cmd->tgt2 = 0;
	cmd->cdblen = 10;
	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_IN;
	cmd->tmo = htole16(0);
	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
	cmd->cdb[0] = CISS_CMD_CTRL_GET;
	cmd->cdb[2] = drv;
	cmd->cdb[6] = CISS_CMS_CTRL_PDID;
	cmd->cdb[7] = sizeof(*id) >> 8;	/* biiiig endian */
	cmd->cdb[8] = sizeof(*id) & 0xff;

	return ciss_cmd(ccb, BUS_DMA_NOWAIT, wait);
}


struct ciss_ld *
ciss_pdscan(struct ciss_softc *sc, int ld)
{
	struct ciss_pdid *pdid;
	struct ciss_ld *ldp;
	u_int8_t drv, buf[128];
	int i, j, k = 0;

	mutex_enter(&sc->sc_mutex_scratch);
	pdid = sc->scratch;
	if (sc->ndrives == 256) {
		for (i = 0; i < CISS_BIGBIT; i++)
			if (!ciss_pdid(sc, i, pdid,
			    XS_CTL_POLL|XS_CTL_NOSLEEP) &&
			    (pdid->present & CISS_PD_PRESENT))
				buf[k++] = i;
	} else
		for (i = 0; i < sc->nbus; i++)
			for (j = 0; j < sc->ndrives; j++) {
				drv = CISS_BIGBIT + i * sc->ndrives + j;
				if (!ciss_pdid(sc, drv, pdid,
				    XS_CTL_POLL|XS_CTL_NOSLEEP))
					buf[k++] = drv;
			}
	mutex_exit(&sc->sc_mutex_scratch);

	if (!k)
		return NULL;

	ldp = malloc(sizeof(*ldp) + (k-1), M_DEVBUF, M_NOWAIT);
	if (!ldp)
		return NULL;

	memset(&ldp->bling, 0, sizeof(ldp->bling));
	ldp->ndrives = k;
	ldp->xname[0] = 0;
	memcpy(ldp->tgts, buf, k);
	return ldp;
}

#if 0
static void
ciss_scsi_raw_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)				/* TODO */
{
	struct scsipi_xfer *xs = (struct scsipi_xfer *) arg;
	struct ciss_rawsoftc *rsc =
	    (struct ciss_rawsoftc *) chan->chan_adapter->adapt_dev;
	struct ciss_softc *sc = rsc->sc_softc;
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;
	int error;

	CISS_DPRINTF(CISS_D_CMD, ("ciss_scsi_raw_cmd "));

	switch (req)
	{
	case ADAPTER_REQ_RUN_XFER:
		if (xs->cmdlen > CISS_MAX_CDB) {
			CISS_DPRINTF(CISS_D_CMD, ("CDB too big %p ", xs));
			memset(&xs->sense, 0, sizeof(xs->sense));
			printf("ciss driver stuffup in %s:%d: %s()\n",
			    __FILE__, __LINE__, __func__);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}

		error = 0;
		xs->error = XS_NOERROR;

		/* TODO: check that this target is not already used by any volume */

		ccb = ciss_get_ccb(sc);
		cmd = &ccb->ccb_cmd;
		ccb->ccb_len = xs->datalen;
		ccb->ccb_data = xs->data;
		ccb->ccb_xs = xs;

		cmd->cdblen = xs->cmdlen;
		cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL;
		if (xs->xs_control & XS_CTL_DATA_IN)
			cmd->flags |= CISS_CDB_IN;
		else if (xs->xs_control & XS_CTL_DATA_OUT)
			cmd->flags |= CISS_CDB_OUT;
		cmd->tmo = xs->timeout < 1000? 1 : xs->timeout / 1000;
		memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
		memcpy(&cmd->cdb[0], xs->cmd, CISS_MAX_CDB);

		if (ciss_cmd(ccb, BUS_DMA_WAITOK,
		    xs->xs_control & (XS_CTL_POLL|XS_CTL_NOSLEEP))) {
			printf("ciss driver stuffup in %s:%d: %s()\n",
			    __FILE__, __LINE__, __func__);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}

		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * We can't change the transfer mode, but at least let
		 * scsipi know what the adapter has negotiated.
		 */
		/* Get xfer mode and return it */
		break;
	}
}
#endif

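/*
 * scsipi request entry point: each logical drive is presented as a
 * target with a single LUN on the one channel.
 */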
static void
ciss_scsi_cmd(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_xfer *xs;
	struct scsipi_xfer_mode *xm;
	struct ciss_softc *sc =
	    (struct ciss_softc *) chan->chan_adapter->adapt_dev;
	u_int8_t target;
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;
	int error;

	CISS_DPRINTF(CISS_D_CMD, ("ciss_scsi_cmd "));

	switch (req)
	{
	case ADAPTER_REQ_RUN_XFER:
		xs = (struct scsipi_xfer *) arg;
		target = xs->xs_periph->periph_target;
		CISS_DPRINTF(CISS_D_CMD, ("targ=%d ", target));
		if (xs->cmdlen > CISS_MAX_CDB) {
			CISS_DPRINTF(CISS_D_CMD, ("CDB too big %p ", xs));
			memset(&xs->sense, 0, sizeof(xs->sense));
			printf("ciss driver stuffup in %s:%d: %s()\n",
			    __FILE__, __LINE__, __func__);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}

		error = 0;
		xs->error = XS_NOERROR;

		/* XXX emulate SYNCHRONIZE_CACHE ??? */

		ccb = ciss_get_ccb(sc);
		cmd = &ccb->ccb_cmd;
		ccb->ccb_len = xs->datalen;
		ccb->ccb_data = xs->data;
		ccb->ccb_xs = xs;
		cmd->tgt = CISS_CMD_MODE_LD | target;
		cmd->tgt2 = 0;
		cmd->cdblen = xs->cmdlen;
		cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL;
		if (xs->xs_control & XS_CTL_DATA_IN)
			cmd->flags |= CISS_CDB_IN;
		else if (xs->xs_control & XS_CTL_DATA_OUT)
			cmd->flags |= CISS_CDB_OUT;
		cmd->tmo = xs->timeout < 1000? 1 : xs->timeout / 1000;
		memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
		memcpy(&cmd->cdb[0], xs->cmd, CISS_MAX_CDB);
		CISS_DPRINTF(CISS_D_CMD, ("cmd=%02x %02x %02x %02x %02x %02x ",
		    cmd->cdb[0], cmd->cdb[1], cmd->cdb[2],
		    cmd->cdb[3], cmd->cdb[4], cmd->cdb[5]));

		if (ciss_cmd(ccb, BUS_DMA_WAITOK,
		    xs->xs_control & (XS_CTL_POLL|XS_CTL_NOSLEEP))) {
			printf("ciss driver stuffup in %s:%d: %s()\n",
			    __FILE__, __LINE__, __func__);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			return;
		}

		break;
	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;
	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * We can't change the transfer mode, but at least let
		 * scsipi know what the adapter has negotiated.
		 */
		xm = (struct scsipi_xfer_mode *)arg;
		xm->xm_mode |= PERIPH_CAP_TQING;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
		break;
	}
}

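/*
 * Interrupt handler: drain the outbound completion queue, waking pollers
 * that sleep on their command and completing everything else in place.
 */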
int
ciss_intr(void *v)
{
	struct ciss_softc *sc = v;
	struct ciss_ccb *ccb;
	u_int32_t id;
	int hit = 0;

	CISS_DPRINTF(CISS_D_INTR, ("intr "));

	if (!(bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_ISR) & sc->iem))
		return 0;

	while ((id = bus_space_read_4(sc->sc_iot, sc->sc_ioh, CISS_OUTQ)) !=
	    0xffffffff) {

		ccb = (struct ciss_ccb *) ((char *)sc->ccbs + (id >> 2) * sc->ccblen);
		ccb->ccb_cmd.id = htole32(id);
		if (ccb->ccb_state == CISS_CCB_POLL) {
			ccb->ccb_state = CISS_CCB_ONQ;
			mutex_enter(&sc->sc_mutex);
			cv_broadcast(&sc->sc_condvar);
			mutex_exit(&sc->sc_mutex);
		} else
			ciss_done(ccb);

		hit = 1;
	}

	CISS_DPRINTF(CISS_D_INTR, ("exit\n"));
	return hit;
}

static void
ciss_heartbeat(void *v)
{
	struct ciss_softc *sc = v;
	u_int32_t hb;

	hb = bus_space_read_4(sc->sc_iot, sc->cfg_ioh,
	    sc->cfgoff + offsetof(struct ciss_config, heartbeat));
	if (hb == sc->heartbeat)
		panic("ciss: dead");	/* XX reset! */
	else
		sc->heartbeat = hb;

	callout_schedule(&sc->sc_hb, hz * 3);
}

static int
ciss_scsi_ioctl(struct scsipi_channel *chan, u_long cmd,
    void *addr, int flag, struct proc *p)
{
#if NBIO > 0
	return ciss_ioctl(chan->chan_adapter->adapt_dev, cmd, addr);
#else
	return ENOTTY;
#endif
}

#if NBIO > 0
const int ciss_level[] = { 0, 4, 1, 5, 51, 7 };
const int ciss_stat[] = { BIOC_SVONLINE, BIOC_SVOFFLINE, BIOC_SVOFFLINE,
    BIOC_SVDEGRADED, BIOC_SVREBUILD, BIOC_SVREBUILD, BIOC_SVDEGRADED,
    BIOC_SVDEGRADED, BIOC_SVINVALID, BIOC_SVINVALID, BIOC_SVBUILDING,
    BIOC_SVOFFLINE, BIOC_SVBUILDING };

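/*
 * bio(4) ioctl interface: controller inventory, volume and disk status,
 * and drive identification (blink) control.
 */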
int
ciss_ioctl(device_t dev, u_long cmd, void *addr)
{
	struct ciss_softc *sc = (struct ciss_softc *)dev;
	struct bioc_inq *bi;
	struct bioc_disk *bd;
	struct bioc_blink *bb;
	struct ciss_ldstat *ldstat;
	struct ciss_pdid *pdid;
	struct ciss_blink *blink;
	struct ciss_ld *ldp;
	u_int8_t drv;
	int ld, pd, error = 0;

	switch (cmd) {
	case BIOCINQ:
		bi = (struct bioc_inq *)addr;
		strlcpy(bi->bi_dev, device_xname(&sc->sc_dev), sizeof(bi->bi_dev));
		bi->bi_novol = sc->maxunits;
		bi->bi_nodisk = sc->sc_lds[0]->ndrives;
		break;

	case BIOCVOL:
		error = ciss_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK_NOVOL:
		/*
		 * XXX since we don't know how to associate physical drives
		 * with logical drives yet, BIOCDISK_NOVOL is equivalent to
		 * BIOCDISK for the volume that all physical drives have been
		 * associated with.  Maybe associate all physical drives with
		 * all logical volumes, but only return physical drives on one
		 * logical volume.  Which one?  Perhaps the first volume that
		 * is degraded, rebuilding, or failed?
		 */
		bd = (struct bioc_disk *)addr;
		bd->bd_volid = 0;
		bd->bd_disknovol = true;
		/* FALLTHROUGH */
	case BIOCDISK:
		bd = (struct bioc_disk *)addr;
		if (bd->bd_volid > sc->maxunits) {
			error = EINVAL;
			break;
		}
		ldp = sc->sc_lds[0];
		if (!ldp || (pd = bd->bd_diskid) > ldp->ndrives) {
			error = EINVAL;
			break;
		}
		ldstat = sc->scratch;
		if ((error = ciss_ldstat(sc, bd->bd_volid, ldstat))) {
			break;
		}
		bd->bd_status = -1;
		if (ldstat->stat == CISS_LD_REBLD &&
		    ldstat->bigrebuild == ldp->tgts[pd])
			bd->bd_status = BIOC_SDREBUILD;
		if (ciss_bitset(ldp->tgts[pd] & (~CISS_BIGBIT),
		    ldstat->bigfailed)) {
			bd->bd_status = BIOC_SDFAILED;
			bd->bd_size = 0;
			bd->bd_channel = (ldp->tgts[pd] & (~CISS_BIGBIT)) /
			    sc->ndrives;
			bd->bd_target = ldp->tgts[pd] % sc->ndrives;
			bd->bd_lun = 0;
			bd->bd_vendor[0] = '\0';
			bd->bd_serial[0] = '\0';
			bd->bd_procdev[0] = '\0';
		} else {
			pdid = sc->scratch;
			if ((error = ciss_pdid(sc, ldp->tgts[pd], pdid,
			    XS_CTL_POLL))) {
				bd->bd_status = BIOC_SDFAILED;
				bd->bd_size = 0;
				bd->bd_channel = (ldp->tgts[pd] & (~CISS_BIGBIT)) /
				    sc->ndrives;
				bd->bd_target = ldp->tgts[pd] % sc->ndrives;
				bd->bd_lun = 0;
				bd->bd_vendor[0] = '\0';
				bd->bd_serial[0] = '\0';
				bd->bd_procdev[0] = '\0';
				error = 0;
				break;
			}
			if (bd->bd_status < 0) {
				if (pdid->config & CISS_PD_SPARE)
					bd->bd_status = BIOC_SDHOTSPARE;
				else if (pdid->present & CISS_PD_PRESENT)
					bd->bd_status = BIOC_SDONLINE;
				else
					bd->bd_status = BIOC_SDINVALID;
			}
			bd->bd_size = (u_int64_t)le32toh(pdid->nblocks) *
			    le16toh(pdid->blksz);
			bd->bd_channel = pdid->bus;
			bd->bd_target = pdid->target;
			bd->bd_lun = 0;
			strlcpy(bd->bd_vendor, pdid->model,
			    sizeof(bd->bd_vendor));
			strlcpy(bd->bd_serial, pdid->serial,
			    sizeof(bd->bd_serial));
			bd->bd_procdev[0] = '\0';
		}
		break;

	case BIOCBLINK:
		bb = (struct bioc_blink *)addr;
		blink = sc->scratch;
		error = EINVAL;
		/* XXX workaround completely dumb scsi addressing */
		for (ld = 0; ld < sc->maxunits; ld++) {
			ldp = sc->sc_lds[ld];
			if (!ldp)
				continue;
			if (sc->ndrives == 256)
				drv = bb->bb_target;
			else
				drv = CISS_BIGBIT +
				    bb->bb_channel * sc->ndrives +
				    bb->bb_target;
			for (pd = 0; pd < ldp->ndrives; pd++)
				if (ldp->tgts[pd] == drv)
					error = ciss_blink(sc, ld, pd,
					    bb->bb_status, blink);
		}
		break;

	case BIOCALARM:
	case BIOCSETSTATE:
	default:
		error = EINVAL;
	}

	return (error);
}

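/*
 * Fill in a bioc_vol for one logical volume: capacity, RAID level,
 * status and, while rebuilding, a progress estimate.
 */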
int
ciss_ioctl_vol(struct ciss_softc *sc, struct bioc_vol *bv)
{
	struct ciss_ldid *ldid;
	struct ciss_ld *ldp;
	struct ciss_ldstat *ldstat;
	struct ciss_pdid *pdid;
	int error = 0;
	u_int blks;

	if (bv->bv_volid > sc->maxunits) {
		return EINVAL;
	}
	ldp = sc->sc_lds[bv->bv_volid];
	ldid = sc->scratch;
	if ((error = ciss_ldid(sc, bv->bv_volid, ldid))) {
		return error;
	}
	bv->bv_status = BIOC_SVINVALID;
	blks = (u_int)le16toh(ldid->nblocks[1]) << 16 |
	    le16toh(ldid->nblocks[0]);
	bv->bv_size = blks * (u_quad_t)le16toh(ldid->blksize);
	bv->bv_level = ciss_level[ldid->type];
	/*
	 * XXX Should only return bv_nodisk for the logical volume that we've
	 * associated the physical drives with: either the first degraded,
	 * rebuilding, or failed volume, else volume 0?
	 */
	if (ldp) {
		bv->bv_nodisk = ldp->ndrives;
		strlcpy(bv->bv_dev, ldp->xname, sizeof(bv->bv_dev));
	}
	strlcpy(bv->bv_vendor, "CISS", sizeof(bv->bv_vendor));
	ldstat = sc->scratch;
	memset(ldstat, 0, sizeof(*ldstat));
	if ((error = ciss_ldstat(sc, bv->bv_volid, ldstat))) {
		return error;
	}
	bv->bv_percent = -1;
	bv->bv_seconds = 0;
	if (ldstat->stat < sizeof(ciss_stat)/sizeof(ciss_stat[0]))
		bv->bv_status = ciss_stat[ldstat->stat];
	if (bv->bv_status == BIOC_SVREBUILD ||
	    bv->bv_status == BIOC_SVBUILDING) {
		u_int64_t prog;

		ldp = sc->sc_lds[0];
		if (ldp) {
			bv->bv_nodisk = ldp->ndrives;
			strlcpy(bv->bv_dev, ldp->xname, sizeof(bv->bv_dev));
		}
		/*
		 * XXX ldstat->prog is the number of blocks remaining on the
		 * physical drive being rebuilt; blks is only correct for a
		 * RAID1 set.  RAID5 needs the size of the physical device,
		 * which we don't yet know.  ldstat->bigrebuild has the
		 * physical device target, so it could be used with pdid to
		 * get the size.  Another way is to save pd information in sc
		 * so it's easy to reference.
		 */
		prog = (u_int64_t)((ldstat->prog[3] << 24) |
		    (ldstat->prog[2] << 16) | (ldstat->prog[1] << 8) |
		    ldstat->prog[0]);
		pdid = sc->scratch;
		if (!ciss_pdid(sc, ldstat->bigrebuild, pdid, XS_CTL_POLL)) {
			blks = le32toh(pdid->nblocks);
			bv->bv_percent = (blks - prog) * 1000ULL / blks;
		}
	}
	return 0;
}

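/*
 * Toggle the locate LED of one physical drive by rewriting the blink
 * table of the logical drive it belongs to.
 */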
int
ciss_blink(struct ciss_softc *sc, int ld, int pd, int stat,
    struct ciss_blink *blink)
{
	struct ciss_ccb *ccb;
	struct ciss_cmd *cmd;
	struct ciss_ld *ldp;

	if (ld > sc->maxunits)
		return EINVAL;

	ldp = sc->sc_lds[ld];
	if (!ldp || pd > ldp->ndrives)
		return EINVAL;

	ldp->bling.pdtab[ldp->tgts[pd]] = stat == BIOC_SBUNBLINK? 0 :
	    CISS_BLINK_ALL;
	memcpy(blink, &ldp->bling, sizeof(*blink));

	ccb = ciss_get_ccb(sc);
	if (ccb == NULL)
		return ENOMEM;
	ccb->ccb_len = sizeof(*blink);
	ccb->ccb_data = blink;
	ccb->ccb_xs = NULL;
	cmd = &ccb->ccb_cmd;
	cmd->tgt = htole32(CISS_CMD_MODE_PERIPH);
	cmd->tgt2 = 0;
	cmd->cdblen = 10;
	cmd->flags = CISS_CDB_CMD | CISS_CDB_SIMPL | CISS_CDB_OUT;
	cmd->tmo = htole16(0);
	memset(&cmd->cdb[0], 0, sizeof(cmd->cdb));
	cmd->cdb[0] = CISS_CMD_CTRL_SET;
	cmd->cdb[6] = CISS_CMS_CTRL_PDBLINK;
	cmd->cdb[7] = sizeof(*blink) >> 8;	/* biiiig endian */
	cmd->cdb[8] = sizeof(*blink) & 0xff;

	return ciss_cmd(ccb, BUS_DMA_NOWAIT, XS_CTL_POLL);
}

int
ciss_create_sensors(struct ciss_softc *sc)
{
	int i;
	int nsensors = sc->maxunits;

	sc->sc_sme = sysmon_envsys_create();
	sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_sensor == NULL) {
		aprint_error_dev(&sc->sc_dev, "can't allocate envsys_data");
		return(ENOMEM);
	}

	for (i = 0; i < nsensors; i++) {
		sc->sc_sensor[i].units = ENVSYS_DRIVE;
		/* Enable monitoring for drive state changes */
		sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
		/* logical drives */
		snprintf(sc->sc_sensor[i].desc,
		    sizeof(sc->sc_sensor[i].desc), "%s:%d",
		    device_xname(&sc->sc_dev), i);
		if (sysmon_envsys_sensor_attach(sc->sc_sme,
		    &sc->sc_sensor[i]))
			goto out;
	}

	sc->sc_sme->sme_name = device_xname(&sc->sc_dev);
	sc->sc_sme->sme_cookie = sc;
	sc->sc_sme->sme_refresh = ciss_sensor_refresh;
	if (sysmon_envsys_register(sc->sc_sme)) {
		printf("%s: unable to register with sysmon\n", device_xname(&sc->sc_dev));
		return(1);
	}
	return (0);

out:
	free(sc->sc_sensor, M_DEVBUF);
	sysmon_envsys_destroy(sc->sc_sme);
	return EINVAL;
}

void
ciss_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	struct ciss_softc *sc = sme->sme_cookie;
	struct bioc_vol bv;

	if (edata->sensor >= sc->maxunits)
		return;

	memset(&bv, 0, sizeof(bv));
	bv.bv_volid = edata->sensor;
	if (ciss_ioctl_vol(sc, &bv)) {
		return;
	}

	switch(bv.bv_status) {
	case BIOC_SVOFFLINE:
		edata->value_cur = ENVSYS_DRIVE_FAIL;
		edata->state = ENVSYS_SCRITICAL;
		break;

	case BIOC_SVDEGRADED:
		edata->value_cur = ENVSYS_DRIVE_PFAIL;
		edata->state = ENVSYS_SCRITICAL;
		break;

	case BIOC_SVSCRUB:
	case BIOC_SVONLINE:
		edata->value_cur = ENVSYS_DRIVE_ONLINE;
		edata->state = ENVSYS_SVALID;
		break;

	case BIOC_SVREBUILD:
	case BIOC_SVBUILDING:
		edata->value_cur = ENVSYS_DRIVE_REBUILD;
		edata->state = ENVSYS_SVALID;
		break;

	case BIOC_SVINVALID:
		/* FALLTHROUGH */
	default:
		edata->value_cur = 0;	/* unknown */
		edata->state = ENVSYS_SINVALID;
	}
}
#endif /* NBIO > 0 */