/*	$NetBSD: icp.c,v 1.26 2007/10/19 11:59:53 ad Exp $	*/

/*-
 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Niklas Hallqvist.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from	OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
 */

/*
 * This driver would not have been written if it was not for the hardware
 * donations from both ICP-Vortex and Öko.neT.  I want to thank them for
 * their support.
 *
 * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
 * Intel.
 *
 * Support for the ICP-Vortex management tools added by
 * Jason R. Thorpe of Wasabi Systems, Inc., based on code
 * provided by Achim Leubner <achim.leubner@intel.com>.
 *
 * Additional support for dynamic rescan of cacheservice drives by
 * Jason R. Thorpe of Wasabi Systems, Inc.
83 */ 84 85 #include <sys/cdefs.h> 86 __KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.26 2007/10/19 11:59:53 ad Exp $"); 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/kernel.h> 91 #include <sys/device.h> 92 #include <sys/queue.h> 93 #include <sys/proc.h> 94 #include <sys/buf.h> 95 #include <sys/endian.h> 96 #include <sys/malloc.h> 97 #include <sys/disk.h> 98 99 #include <uvm/uvm_extern.h> 100 101 #include <sys/bswap.h> 102 #include <sys/bus.h> 103 104 #include <dev/pci/pcireg.h> 105 #include <dev/pci/pcivar.h> 106 #include <dev/pci/pcidevs.h> 107 108 #include <dev/ic/icpreg.h> 109 #include <dev/ic/icpvar.h> 110 111 #include <dev/scsipi/scsipi_all.h> 112 #include <dev/scsipi/scsiconf.h> 113 114 #include "locators.h" 115 116 int icp_async_event(struct icp_softc *, int); 117 void icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic); 118 void icp_chain(struct icp_softc *); 119 int icp_print(void *, const char *); 120 void icp_watchdog(void *); 121 void icp_ucmd_intr(struct icp_ccb *); 122 void icp_recompute_openings(struct icp_softc *); 123 124 int icp_count; /* total # of controllers, for ioctl interface */ 125 126 /* 127 * Statistics for the ioctl interface to query. 128 * 129 * XXX Global. They should probably be made per-controller 130 * XXX at some point. 131 */ 132 gdt_statist_t icp_stats; 133 134 int 135 icp_init(struct icp_softc *icp, const char *intrstr) 136 { 137 struct icp_attach_args icpa; 138 struct icp_binfo binfo; 139 struct icp_ccb *ic; 140 u_int16_t cdev_cnt; 141 int i, j, state, feat, nsegs, rv; 142 int locs[ICPCF_NLOCS]; 143 144 state = 0; 145 146 if (intrstr != NULL) 147 aprint_normal("%s: interrupting at %s\n", icp->icp_dv.dv_xname, 148 intrstr); 149 150 SIMPLEQ_INIT(&icp->icp_ccb_queue); 151 SIMPLEQ_INIT(&icp->icp_ccb_freelist); 152 SIMPLEQ_INIT(&icp->icp_ucmd_queue); 153 callout_init(&icp->icp_wdog_callout, 0); 154 155 /* 156 * Allocate a scratch area. 
157 */ 158 if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1, 159 ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 160 &icp->icp_scr_dmamap) != 0) { 161 aprint_error("%s: cannot create scratch dmamap\n", 162 icp->icp_dv.dv_xname); 163 return (1); 164 } 165 state++; 166 167 if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0, 168 icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) { 169 aprint_error("%s: cannot alloc scratch dmamem\n", 170 icp->icp_dv.dv_xname); 171 goto bail_out; 172 } 173 state++; 174 175 if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs, 176 ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) { 177 aprint_error("%s: cannot map scratch dmamem\n", 178 icp->icp_dv.dv_xname); 179 goto bail_out; 180 } 181 state++; 182 183 if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr, 184 ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) { 185 aprint_error("%s: cannot load scratch dmamap\n", 186 icp->icp_dv.dv_xname); 187 goto bail_out; 188 } 189 state++; 190 191 /* 192 * Allocate and initialize the command control blocks. 193 */ 194 ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_NOWAIT | M_ZERO); 195 if ((icp->icp_ccbs = ic) == NULL) { 196 aprint_error("%s: malloc() failed\n", icp->icp_dv.dv_xname); 197 goto bail_out; 198 } 199 state++; 200 201 for (i = 0; i < ICP_NCCBS; i++, ic++) { 202 /* 203 * The first two command indexes have special meanings, so 204 * we can't use them. 205 */ 206 ic->ic_ident = i + 2; 207 rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER, 208 ICP_MAXSG, ICP_MAX_XFER, 0, 209 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 210 &ic->ic_xfer_map); 211 if (rv != 0) 212 break; 213 icp->icp_nccbs++; 214 icp_ccb_free(icp, ic); 215 } 216 #ifdef DIAGNOSTIC 217 if (icp->icp_nccbs != ICP_NCCBS) 218 aprint_error("%s: %d/%d CCBs usable\n", icp->icp_dv.dv_xname, 219 icp->icp_nccbs, ICP_NCCBS); 220 #endif 221 222 /* 223 * Initalize the controller. 
224 */ 225 if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) { 226 aprint_error("%s: screen service init error %d\n", 227 icp->icp_dv.dv_xname, icp->icp_status); 228 goto bail_out; 229 } 230 231 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) { 232 aprint_error("%s: cache service init error %d\n", 233 icp->icp_dv.dv_xname, icp->icp_status); 234 goto bail_out; 235 } 236 237 icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0); 238 239 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) { 240 aprint_error("%s: cache service mount error %d\n", 241 icp->icp_dv.dv_xname, icp->icp_status); 242 goto bail_out; 243 } 244 245 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) { 246 aprint_error("%s: cache service post-mount init error %d\n", 247 icp->icp_dv.dv_xname, icp->icp_status); 248 goto bail_out; 249 } 250 cdev_cnt = (u_int16_t)icp->icp_info; 251 icp->icp_fw_vers = icp->icp_service; 252 253 if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) { 254 aprint_error("%s: raw service init error %d\n", 255 icp->icp_dv.dv_xname, icp->icp_status); 256 goto bail_out; 257 } 258 259 /* 260 * Set/get raw service features (scatter/gather). 261 */ 262 feat = 0; 263 if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER, 264 0, 0)) 265 if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0)) 266 feat = icp->icp_info; 267 268 if ((feat & ICP_SCATTER_GATHER) == 0) { 269 #ifdef DIAGNOSTIC 270 aprint_normal( 271 "%s: scatter/gather not supported (raw service)\n", 272 icp->icp_dv.dv_xname); 273 #endif 274 } else 275 icp->icp_features |= ICP_FEAT_RAWSERVICE; 276 277 /* 278 * Set/get cache service features (scatter/gather). 
279 */ 280 feat = 0; 281 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0, 282 ICP_SCATTER_GATHER, 0)) 283 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0)) 284 feat = icp->icp_info; 285 286 if ((feat & ICP_SCATTER_GATHER) == 0) { 287 #ifdef DIAGNOSTIC 288 aprint_normal( 289 "%s: scatter/gather not supported (cache service)\n", 290 icp->icp_dv.dv_xname); 291 #endif 292 } else 293 icp->icp_features |= ICP_FEAT_CACHESERVICE; 294 295 /* 296 * Pull some information from the board and dump. 297 */ 298 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO, 299 ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) { 300 aprint_error("%s: unable to retrive board info\n", 301 icp->icp_dv.dv_xname); 302 goto bail_out; 303 } 304 memcpy(&binfo, icp->icp_scr, sizeof(binfo)); 305 306 aprint_normal( 307 "%s: model <%s>, firmware <%s>, %d channel(s), %dMB memory\n", 308 icp->icp_dv.dv_xname, binfo.bi_type_string, binfo.bi_raid_string, 309 binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20); 310 311 /* 312 * Determine the number of devices, and number of openings per 313 * device. 314 */ 315 if (icp->icp_features & ICP_FEAT_CACHESERVICE) { 316 for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) { 317 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0, 318 0)) 319 continue; 320 321 icp->icp_cdr[j].cd_size = icp->icp_info; 322 if (icp->icp_cdr[j].cd_size != 0) 323 icp->icp_ndevs++; 324 325 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0, 326 0)) 327 icp->icp_cdr[j].cd_type = icp->icp_info; 328 } 329 } 330 331 if (icp->icp_features & ICP_FEAT_RAWSERVICE) { 332 icp->icp_nchan = binfo.bi_chan_count; 333 icp->icp_ndevs += icp->icp_nchan; 334 } 335 336 icp_recompute_openings(icp); 337 338 /* 339 * Attach SCSI channels. 
340 */ 341 if (icp->icp_features & ICP_FEAT_RAWSERVICE) { 342 struct icp_ioc_version *iv; 343 struct icp_rawioc *ri; 344 struct icp_getch *gc; 345 346 iv = (struct icp_ioc_version *)icp->icp_scr; 347 iv->iv_version = htole32(ICP_IOC_NEWEST); 348 iv->iv_listents = ICP_MAXBUS; 349 iv->iv_firstchan = 0; 350 iv->iv_lastchan = ICP_MAXBUS - 1; 351 iv->iv_listoffset = htole32(sizeof(*iv)); 352 353 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, 354 ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL, 355 sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) { 356 ri = (struct icp_rawioc *)(iv + 1); 357 for (j = 0; j < binfo.bi_chan_count; j++, ri++) 358 icp->icp_bus_id[j] = ri->ri_procid; 359 } else { 360 /* 361 * Fall back to the old method. 362 */ 363 gc = (struct icp_getch *)icp->icp_scr; 364 365 for (j = 0; j < binfo.bi_chan_count; j++) { 366 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, 367 ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN, 368 ICP_IO_CHANNEL | ICP_INVALID_CHANNEL, 369 sizeof(*gc))) { 370 aprint_error( 371 "%s: unable to get chan info", 372 icp->icp_dv.dv_xname); 373 goto bail_out; 374 } 375 icp->icp_bus_id[j] = gc->gc_scsiid; 376 } 377 } 378 379 for (j = 0; j < binfo.bi_chan_count; j++) { 380 if (icp->icp_bus_id[j] > ICP_MAXID_FC) 381 icp->icp_bus_id[j] = ICP_MAXID_FC; 382 383 icpa.icpa_unit = j + ICPA_UNIT_SCSI; 384 385 locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI; 386 387 icp->icp_children[icpa.icpa_unit] = 388 config_found_sm_loc(&icp->icp_dv, "icp", locs, 389 &icpa, icp_print, config_stdsubmatch); 390 } 391 } 392 393 /* 394 * Attach cache devices. 395 */ 396 if (icp->icp_features & ICP_FEAT_CACHESERVICE) { 397 for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) { 398 if (icp->icp_cdr[j].cd_size == 0) 399 continue; 400 401 icpa.icpa_unit = j; 402 403 locs[ICPCF_UNIT] = j; 404 405 icp->icp_children[icpa.icpa_unit] = 406 config_found_sm_loc(&icp->icp_dv, "icp", locs, 407 &icpa, icp_print, config_stdsubmatch); 408 } 409 } 410 411 /* 412 * Start the watchdog. 
413 */ 414 icp_watchdog(icp); 415 416 /* 417 * Count the controller, and we're done! 418 */ 419 if (icp_count++ == 0) 420 mutex_init(&icp_ioctl_mutex, MUTEX_DEFAULT, IPL_NONE); 421 422 return (0); 423 424 bail_out: 425 if (state > 4) 426 for (j = 0; j < i; j++) 427 bus_dmamap_destroy(icp->icp_dmat, 428 icp->icp_ccbs[j].ic_xfer_map); 429 if (state > 3) 430 free(icp->icp_ccbs, M_DEVBUF); 431 if (state > 2) 432 bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap); 433 if (state > 1) 434 bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr, 435 ICP_SCRATCH_SIZE); 436 if (state > 0) 437 bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs); 438 bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap); 439 440 return (1); 441 } 442 443 void 444 icp_register_servicecb(struct icp_softc *icp, int unit, 445 const struct icp_servicecb *cb) 446 { 447 448 icp->icp_servicecb[unit] = cb; 449 } 450 451 void 452 icp_rescan(struct icp_softc *icp, int unit) 453 { 454 struct icp_attach_args icpa; 455 u_int newsize, newtype; 456 int locs[ICPCF_NLOCS]; 457 458 /* 459 * NOTE: It is very important that the queue be frozen and not 460 * commands running when this is called. The ioctl mutex must 461 * also be held. 462 */ 463 464 KASSERT(icp->icp_qfreeze != 0); 465 KASSERT(icp->icp_running == 0); 466 KASSERT(unit < ICP_MAX_HDRIVES); 467 468 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) { 469 #ifdef ICP_DEBUG 470 printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n", 471 icp->icp_dv.dv_xname, unit, icp->icp_status); 472 #endif 473 goto gone; 474 } 475 if ((newsize = icp->icp_info) == 0) { 476 #ifdef ICP_DEBUG 477 printf("%s: rescan: unit %d has zero size\n", 478 icp->icp_dv.dv_xname, unit); 479 #endif 480 gone: 481 /* 482 * Host drive is no longer present; detach if a child 483 * is currently there. 
484 */ 485 if (icp->icp_cdr[unit].cd_size != 0) 486 icp->icp_ndevs--; 487 icp->icp_cdr[unit].cd_size = 0; 488 if (icp->icp_children[unit] != NULL) { 489 (void) config_detach(icp->icp_children[unit], 490 DETACH_FORCE); 491 icp->icp_children[unit] = NULL; 492 } 493 return; 494 } 495 496 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0)) 497 newtype = icp->icp_info; 498 else { 499 #ifdef ICP_DEBUG 500 printf("%s: rescan: unit %d ICP_DEVTYPE failed\n", 501 icp->icp_dv.dv_xname, unit); 502 #endif 503 newtype = 0; /* XXX? */ 504 } 505 506 #ifdef ICP_DEBUG 507 printf("%s: rescan: unit %d old %u/%u, new %u/%u\n", 508 icp->icp_dv.dv_xname, unit, icp->icp_cdr[unit].cd_size, 509 icp->icp_cdr[unit].cd_type, newsize, newtype); 510 #endif 511 512 /* 513 * If the type or size changed, detach any old child (if it exists) 514 * and attach a new one. 515 */ 516 if (icp->icp_children[unit] == NULL || 517 newsize != icp->icp_cdr[unit].cd_size || 518 newtype != icp->icp_cdr[unit].cd_type) { 519 if (icp->icp_cdr[unit].cd_size == 0) 520 icp->icp_ndevs++; 521 icp->icp_cdr[unit].cd_size = newsize; 522 icp->icp_cdr[unit].cd_type = newtype; 523 if (icp->icp_children[unit] != NULL) 524 (void) config_detach(icp->icp_children[unit], 525 DETACH_FORCE); 526 527 icpa.icpa_unit = unit; 528 529 locs[ICPCF_UNIT] = unit; 530 531 icp->icp_children[unit] = config_found_sm_loc(&icp->icp_dv, 532 "icp", locs, &icpa, icp_print, config_stdsubmatch); 533 } 534 535 icp_recompute_openings(icp); 536 } 537 538 void 539 icp_rescan_all(struct icp_softc *icp) 540 { 541 int unit; 542 u_int16_t cdev_cnt; 543 544 /* 545 * This is the old method of rescanning the host drives. We 546 * start by reinitializing the cache service. 
547 */ 548 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) { 549 printf("%s: unable to re-initialize cache service for rescan\n", 550 icp->icp_dv.dv_xname); 551 return; 552 } 553 cdev_cnt = (u_int16_t) icp->icp_info; 554 555 /* For each host drive, do the new-style rescan. */ 556 for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++) 557 icp_rescan(icp, unit); 558 559 /* Now detach anything in the slots after cdev_cnt. */ 560 for (; unit < ICP_MAX_HDRIVES; unit++) { 561 if (icp->icp_cdr[unit].cd_size != 0) { 562 #ifdef ICP_DEBUG 563 printf("%s: rescan all: unit %d < new cdev_cnt (%d)\n", 564 icp->icp_dv.dv_xname, unit, cdev_cnt); 565 #endif 566 icp->icp_ndevs--; 567 icp->icp_cdr[unit].cd_size = 0; 568 if (icp->icp_children[unit] != NULL) { 569 (void) config_detach(icp->icp_children[unit], 570 DETACH_FORCE); 571 icp->icp_children[unit] = NULL; 572 } 573 } 574 } 575 576 icp_recompute_openings(icp); 577 } 578 579 void 580 icp_recompute_openings(struct icp_softc *icp) 581 { 582 int unit, openings; 583 584 if (icp->icp_ndevs != 0) 585 openings = 586 (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs; 587 else 588 openings = 0; 589 if (openings == icp->icp_openings) 590 return; 591 icp->icp_openings = openings; 592 593 #ifdef ICP_DEBUG 594 printf("%s: %d device%s, %d openings per device\n", 595 icp->icp_dv.dv_xname, icp->icp_ndevs, 596 icp->icp_ndevs == 1 ? 
"" : "s", icp->icp_openings); 597 #endif 598 599 for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) { 600 if (icp->icp_children[unit] != NULL) 601 (*icp->icp_servicecb[unit]->iscb_openings)( 602 icp->icp_children[unit], icp->icp_openings); 603 } 604 } 605 606 void 607 icp_watchdog(void *cookie) 608 { 609 struct icp_softc *icp; 610 int s; 611 612 icp = cookie; 613 614 s = splbio(); 615 icp_intr(icp); 616 if (ICP_HAS_WORK(icp)) 617 icp_ccb_enqueue(icp, NULL); 618 splx(s); 619 620 callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ, 621 icp_watchdog, icp); 622 } 623 624 int 625 icp_print(void *aux, const char *pnp) 626 { 627 struct icp_attach_args *icpa; 628 const char *str; 629 630 icpa = (struct icp_attach_args *)aux; 631 632 if (pnp != NULL) { 633 if (icpa->icpa_unit < ICPA_UNIT_SCSI) 634 str = "block device"; 635 else 636 str = "SCSI channel"; 637 aprint_normal("%s at %s", str, pnp); 638 } 639 aprint_normal(" unit %d", icpa->icpa_unit); 640 641 return (UNCONF); 642 } 643 644 int 645 icp_async_event(struct icp_softc *icp, int service) 646 { 647 648 if (service == ICP_SCREENSERVICE) { 649 if (icp->icp_status == ICP_S_MSG_REQUEST) { 650 /* XXX */ 651 } 652 } else { 653 if ((icp->icp_fw_vers & 0xff) >= 0x1a) { 654 icp->icp_evt.size = 0; 655 icp->icp_evt.eu.async.ionode = 656 device_unit(&icp->icp_dv); 657 icp->icp_evt.eu.async.status = icp->icp_status; 658 /* 659 * Severity and event string are filled in by the 660 * hardware interface interrupt handler. 
661 */ 662 printf("%s: %s\n", icp->icp_dv.dv_xname, 663 icp->icp_evt.event_string); 664 } else { 665 icp->icp_evt.size = sizeof(icp->icp_evt.eu.async); 666 icp->icp_evt.eu.async.ionode = 667 device_unit(&icp->icp_dv); 668 icp->icp_evt.eu.async.service = service; 669 icp->icp_evt.eu.async.status = icp->icp_status; 670 icp->icp_evt.eu.async.info = icp->icp_info; 671 /* XXXJRT FIX THIS */ 672 *(u_int32_t *) icp->icp_evt.eu.async.scsi_coord = 673 icp->icp_info2; 674 } 675 icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt); 676 } 677 678 return (0); 679 } 680 681 int 682 icp_intr(void *cookie) 683 { 684 struct icp_softc *icp; 685 struct icp_intr_ctx ctx; 686 struct icp_ccb *ic; 687 688 icp = cookie; 689 690 ctx.istatus = (*icp->icp_get_status)(icp); 691 if (!ctx.istatus) { 692 icp->icp_status = ICP_S_NO_STATUS; 693 return (0); 694 } 695 696 (*icp->icp_intr)(icp, &ctx); 697 698 icp->icp_status = ctx.cmd_status; 699 icp->icp_service = ctx.service; 700 icp->icp_info = ctx.info; 701 icp->icp_info2 = ctx.info2; 702 703 switch (ctx.istatus) { 704 case ICP_ASYNCINDEX: 705 icp_async_event(icp, ctx.service); 706 return (1); 707 708 case ICP_SPEZINDEX: 709 printf("%s: uninitialized or unknown service (%d/%d)\n", 710 icp->icp_dv.dv_xname, ctx.info, ctx.info2); 711 icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver); 712 icp->icp_evt.eu.driver.ionode = device_unit(&icp->icp_dv); 713 icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt); 714 return (1); 715 } 716 717 if ((ctx.istatus - 2) > icp->icp_nccbs) 718 panic("icp_intr: bad command index returned"); 719 720 ic = &icp->icp_ccbs[ctx.istatus - 2]; 721 ic->ic_status = icp->icp_status; 722 723 if ((ic->ic_flags & IC_ALLOCED) == 0) { 724 /* XXX ICP's "iir" driver just sends an event here. */ 725 panic("icp_intr: inactive CCB identified"); 726 } 727 728 /* 729 * Try to protect ourselves from the running command count already 730 * being 0 (e.g. if a polled command times out). 
731 */ 732 KDASSERT(icp->icp_running != 0); 733 if (--icp->icp_running == 0 && 734 (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) { 735 icp->icp_flags &= ~ICP_F_WAIT_FREEZE; 736 wakeup(&icp->icp_qfreeze); 737 } 738 739 switch (icp->icp_status) { 740 case ICP_S_BSY: 741 #ifdef ICP_DEBUG 742 printf("%s: ICP_S_BSY received\n", icp->icp_dv.dv_xname); 743 #endif 744 if (__predict_false((ic->ic_flags & IC_UCMD) != 0)) 745 SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain); 746 else 747 SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain); 748 break; 749 750 default: 751 ic->ic_flags |= IC_COMPLETE; 752 753 if ((ic->ic_flags & IC_WAITING) != 0) 754 wakeup(ic); 755 else if (ic->ic_intr != NULL) 756 (*ic->ic_intr)(ic); 757 758 if (ICP_HAS_WORK(icp)) 759 icp_ccb_enqueue(icp, NULL); 760 761 break; 762 } 763 764 return (1); 765 } 766 767 struct icp_ucmd_ctx { 768 gdt_ucmd_t *iu_ucmd; 769 u_int32_t iu_cnt; 770 }; 771 772 void 773 icp_ucmd_intr(struct icp_ccb *ic) 774 { 775 struct icp_softc *icp = (void *) ic->ic_dv; 776 struct icp_ucmd_ctx *iu = ic->ic_context; 777 gdt_ucmd_t *ucmd = iu->iu_ucmd; 778 779 ucmd->status = icp->icp_status; 780 ucmd->info = icp->icp_info; 781 782 if (iu->iu_cnt != 0) { 783 bus_dmamap_sync(icp->icp_dmat, 784 icp->icp_scr_dmamap, 785 ICP_SCRATCH_UCMD, iu->iu_cnt, 786 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 787 memcpy(ucmd->data, 788 (char *)icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt); 789 } 790 791 icp->icp_ucmd_ccb = NULL; 792 793 ic->ic_flags |= IC_COMPLETE; 794 wakeup(ic); 795 } 796 797 /* 798 * NOTE: We assume that it is safe to sleep here! 
799 */ 800 int 801 icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode, 802 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 803 { 804 struct icp_ioctlcmd *icmd; 805 struct icp_cachecmd *cc; 806 struct icp_rawcmd *rc; 807 int retries, rv; 808 struct icp_ccb *ic; 809 810 retries = ICP_RETRIES; 811 812 do { 813 ic = icp_ccb_alloc_wait(icp); 814 memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd)); 815 ic->ic_cmd.cmd_opcode = htole16(opcode); 816 817 switch (service) { 818 case ICP_CACHESERVICE: 819 if (opcode == ICP_IOCTL) { 820 icmd = &ic->ic_cmd.cmd_packet.ic; 821 icmd->ic_subfunc = htole16(arg1); 822 icmd->ic_channel = htole32(arg2); 823 icmd->ic_bufsize = htole32(arg3); 824 icmd->ic_addr = 825 htole32(icp->icp_scr_seg[0].ds_addr); 826 827 bus_dmamap_sync(icp->icp_dmat, 828 icp->icp_scr_dmamap, 0, arg3, 829 BUS_DMASYNC_PREWRITE | 830 BUS_DMASYNC_PREREAD); 831 } else { 832 cc = &ic->ic_cmd.cmd_packet.cc; 833 cc->cc_deviceno = htole16(arg1); 834 cc->cc_blockno = htole32(arg2); 835 } 836 break; 837 838 case ICP_SCSIRAWSERVICE: 839 rc = &ic->ic_cmd.cmd_packet.rc; 840 rc->rc_direction = htole32(arg1); 841 rc->rc_bus = arg2; 842 rc->rc_target = arg3; 843 rc->rc_lun = arg3 >> 8; 844 break; 845 } 846 847 ic->ic_service = service; 848 ic->ic_cmdlen = sizeof(ic->ic_cmd); 849 rv = icp_ccb_poll(icp, ic, 10000); 850 851 switch (service) { 852 case ICP_CACHESERVICE: 853 if (opcode == ICP_IOCTL) { 854 bus_dmamap_sync(icp->icp_dmat, 855 icp->icp_scr_dmamap, 0, arg3, 856 BUS_DMASYNC_POSTWRITE | 857 BUS_DMASYNC_POSTREAD); 858 } 859 break; 860 } 861 862 icp_ccb_free(icp, ic); 863 } while (rv != 0 && --retries > 0); 864 865 return (icp->icp_status == ICP_S_OK); 866 } 867 868 int 869 icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd) 870 { 871 struct icp_ccb *ic; 872 struct icp_ucmd_ctx iu; 873 u_int32_t cnt; 874 int error; 875 876 if (ucmd->service == ICP_CACHESERVICE) { 877 if (ucmd->command.cmd_opcode == ICP_IOCTL) { 878 cnt = ucmd->command.cmd_packet.ic.ic_bufsize; 879 if (cnt > 
GDT_SCRATCH_SZ) { 880 printf("%s: scratch buffer too small (%d/%d)\n", 881 icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt); 882 return (EINVAL); 883 } 884 } else { 885 cnt = ucmd->command.cmd_packet.cc.cc_blockcnt * 886 ICP_SECTOR_SIZE; 887 if (cnt > GDT_SCRATCH_SZ) { 888 printf("%s: scratch buffer too small (%d/%d)\n", 889 icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt); 890 return (EINVAL); 891 } 892 } 893 } else { 894 cnt = ucmd->command.cmd_packet.rc.rc_sdlen + 895 ucmd->command.cmd_packet.rc.rc_sense_len; 896 if (cnt > GDT_SCRATCH_SZ) { 897 printf("%s: scratch buffer too small (%d/%d)\n", 898 icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt); 899 return (EINVAL); 900 } 901 } 902 903 iu.iu_ucmd = ucmd; 904 iu.iu_cnt = cnt; 905 906 ic = icp_ccb_alloc_wait(icp); 907 memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd)); 908 ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode); 909 910 if (ucmd->service == ICP_CACHESERVICE) { 911 if (ucmd->command.cmd_opcode == ICP_IOCTL) { 912 struct icp_ioctlcmd *icmd, *uicmd; 913 914 icmd = &ic->ic_cmd.cmd_packet.ic; 915 uicmd = &ucmd->command.cmd_packet.ic; 916 917 icmd->ic_subfunc = htole16(uicmd->ic_subfunc); 918 icmd->ic_channel = htole32(uicmd->ic_channel); 919 icmd->ic_bufsize = htole32(uicmd->ic_bufsize); 920 icmd->ic_addr = 921 htole32(icp->icp_scr_seg[0].ds_addr + 922 ICP_SCRATCH_UCMD); 923 } else { 924 struct icp_cachecmd *cc, *ucc; 925 926 cc = &ic->ic_cmd.cmd_packet.cc; 927 ucc = &ucmd->command.cmd_packet.cc; 928 929 cc->cc_deviceno = htole16(ucc->cc_deviceno); 930 cc->cc_blockno = htole32(ucc->cc_blockno); 931 cc->cc_blockcnt = htole32(ucc->cc_blockcnt); 932 cc->cc_addr = htole32(0xffffffffU); 933 cc->cc_nsgent = htole32(1); 934 cc->cc_sg[0].sg_addr = 935 htole32(icp->icp_scr_seg[0].ds_addr + 936 ICP_SCRATCH_UCMD); 937 cc->cc_sg[0].sg_len = htole32(cnt); 938 } 939 } else { 940 struct icp_rawcmd *rc, *urc; 941 942 rc = &ic->ic_cmd.cmd_packet.rc; 943 urc = &ucmd->command.cmd_packet.rc; 944 945 rc->rc_direction = htole32(urc->rc_direction); 
946 rc->rc_sdata = htole32(0xffffffffU); 947 rc->rc_sdlen = htole32(urc->rc_sdlen); 948 rc->rc_clen = htole32(urc->rc_clen); 949 memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb)); 950 rc->rc_target = urc->rc_target; 951 rc->rc_lun = urc->rc_lun; 952 rc->rc_bus = urc->rc_bus; 953 rc->rc_sense_len = htole32(urc->rc_sense_len); 954 rc->rc_sense_addr = 955 htole32(icp->icp_scr_seg[0].ds_addr + 956 ICP_SCRATCH_UCMD + urc->rc_sdlen); 957 rc->rc_nsgent = htole32(1); 958 rc->rc_sg[0].sg_addr = 959 htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD); 960 rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len); 961 } 962 963 ic->ic_service = ucmd->service; 964 ic->ic_cmdlen = sizeof(ic->ic_cmd); 965 ic->ic_context = &iu; 966 967 /* 968 * XXX What units are ucmd->timeout in? Until we know, we 969 * XXX just pull a number out of thin air. 970 */ 971 if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0)) 972 printf("%s: error %d waiting for ucmd to complete\n", 973 icp->icp_dv.dv_xname, error); 974 975 /* icp_ucmd_intr() has updated ucmd. 
*/ 976 icp_ccb_free(icp, ic); 977 978 return (error); 979 } 980 981 struct icp_ccb * 982 icp_ccb_alloc(struct icp_softc *icp) 983 { 984 struct icp_ccb *ic; 985 int s; 986 987 s = splbio(); 988 if (__predict_false((ic = 989 SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) { 990 splx(s); 991 return (NULL); 992 } 993 SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain); 994 splx(s); 995 996 ic->ic_flags = IC_ALLOCED; 997 return (ic); 998 } 999 1000 struct icp_ccb * 1001 icp_ccb_alloc_wait(struct icp_softc *icp) 1002 { 1003 struct icp_ccb *ic; 1004 int s; 1005 1006 s = splbio(); 1007 while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) { 1008 icp->icp_flags |= ICP_F_WAIT_CCB; 1009 (void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0); 1010 } 1011 SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain); 1012 splx(s); 1013 1014 ic->ic_flags = IC_ALLOCED; 1015 return (ic); 1016 } 1017 1018 void 1019 icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic) 1020 { 1021 int s; 1022 1023 s = splbio(); 1024 ic->ic_flags = 0; 1025 ic->ic_intr = NULL; 1026 SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain); 1027 if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) { 1028 icp->icp_flags &= ~ICP_F_WAIT_CCB; 1029 wakeup(&icp->icp_ccb_freelist); 1030 } 1031 splx(s); 1032 } 1033 1034 void 1035 icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic) 1036 { 1037 int s; 1038 1039 s = splbio(); 1040 1041 if (ic != NULL) { 1042 if (__predict_false((ic->ic_flags & IC_UCMD) != 0)) 1043 SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain); 1044 else 1045 SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain); 1046 } 1047 1048 for (; icp->icp_qfreeze == 0;) { 1049 if (__predict_false((ic = 1050 SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) { 1051 struct icp_ucmd_ctx *iu = ic->ic_context; 1052 gdt_ucmd_t *ucmd = iu->iu_ucmd; 1053 1054 /* 1055 * All user-generated commands share the same 1056 * scratch space, so if one is already running, 1057 * we have to stall 
the command queue. 1058 */ 1059 if (icp->icp_ucmd_ccb != NULL) 1060 break; 1061 if ((*icp->icp_test_busy)(icp)) 1062 break; 1063 icp->icp_ucmd_ccb = ic; 1064 1065 if (iu->iu_cnt != 0) { 1066 memcpy((char *)icp->icp_scr + ICP_SCRATCH_UCMD, 1067 ucmd->data, iu->iu_cnt); 1068 bus_dmamap_sync(icp->icp_dmat, 1069 icp->icp_scr_dmamap, 1070 ICP_SCRATCH_UCMD, iu->iu_cnt, 1071 BUS_DMASYNC_PREREAD | 1072 BUS_DMASYNC_PREWRITE); 1073 } 1074 } else if (__predict_true((ic = 1075 SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) { 1076 if ((*icp->icp_test_busy)(icp)) 1077 break; 1078 } else { 1079 /* no command found */ 1080 break; 1081 } 1082 icp_ccb_submit(icp, ic); 1083 if (__predict_false((ic->ic_flags & IC_UCMD) != 0)) 1084 SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain); 1085 else 1086 SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain); 1087 } 1088 1089 splx(s); 1090 } 1091 1092 int 1093 icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size, 1094 int dir) 1095 { 1096 struct icp_sg *sg; 1097 int nsegs, i, rv; 1098 bus_dmamap_t xfer; 1099 1100 xfer = ic->ic_xfer_map; 1101 1102 rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL, 1103 BUS_DMA_NOWAIT | BUS_DMA_STREAMING | 1104 ((dir & IC_XFER_IN) ? 
BUS_DMA_READ : BUS_DMA_WRITE)); 1105 if (rv != 0) 1106 return (rv); 1107 1108 nsegs = xfer->dm_nsegs; 1109 ic->ic_xfer_size = size; 1110 ic->ic_nsgent = nsegs; 1111 ic->ic_flags |= dir; 1112 sg = ic->ic_sg; 1113 1114 if (sg != NULL) { 1115 for (i = 0; i < nsegs; i++, sg++) { 1116 sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr); 1117 sg->sg_len = htole32(xfer->dm_segs[i].ds_len); 1118 } 1119 } else if (nsegs > 1) 1120 panic("icp_ccb_map: no SG list specified, but nsegs > 1"); 1121 1122 if ((dir & IC_XFER_OUT) != 0) 1123 i = BUS_DMASYNC_PREWRITE; 1124 else /* if ((dir & IC_XFER_IN) != 0) */ 1125 i = BUS_DMASYNC_PREREAD; 1126 1127 bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i); 1128 return (0); 1129 } 1130 1131 void 1132 icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic) 1133 { 1134 int i; 1135 1136 if ((ic->ic_flags & IC_XFER_OUT) != 0) 1137 i = BUS_DMASYNC_POSTWRITE; 1138 else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */ 1139 i = BUS_DMASYNC_POSTREAD; 1140 1141 bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i); 1142 bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map); 1143 } 1144 1145 int 1146 icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo) 1147 { 1148 int s, rv; 1149 1150 s = splbio(); 1151 1152 for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) { 1153 if (!(*icp->icp_test_busy)(icp)) 1154 break; 1155 DELAY(10); 1156 } 1157 if (timo == 0) { 1158 printf("%s: submit: busy\n", icp->icp_dv.dv_xname); 1159 return (EAGAIN); 1160 } 1161 1162 icp_ccb_submit(icp, ic); 1163 1164 if (cold) { 1165 for (timo *= 10; timo != 0; timo--) { 1166 DELAY(100); 1167 icp_intr(icp); 1168 if ((ic->ic_flags & IC_COMPLETE) != 0) 1169 break; 1170 } 1171 } else { 1172 ic->ic_flags |= IC_WAITING; 1173 while ((ic->ic_flags & IC_COMPLETE) == 0) { 1174 if ((rv = tsleep(ic, PRIBIO, "icpwccb", 1175 mstohz(timo))) != 0) { 1176 timo = 0; 1177 break; 1178 } 1179 } 1180 } 1181 1182 if (timo != 0) { 1183 if (ic->ic_status != ICP_S_OK) { 1184 
#ifdef ICP_DEBUG 1185 printf("%s: request failed; status=0x%04x\n", 1186 icp->icp_dv.dv_xname, ic->ic_status); 1187 #endif 1188 rv = EIO; 1189 } else 1190 rv = 0; 1191 } else { 1192 printf("%s: command timed out\n", icp->icp_dv.dv_xname); 1193 rv = EIO; 1194 } 1195 1196 while ((*icp->icp_test_busy)(icp) != 0) 1197 DELAY(10); 1198 1199 splx(s); 1200 1201 return (rv); 1202 } 1203 1204 int 1205 icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo) 1206 { 1207 int s, rv; 1208 1209 ic->ic_flags |= IC_WAITING; 1210 1211 s = splbio(); 1212 icp_ccb_enqueue(icp, ic); 1213 while ((ic->ic_flags & IC_COMPLETE) == 0) { 1214 if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) { 1215 splx(s); 1216 return (rv); 1217 } 1218 } 1219 splx(s); 1220 1221 if (ic->ic_status != ICP_S_OK) { 1222 printf("%s: command failed; status=%x\n", icp->icp_dv.dv_xname, 1223 ic->ic_status); 1224 return (EIO); 1225 } 1226 1227 return (0); 1228 } 1229 1230 int 1231 icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo) 1232 { 1233 int s, rv; 1234 1235 ic->ic_dv = &icp->icp_dv; 1236 ic->ic_intr = icp_ucmd_intr; 1237 ic->ic_flags |= IC_UCMD; 1238 1239 s = splbio(); 1240 icp_ccb_enqueue(icp, ic); 1241 while ((ic->ic_flags & IC_COMPLETE) == 0) { 1242 if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) { 1243 splx(s); 1244 return (rv); 1245 } 1246 } 1247 splx(s); 1248 1249 return (0); 1250 } 1251 1252 void 1253 icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic) 1254 { 1255 1256 ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3; 1257 1258 (*icp->icp_set_sema0)(icp); 1259 DELAY(10); 1260 1261 ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD); 1262 ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident); 1263 1264 icp->icp_running++; 1265 1266 (*icp->icp_copy_cmd)(icp, ic); 1267 (*icp->icp_release_event)(icp, ic); 1268 } 1269 1270 int 1271 icp_freeze(struct icp_softc *icp) 1272 { 1273 int s, error = 0; 1274 1275 s = splbio(); 1276 if (icp->icp_qfreeze++ == 0) { 1277 while 
(icp->icp_running != 0) { 1278 icp->icp_flags |= ICP_F_WAIT_FREEZE; 1279 error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH, 1280 "icpqfrz", 0); 1281 if (error != 0 && --icp->icp_qfreeze == 0 && 1282 ICP_HAS_WORK(icp)) { 1283 icp_ccb_enqueue(icp, NULL); 1284 break; 1285 } 1286 } 1287 } 1288 splx(s); 1289 1290 return (error); 1291 } 1292 1293 void 1294 icp_unfreeze(struct icp_softc *icp) 1295 { 1296 int s; 1297 1298 s = splbio(); 1299 KDASSERT(icp->icp_qfreeze != 0); 1300 if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp)) 1301 icp_ccb_enqueue(icp, NULL); 1302 splx(s); 1303 } 1304 1305 /* XXX Global - should be per-controller? XXX */ 1306 static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS]; 1307 static int icp_event_oldidx; 1308 static int icp_event_lastidx; 1309 1310 gdt_evt_str * 1311 icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx, 1312 gdt_evt_data *evt) 1313 { 1314 gdt_evt_str *e; 1315 1316 /* no source == no event */ 1317 if (source == 0) 1318 return (NULL); 1319 1320 e = &icp_event_buffer[icp_event_lastidx]; 1321 if (e->event_source == source && e->event_idx == idx && 1322 ((evt->size != 0 && e->event_data.size != 0 && 1323 memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) || 1324 (evt->size == 0 && e->event_data.size == 0 && 1325 strcmp((char *) e->event_data.event_string, 1326 (char *) evt->event_string) == 0))) { 1327 e->last_stamp = time_second; 1328 e->same_count++; 1329 } else { 1330 if (icp_event_buffer[icp_event_lastidx].event_source != 0) { 1331 icp_event_lastidx++; 1332 if (icp_event_lastidx == ICP_MAX_EVENTS) 1333 icp_event_lastidx = 0; 1334 if (icp_event_lastidx == icp_event_oldidx) { 1335 icp_event_oldidx++; 1336 if (icp_event_oldidx == ICP_MAX_EVENTS) 1337 icp_event_oldidx = 0; 1338 } 1339 } 1340 e = &icp_event_buffer[icp_event_lastidx]; 1341 e->event_source = source; 1342 e->event_idx = idx; 1343 e->first_stamp = e->last_stamp = time_second; 1344 e->same_count = 1; 1345 e->event_data = *evt; 1346 e->application = 0; 1347 } 
1348 return (e); 1349 } 1350 1351 int 1352 icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr) 1353 { 1354 gdt_evt_str *e; 1355 int eindex, s; 1356 1357 s = splbio(); 1358 1359 if (handle == -1) 1360 eindex = icp_event_oldidx; 1361 else 1362 eindex = handle; 1363 1364 estr->event_source = 0; 1365 1366 if (eindex < 0 || eindex >= ICP_MAX_EVENTS) { 1367 splx(s); 1368 return (eindex); 1369 } 1370 1371 e = &icp_event_buffer[eindex]; 1372 if (e->event_source != 0) { 1373 if (eindex != icp_event_lastidx) { 1374 eindex++; 1375 if (eindex == ICP_MAX_EVENTS) 1376 eindex = 0; 1377 } else 1378 eindex = -1; 1379 memcpy(estr, e, sizeof(gdt_evt_str)); 1380 } 1381 1382 splx(s); 1383 1384 return (eindex); 1385 } 1386 1387 void 1388 icp_readapp_event(struct icp_softc *icp, u_int8_t application, 1389 gdt_evt_str *estr) 1390 { 1391 gdt_evt_str *e; 1392 int found = 0, eindex, s; 1393 1394 s = splbio(); 1395 1396 eindex = icp_event_oldidx; 1397 for (;;) { 1398 e = &icp_event_buffer[eindex]; 1399 if (e->event_source == 0) 1400 break; 1401 if ((e->application & application) == 0) { 1402 e->application |= application; 1403 found = 1; 1404 break; 1405 } 1406 if (eindex == icp_event_lastidx) 1407 break; 1408 eindex++; 1409 if (eindex == ICP_MAX_EVENTS) 1410 eindex = 0; 1411 } 1412 if (found) 1413 memcpy(estr, e, sizeof(gdt_evt_str)); 1414 else 1415 estr->event_source = 0; 1416 1417 splx(s); 1418 } 1419 1420 void 1421 icp_clear_events(struct icp_softc *icp) 1422 { 1423 int s; 1424 1425 s = splbio(); 1426 icp_event_oldidx = icp_event_lastidx = 0; 1427 memset(icp_event_buffer, 0, sizeof(icp_event_buffer)); 1428 splx(s); 1429 } 1430