/*	$NetBSD: xd.c,v 1.14 1997/06/24 00:58:14 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1995 Charles D. Cranor
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *
 * x d . c   x y l o g i c s   7 5 3 / 7 0 5 3   v m e / s m d   d r i v e r
 *
 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
 * id: $NetBSD: xd.c,v 1.14 1997/06/24 00:58:14 thorpej Exp $
 * started: 27-Feb-95
 * references: [1] Xylogics Model 753 User's Manual
 *                 part number: 166-753-001, Revision B, May 21, 1988.
 *                 "Your Partner For Performance"
 *             [2] other NetBSD disk device drivers
 *
 * Special thanks go to Scott E. Campbell of Xylogics, Inc. for taking
 * the time to answer some of my questions about the 753/7053.
 *
 * note: the 753 and the 7053 are programmed the same way, but are
 * different sizes.  the 753 is a 6U VME card, while the 7053 is a 9U
 * VME card (found in many VME based suns).
52 */ 53 54 #undef XDC_DEBUG /* full debug */ 55 #define XDC_DIAG /* extra sanity checks */ 56 #if defined(DIAGNOSTIC) && !defined(XDC_DIAG) 57 #define XDC_DIAG /* link in with master DIAG option */ 58 #endif 59 60 #include <sys/param.h> 61 #include <sys/proc.h> 62 #include <sys/systm.h> 63 #include <sys/kernel.h> 64 #include <sys/file.h> 65 #include <sys/stat.h> 66 #include <sys/ioctl.h> 67 #include <sys/buf.h> 68 #include <sys/uio.h> 69 #include <sys/malloc.h> 70 #include <sys/device.h> 71 #include <sys/disklabel.h> 72 #include <sys/disk.h> 73 #include <sys/syslog.h> 74 #include <sys/dkbad.h> 75 #include <sys/conf.h> 76 77 #include <vm/vm.h> 78 #include <vm/vm_kern.h> 79 80 #include <machine/autoconf.h> 81 #include <machine/sun_disklabel.h> 82 #include <machine/dvma.h> 83 84 #include <sun3/dev/xdreg.h> 85 #include <sun3/dev/xdvar.h> 86 #include <sun3/dev/xio.h> 87 88 /* 89 * macros 90 */ 91 92 /* 93 * XDC_TWAIT: add iorq "N" to tail of SC's wait queue 94 */ 95 #define XDC_TWAIT(SC, N) { \ 96 (SC)->waitq[(SC)->waitend] = (N); \ 97 (SC)->waitend = ((SC)->waitend + 1) % XDC_MAXIOPB; \ 98 (SC)->nwait++; \ 99 } 100 101 /* 102 * XDC_HWAIT: add iorq "N" to head of SC's wait queue 103 */ 104 #define XDC_HWAIT(SC, N) { \ 105 (SC)->waithead = ((SC)->waithead == 0) ? \ 106 (XDC_MAXIOPB - 1) : ((SC)->waithead - 1); \ 107 (SC)->waitq[(SC)->waithead] = (N); \ 108 (SC)->nwait++; \ 109 } 110 111 /* 112 * XDC_GET_WAITER: gets the first request waiting on the waitq 113 * and removes it (so it can be submitted) 114 */ 115 #define XDC_GET_WAITER(XDCSC, RQ) { \ 116 (RQ) = (XDCSC)->waitq[(XDCSC)->waithead]; \ 117 (XDCSC)->waithead = ((XDCSC)->waithead + 1) % XDC_MAXIOPB; \ 118 xdcsc->nwait--; \ 119 } 120 121 /* 122 * XDC_FREE: add iorq "N" to SC's free list 123 */ 124 #define XDC_FREE(SC, N) { \ 125 (SC)->freereq[(SC)->nfree++] = (N); \ 126 (SC)->reqs[N].mode = 0; \ 127 if ((SC)->nfree == 1) wakeup(&(SC)->nfree); \ 128 } 129 130 131 /* 132 * XDC_RQALLOC: allocate an iorq off the free list (assume nfree > 0). 133 */ 134 #define XDC_RQALLOC(XDCSC) (XDCSC)->freereq[--((XDCSC)->nfree)] 135 136 /* 137 * XDC_GO: start iopb ADDR (DVMA addr in a u_long) on XDC 138 */ 139 #define XDC_GO(XDC, ADDR) { \ 140 (XDC)->xdc_iopbaddr0 = ((ADDR) & 0xff); \ 141 (ADDR) = ((ADDR) >> 8); \ 142 (XDC)->xdc_iopbaddr1 = ((ADDR) & 0xff); \ 143 (ADDR) = ((ADDR) >> 8); \ 144 (XDC)->xdc_iopbaddr2 = ((ADDR) & 0xff); \ 145 (ADDR) = ((ADDR) >> 8); \ 146 (XDC)->xdc_iopbaddr3 = (ADDR); \ 147 (XDC)->xdc_iopbamod = XDC_ADDRMOD; \ 148 (XDC)->xdc_csr = XDC_ADDIOPB; /* go! */ \ 149 } 150 151 /* 152 * XDC_WAIT: wait for XDC's csr "BITS" to come on in "TIME". 153 * LCV is a counter. If it goes to zero then we timed out. 
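 *
 * (illustrative usage, not part of the original comment: this is the
 * busy-wait-with-timeout pattern used by xdc_reset and xdc_piodriver
 * further down.)
 *
 *	xdcsc->xdc->xdc_csr = XDC_RESET;
 *	XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
 *	if (del <= 0)
 *		... bit never came on: the controller timed out ...
 *
 * the macro itself does not report the timeout; the caller must test the
 * counter ("del" here) after the wait.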
154 */ 155 #define XDC_WAIT(XDC, LCV, TIME, BITS) { \ 156 (LCV) = (TIME); \ 157 while ((LCV) > 0) { \ 158 if ((XDC)->xdc_csr & (BITS)) break; \ 159 (LCV) = (LCV) - 1; \ 160 DELAY(1); \ 161 } \ 162 } 163 164 /* 165 * XDC_DONE: don't need IORQ, get error code and free (done after xdc_cmd) 166 */ 167 #define XDC_DONE(SC,RQ,ER) { \ 168 if ((RQ) == XD_ERR_FAIL) { \ 169 (ER) = (RQ); \ 170 } else { \ 171 if ((SC)->ndone-- == XDC_SUBWAITLIM) \ 172 wakeup(&(SC)->ndone); \ 173 (ER) = (SC)->reqs[RQ].errno; \ 174 XDC_FREE((SC), (RQ)); \ 175 } \ 176 } 177 178 /* 179 * XDC_ADVANCE: advance iorq's pointers by a number of sectors 180 */ 181 #define XDC_ADVANCE(IORQ, N) { \ 182 if (N) { \ 183 (IORQ)->sectcnt -= (N); \ 184 (IORQ)->blockno += (N); \ 185 (IORQ)->dbuf += ((N)*XDFM_BPS); \ 186 } \ 187 } 188 189 /* 190 * note - addresses you can sleep on: 191 * [1] & of xd_softc's "state" (waiting for a chance to attach a drive) 192 * [2] & of xdc_softc's "nfree" (waiting for a free iorq/iopb) 193 * [3] & of xdc_softc's "ndone" (waiting for number of done iorq/iopb's 194 * to drop below XDC_SUBWAITLIM) 195 * [4] & an iorq (waiting for an XD_SUB_WAIT iorq to finish) 196 */ 197 198 199 /* 200 * function prototypes 201 * "xdc_*" functions are internal, all others are external interfaces 202 */ 203 204 /* internals */ 205 int xdc_cmd __P((struct xdc_softc *, int, int, int, int, int, char *, int)); 206 char *xdc_e2str __P((int)); 207 int xdc_error __P((struct xdc_softc *, struct xd_iorq *, 208 struct xd_iopb *, int, int)); 209 int xdc_ioctlcmd __P((struct xd_softc *, dev_t dev, struct xd_iocmd *)); 210 void xdc_perror __P((struct xd_iorq *, struct xd_iopb *, int)); 211 int xdc_piodriver __P((struct xdc_softc *, int, int)); 212 int xdc_remove_iorq __P((struct xdc_softc *)); 213 int xdc_reset __P((struct xdc_softc *, int, int, int, struct xd_softc *)); 214 inline void xdc_rqinit __P((struct xd_iorq *, struct xdc_softc *, 215 struct xd_softc *, int, u_long, int, 216 caddr_t, struct buf *)); 217 void xdc_rqtopb __P((struct xd_iorq *, struct xd_iopb *, int, int)); 218 int xdc_start __P((struct xdc_softc *, int)); 219 int xdc_startbuf __P((struct xdc_softc *, struct xd_softc *, struct buf *)); 220 int xdc_submit_iorq __P((struct xdc_softc *, int, int)); 221 void xdc_tick __P((void *)); 222 int xdc_xdreset __P((struct xdc_softc *, struct xd_softc *)); 223 224 /* machine interrupt hook */ 225 int xdcintr __P((void *)); 226 227 /* bdevsw, cdevsw */ 228 bdev_decl(xd); 229 cdev_decl(xd); 230 231 /* autoconf */ 232 int xdcmatch __P((struct device *, struct cfdata *, void *)); 233 void xdcattach __P((struct device *, struct device *, void *)); 234 int xdmatch __P((struct device *, struct cfdata *, void *)); 235 void xdattach __P((struct device *, struct device *, void *)); 236 int xdc_print __P((void *, char *name)); 237 238 static void xddummystrat __P((struct buf *)); 239 int xdgetdisklabel __P((struct xd_softc *, void *)); 240 241 /* 242 * cfdrivers: device driver interface to autoconfig 243 */ 244 245 struct cfattach xdc_ca = { 246 sizeof(struct xdc_softc), xdcmatch, xdcattach 247 }; 248 249 struct cfdriver xdc_cd = { 250 NULL, "xdc", DV_DULL 251 }; 252 253 struct cfattach xd_ca = { 254 sizeof(struct xd_softc), xdmatch, xdattach 255 }; 256 257 struct cfdriver xd_cd = { 258 NULL, "xd", DV_DISK 259 }; 260 261 struct xdc_attach_args { /* this is the "aux" args to xdattach */ 262 int driveno; /* unit number */ 263 char *dvmabuf; /* scratch buffer for reading disk label */ 264 int fullmode; /* submit mode */ 265 int booting; /* 
are we booting or not? */ 266 }; 267 268 /* 269 * dkdriver 270 */ 271 272 struct dkdriver xddkdriver = {xdstrategy}; 273 274 /* 275 * start: disk label fix code (XXX) 276 */ 277 278 static void *xd_labeldata; 279 280 static void 281 xddummystrat(bp) 282 struct buf *bp; 283 { 284 if (bp->b_bcount != XDFM_BPS) 285 panic("xddummystrat"); 286 bcopy(xd_labeldata, bp->b_un.b_addr, XDFM_BPS); 287 bp->b_flags |= B_DONE; 288 bp->b_flags &= ~B_BUSY; 289 } 290 291 int 292 xdgetdisklabel(xd, b) 293 struct xd_softc *xd; 294 void *b; 295 { 296 char *err; 297 struct sun_disklabel *sdl; 298 299 /* We already have the label data in `b'; setup for dummy strategy */ 300 xd_labeldata = b; 301 302 /* Required parameter for readdisklabel() */ 303 xd->sc_dk.dk_label->d_secsize = XDFM_BPS; 304 305 err = readdisklabel(MAKEDISKDEV(0, xd->sc_dev.dv_unit, RAW_PART), 306 xddummystrat, 307 xd->sc_dk.dk_label, xd->sc_dk.dk_cpulabel); 308 if (err) { 309 printf("%s: %s\n", xd->sc_dev.dv_xname, err); 310 return(XD_ERR_FAIL); 311 } 312 313 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */ 314 sdl = (struct sun_disklabel *)xd->sc_dk.dk_cpulabel->cd_block; 315 if (sdl->sl_magic == SUN_DKMAGIC) 316 xd->pcyl = sdl->sl_pcyl; 317 else { 318 printf("%s: WARNING: no `pcyl' in disk label.\n", 319 xd->sc_dev.dv_xname); 320 xd->pcyl = xd->sc_dk.dk_label->d_ncylinders + 321 xd->sc_dk.dk_label->d_acylinders; 322 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n", 323 xd->sc_dev.dv_xname, xd->pcyl); 324 } 325 326 xd->ncyl = xd->sc_dk.dk_label->d_ncylinders; 327 xd->acyl = xd->sc_dk.dk_label->d_acylinders; 328 xd->nhead = xd->sc_dk.dk_label->d_ntracks; 329 xd->nsect = xd->sc_dk.dk_label->d_nsectors; 330 xd->sectpercyl = xd->nhead * xd->nsect; 331 xd->sc_dk.dk_label->d_secsize = XDFM_BPS; /* not handled by 332 * sun->bsd */ 333 return(XD_ERR_AOK); 334 } 335 336 /* 337 * end: disk label fix code (XXX) 338 */ 339 340 /* 341 * a u t o c o n f i g f u n c t i o n s 342 */ 343 344 /* 345 * xdcmatch: determine if xdc is present or not. we do a 346 * soft reset to detect the xdc. 347 */ 348 349 int xdcmatch(parent, cf, aux) 350 struct device *parent; 351 struct cfdata *cf; 352 void *aux; 353 { 354 struct confargs *ca = aux; 355 int x; 356 357 if (ca->ca_bustype != BUS_VME32) 358 return (0); 359 360 /* Default interrupt priority always splbio==2 */ 361 if (ca->ca_intpri == -1) 362 ca->ca_intpri = 2; 363 364 x = bus_peek(ca->ca_bustype, ca->ca_paddr + 11, 1); 365 if (x == -1) 366 return (0); 367 368 return (1); 369 } 370 371 /* 372 * xdcattach: attach controller 373 */ 374 void 375 xdcattach(parent, self, aux) 376 struct device *parent, *self; 377 void *aux; 378 379 { 380 struct xdc_softc *xdc = (void *) self; 381 struct confargs *ca = aux; 382 struct xdc_attach_args xa; 383 int lcv, rqno, err, pri; 384 struct xd_iopb_ctrl *ctl; 385 386 /* get addressing and intr level stuff from autoconfig and load it 387 * into our xdc_softc. */ 388 389 xdc->xdc = (struct xdc *) 390 bus_mapin(ca->ca_bustype, ca->ca_paddr, sizeof(struct xdc)); 391 xdc->ipl = ca->ca_intpri; 392 xdc->vector = ca->ca_intvec; 393 394 for (lcv = 0; lcv < XDC_MAXDEV; lcv++) 395 xdc->sc_drives[lcv] = (struct xd_softc *) 0; 396 397 /* allocate and zero buffers 398 * 399 * note: we simplify the code by allocating the max number of iopbs and 400 * iorq's up front. thus, we avoid linked lists and the costs 401 * associated with them in exchange for wasting a little memory. 
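 *
 * (illustrative note, not in the original:) the counters set up below
 * partition those XDC_MAXIOPB slots, so at splbio the invariant
 *
 *	nfree + nwait + nrun + ndone == XDC_MAXIOPB
 *
 * should always hold; the XDC_DIAG code in xdc_reset() and xdc_tick()
 * prints a "miscount" warning whenever it does not.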
*/ 402 403 xdc->iopbase = (struct xd_iopb *) 404 dvma_malloc(XDC_MAXIOPB * sizeof(struct xd_iopb)); /* KVA */ 405 bzero(xdc->iopbase, XDC_MAXIOPB * sizeof(struct xd_iopb)); 406 xdc->dvmaiopb = (struct xd_iopb *) 407 dvma_kvtopa((long) xdc->iopbase, BUS_VME32); 408 xdc->reqs = (struct xd_iorq *) 409 malloc(XDC_MAXIOPB * sizeof(struct xd_iorq), M_DEVBUF, M_NOWAIT); 410 if (xdc->reqs == NULL) 411 panic("xdc malloc"); 412 bzero(xdc->reqs, XDC_MAXIOPB * sizeof(struct xd_iorq)); 413 414 /* init free list, iorq to iopb pointers, and non-zero fields in the 415 * iopb which never change. */ 416 417 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 418 xdc->reqs[lcv].iopb = &xdc->iopbase[lcv]; 419 xdc->freereq[lcv] = lcv; 420 xdc->iopbase[lcv].fixd = 1; /* always the same */ 421 xdc->iopbase[lcv].naddrmod = XDC_ADDRMOD; /* always the same */ 422 xdc->iopbase[lcv].intr_vec = xdc->vector; /* always the same */ 423 } 424 xdc->nfree = XDC_MAXIOPB; 425 xdc->nrun = 0; 426 xdc->waithead = xdc->waitend = xdc->nwait = 0; 427 xdc->ndone = 0; 428 429 /* init queue of waiting bufs */ 430 431 xdc->sc_wq.b_active = 0; 432 xdc->sc_wq.b_actf = 0; 433 xdc->sc_wq.b_actb = &xdc->sc_wq.b_actf; 434 435 /* 436 * section 7 of the manual tells us how to init the controller: 437 * - read controller parameters (6/0) 438 * - write controller parameters (5/0) 439 */ 440 441 /* read controller parameters and insure we have a 753/7053 */ 442 443 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL); 444 if (rqno == XD_ERR_FAIL) { 445 printf(": couldn't read controller params\n"); 446 return; /* shouldn't ever happen */ 447 } 448 ctl = (struct xd_iopb_ctrl *) & xdc->iopbase[rqno]; 449 if (ctl->ctype != XDCT_753) { 450 if (xdc->reqs[rqno].errno) 451 printf(": %s: ", xdc_e2str(xdc->reqs[rqno].errno)); 452 printf(": doesn't identify as a 753/7053\n"); 453 XDC_DONE(xdc, rqno, err); 454 return; 455 } 456 printf(": Xylogics 753/7053, PROM=%x.%02x.%02x\n", 457 ctl->eprom_partno, ctl->eprom_lvl, ctl->eprom_rev); 458 XDC_DONE(xdc, rqno, err); 459 460 /* now write controller parameters (xdc_cmd sets all params for us) */ 461 462 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL); 463 XDC_DONE(xdc, rqno, err); 464 if (err) { 465 printf("%s: controller config error: %s\n", 466 xdc->sc_dev.dv_xname, xdc_e2str(err)); 467 return; 468 } 469 470 /* link in interrupt with higher level software */ 471 isr_add_vectored(xdcintr, (void *)xdc, 472 ca->ca_intpri, ca->ca_intvec); 473 evcnt_attach(&xdc->sc_dev, "intr", &xdc->sc_intrcnt); 474 475 /* now we must look for disks using autoconfig */ 476 xa.dvmabuf = (char *) dvma_malloc(XDFM_BPS); 477 xa.fullmode = XD_SUB_POLL; 478 xa.booting = 1; 479 480 for (xa.driveno = 0; xa.driveno < XDC_MAXDEV; xa.driveno++) 481 (void) config_found(self, (void *) &xa, xdc_print); 482 483 dvma_free(xa.dvmabuf, XDFM_BPS); 484 485 /* start the watchdog clock */ 486 timeout(xdc_tick, xdc, XDC_TICKCNT); 487 } 488 489 int 490 xdc_print(aux, name) 491 void *aux; 492 char *name; 493 { 494 struct xdc_attach_args *xa = aux; 495 496 if (name != NULL) 497 printf("%s: ", name); 498 499 if (xa->driveno != -1) 500 printf(" drive %d", xa->driveno); 501 502 return UNCONF; 503 } 504 505 /* 506 * xdmatch: probe for disk. 507 * 508 * note: we almost always say disk is present. this allows us to 509 * spin up and configure a disk after the system is booted (we can 510 * call xdattach!). 
511 */ 512 int 513 xdmatch(parent, cf, aux) 514 struct device *parent; 515 struct cfdata *cf; 516 void *aux; 517 518 { 519 struct xdc_softc *xdc = (void *) parent; 520 struct xdc_attach_args *xa = aux; 521 522 /* looking for autoconf wildcard or exact match */ 523 524 if (cf->cf_loc[0] != -1 && cf->cf_loc[0] != xa->driveno) 525 return 0; 526 527 return 1; 528 529 } 530 531 /* 532 * xdattach: attach a disk. this can be called from autoconf and also 533 * from xdopen/xdstrategy. 534 */ 535 void 536 xdattach(parent, self, aux) 537 struct device *parent, *self; 538 void *aux; 539 540 { 541 struct xd_softc *xd = (void *) self; 542 struct xdc_softc *xdc = (void *) parent; 543 struct xdc_attach_args *xa = aux; 544 int rqno, err, spt, mb, blk, lcv, fmode, s, newstate; 545 struct xd_iopb_drive *driopb; 546 struct dkbad *dkb; 547 struct bootpath *bp; 548 549 /* 550 * Always re-initialize the disk structure. We want statistics 551 * to start with a clean slate. 552 */ 553 bzero(&xd->sc_dk, sizeof(xd->sc_dk)); 554 xd->sc_dk.dk_driver = &xddkdriver; 555 xd->sc_dk.dk_name = xd->sc_dev.dv_xname; 556 557 /* if booting, init the xd_softc */ 558 559 if (xa->booting) { 560 xd->state = XD_DRIVE_UNKNOWN; /* to start */ 561 xd->flags = 0; 562 xd->parent = xdc; 563 } 564 xd->xd_drive = xa->driveno; 565 fmode = xa->fullmode; 566 xdc->sc_drives[xa->driveno] = xd; 567 568 /* if not booting, make sure we are the only process in the attach for 569 * this drive. if locked out, sleep on it. */ 570 571 if (!xa->booting) { 572 s = splbio(); 573 while (xd->state == XD_DRIVE_ATTACHING) { 574 if (tsleep(&xd->state, PRIBIO, "xdattach", 0)) { 575 splx(s); 576 return; 577 } 578 } 579 printf("%s at %s", 580 xd->sc_dev.dv_xname, 581 xd->parent->sc_dev.dv_xname); 582 } 583 /* we now have control */ 584 585 xd->state = XD_DRIVE_ATTACHING; 586 newstate = XD_DRIVE_UNKNOWN; 587 588 /* first try and reset the drive */ 589 590 rqno = xdc_cmd(xdc, XDCMD_RST, 0, xd->xd_drive, 0, 0, 0, fmode); 591 XDC_DONE(xdc, rqno, err); 592 if (err == XD_ERR_NRDY) { 593 printf(" drive %d: off-line\n", xa->driveno); 594 goto done; 595 } 596 if (err) { 597 printf(": ERROR 0x%02x (%s)\n", err, xdc_e2str(err)); 598 goto done; 599 } 600 printf(" drive %d: ready\n", xa->driveno); 601 602 /* now set format parameters */ 603 604 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_FMT, xd->xd_drive, 0, 0, 0, fmode); 605 XDC_DONE(xdc, rqno, err); 606 if (err) { 607 printf("%s: write format parameters failed: %s\n", 608 xd->sc_dev.dv_xname, xdc_e2str(err)); 609 goto done; 610 } 611 612 /* get drive parameters */ 613 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode); 614 if (rqno != XD_ERR_FAIL) { 615 driopb = (struct xd_iopb_drive *) & xdc->iopbase[rqno]; 616 spt = driopb->sectpertrk; 617 } 618 XDC_DONE(xdc, rqno, err); 619 if (err) { 620 printf("%s: read drive parameters failed: %s\n", 621 xd->sc_dev.dv_xname, xdc_e2str(err)); 622 goto done; 623 } 624 625 /* 626 * now set drive parameters (to semi-bogus values) so we can read the 627 * disk label. 
628 */ 629 xd->pcyl = xd->ncyl = 1; 630 xd->acyl = 0; 631 xd->nhead = 1; 632 xd->nsect = 1; 633 xd->sectpercyl = 1; 634 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */ 635 xd->dkb.bt_bad[lcv].bt_cyl = xd->dkb.bt_bad[lcv].bt_trksec = 0xffff; 636 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode); 637 XDC_DONE(xdc, rqno, err); 638 if (err) { 639 printf("%s: write drive parameters failed: %s\n", 640 xd->sc_dev.dv_xname, xdc_e2str(err)); 641 goto done; 642 } 643 644 /* read disk label */ 645 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, 0, 1, 646 xa->dvmabuf, fmode); 647 XDC_DONE(xdc, rqno, err); 648 if (err) { 649 printf("%s: reading disk label failed: %s\n", 650 xd->sc_dev.dv_xname, xdc_e2str(err)); 651 goto done; 652 } 653 newstate = XD_DRIVE_NOLABEL; 654 655 xd->hw_spt = spt; 656 /* Attach the disk: must be before getdisklabel to malloc label */ 657 disk_attach(&xd->sc_dk); 658 659 if (xdgetdisklabel(xd, xa->dvmabuf) != XD_ERR_AOK) 660 goto done; 661 662 /* inform the user of what is up */ 663 printf("%s: <%s>, pcyl %d, hw_spt %d\n", 664 xd->sc_dev.dv_xname, 665 xa->dvmabuf, xd->pcyl, spt); 666 mb = xd->ncyl * (xd->nhead * xd->nsect) / (1048576 / XDFM_BPS); 667 printf("%s: %dMB, %d cyl, %d head, %d sec, %d bytes/sec\n", 668 xd->sc_dev.dv_xname, mb, 669 xd->ncyl, xd->nhead, xd->nsect, XDFM_BPS); 670 671 /* now set the real drive parameters! */ 672 673 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode); 674 XDC_DONE(xdc, rqno, err); 675 if (err) { 676 printf("%s: write real drive parameters failed: %s\n", 677 xd->sc_dev.dv_xname, xdc_e2str(err)); 678 goto done; 679 } 680 newstate = XD_DRIVE_ONLINE; 681 682 /* 683 * read bad144 table. this table resides on the first sector of the 684 * last track of the disk (i.e. second cyl of "acyl" area). 685 */ 686 687 blk = (xd->ncyl + xd->acyl - 1) * (xd->nhead * xd->nsect) + /* last cyl */ 688 (xd->nhead - 1) * xd->nsect; /* last head */ 689 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, blk, 1, xa->dvmabuf, fmode); 690 XDC_DONE(xdc, rqno, err); 691 if (err) { 692 printf("%s: reading bad144 failed: %s\n", 693 xd->sc_dev.dv_xname, xdc_e2str(err)); 694 goto done; 695 } 696 697 /* check dkbad for sanity */ 698 dkb = (struct dkbad *) xa->dvmabuf; 699 for (lcv = 0; lcv < 126; lcv++) { 700 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff || 701 dkb->bt_bad[lcv].bt_cyl == 0) && 702 dkb->bt_bad[lcv].bt_trksec == 0xffff) 703 continue; /* blank */ 704 if (dkb->bt_bad[lcv].bt_cyl >= xd->ncyl) 705 break; 706 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xd->nhead) 707 break; 708 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xd->nsect) 709 break; 710 } 711 if (lcv != 126) { 712 printf("%s: warning: invalid bad144 sector!\n", 713 xd->sc_dev.dv_xname); 714 } else { 715 bcopy(xa->dvmabuf, &xd->dkb, XDFM_BPS); 716 } 717 718 /* XXX - Where is this and what does it do? 
-gwr */ 719 dk_establish(&xd->sc_dk, &xd->sc_dev); 720 721 done: 722 xd->state = newstate; 723 if (!xa->booting) { 724 wakeup(&xd->state); 725 splx(s); 726 } 727 } 728 729 /* 730 * end of autoconfig functions 731 */ 732 733 /* 734 * { b , c } d e v s w f u n c t i o n s 735 */ 736 737 /* 738 * xdclose: close device 739 */ 740 int 741 xdclose(dev, flag, fmt) 742 dev_t dev; 743 int flag, fmt; 744 745 { 746 struct xd_softc *xd = xd_cd.cd_devs[DISKUNIT(dev)]; 747 int part = DISKPART(dev); 748 749 /* clear mask bits */ 750 751 switch (fmt) { 752 case S_IFCHR: 753 xd->sc_dk.dk_copenmask &= ~(1 << part); 754 break; 755 case S_IFBLK: 756 xd->sc_dk.dk_bopenmask &= ~(1 << part); 757 break; 758 } 759 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 760 761 return 0; 762 } 763 764 /* 765 * xddump: crash dump system 766 */ 767 int 768 xddump(dev) 769 dev_t dev; 770 771 { 772 int unit, part; 773 struct xd_softc *xd; 774 775 unit = DISKUNIT(dev); 776 if (unit >= xd_cd.cd_ndevs) 777 return ENXIO; 778 part = DISKPART(dev); 779 780 xd = xd_cd.cd_devs[unit]; 781 782 printf("%s%c: crash dump not supported (yet)\n", 783 xd->sc_dev.dv_xname, 'a' + part); 784 785 return ENXIO; 786 787 /* outline: globals: "dumplo" == sector number of partition to start 788 * dump at (convert to physical sector with partition table) 789 * "dumpsize" == size of dump in clicks "physmem" == size of physical 790 * memory (clicks, ctob() to get bytes) (normal case: dumpsize == 791 * physmem) 792 * 793 * dump a copy of physical memory to the dump device starting at sector 794 * "dumplo" in the swap partition (make sure > 0). map in pages as 795 * we go. use polled I/O. 796 * 797 * XXX how to handle NON_CONTIG? */ 798 799 } 800 801 /* 802 * xdioctl: ioctls on XD drives. based on ioctl's of other netbsd disks. 
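 *
 * hedged user-level sketch (not from this file): the standard disklabel
 * ioctls handled below can be driven from an ordinary program, e.g.
 * (assuming <fcntl.h>, <sys/ioctl.h>, <sys/disklabel.h>, and that the
 * raw partition really is /dev/rxd0c on the machine at hand):
 *
 *	struct disklabel dl;
 *	int fd = open("/dev/rxd0c", O_RDONLY);
 *
 *	if (fd != -1 && ioctl(fd, DIOCGDINFO, &dl) == 0)
 *		printf("%d cyl, %d head, %d sec\n",
 *		    dl.d_ncylinders, dl.d_ntracks, dl.d_nsectors);
 *
 * DIOCSDINFO/DIOCWDINFO take a struct disklabel in the other direction
 * and require the descriptor to be open for writing (FWRITE, see below).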
803 */ 804 int 805 xdioctl(dev, command, addr, flag, p) 806 dev_t dev; 807 u_long command; 808 caddr_t addr; 809 int flag; 810 struct proc *p; 811 812 { 813 struct xd_softc *xd; 814 struct xd_iocmd *xio; 815 int error, s, unit; 816 817 unit = DISKUNIT(dev); 818 819 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL) 820 return (ENXIO); 821 822 /* switch on ioctl type */ 823 824 switch (command) { 825 case DIOCSBAD: /* set bad144 info */ 826 if ((flag & FWRITE) == 0) 827 return EBADF; 828 s = splbio(); 829 bcopy(addr, &xd->dkb, sizeof(xd->dkb)); 830 splx(s); 831 return 0; 832 833 case DIOCGDINFO: /* get disk label */ 834 bcopy(xd->sc_dk.dk_label, addr, sizeof(struct disklabel)); 835 return 0; 836 837 case DIOCGPART: /* get partition info */ 838 ((struct partinfo *) addr)->disklab = xd->sc_dk.dk_label; 839 ((struct partinfo *) addr)->part = 840 &xd->sc_dk.dk_label->d_partitions[DISKPART(dev)]; 841 return 0; 842 843 case DIOCSDINFO: /* set disk label */ 844 if ((flag & FWRITE) == 0) 845 return EBADF; 846 error = setdisklabel(xd->sc_dk.dk_label, 847 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0, 848 xd->sc_dk.dk_cpulabel); 849 if (error == 0) { 850 if (xd->state == XD_DRIVE_NOLABEL) 851 xd->state = XD_DRIVE_ONLINE; 852 } 853 return error; 854 855 case DIOCWLABEL: /* change write status of disk label */ 856 if ((flag & FWRITE) == 0) 857 return EBADF; 858 if (*(int *) addr) 859 xd->flags |= XD_WLABEL; 860 else 861 xd->flags &= ~XD_WLABEL; 862 return 0; 863 864 case DIOCWDINFO: /* write disk label */ 865 if ((flag & FWRITE) == 0) 866 return EBADF; 867 error = setdisklabel(xd->sc_dk.dk_label, 868 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0, 869 xd->sc_dk.dk_cpulabel); 870 if (error == 0) { 871 if (xd->state == XD_DRIVE_NOLABEL) 872 xd->state = XD_DRIVE_ONLINE; 873 874 /* Simulate opening partition 0 so write succeeds. */ 875 xd->sc_dk.dk_openmask |= (1 << 0); 876 error = writedisklabel(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART), 877 xdstrategy, xd->sc_dk.dk_label, 878 xd->sc_dk.dk_cpulabel); 879 xd->sc_dk.dk_openmask = 880 xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 881 } 882 return error; 883 884 case DIOSXDCMD: 885 xio = (struct xd_iocmd *) addr; 886 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) 887 return (error); 888 return (xdc_ioctlcmd(xd, dev, xio)); 889 890 default: 891 return ENOTTY; 892 } 893 } 894 /* 895 * xdopen: open drive 896 */ 897 898 int 899 xdopen(dev, flag, fmt) 900 dev_t dev; 901 int flag, fmt; 902 903 { 904 int unit, part; 905 struct xd_softc *xd; 906 struct xdc_attach_args xa; 907 908 /* first, could it be a valid target? */ 909 910 unit = DISKUNIT(dev); 911 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL) 912 return (ENXIO); 913 part = DISKPART(dev); 914 915 /* do we need to attach the drive? 
*/ 916 917 if (xd->state == XD_DRIVE_UNKNOWN) { 918 xa.driveno = xd->xd_drive; 919 xa.dvmabuf = (char *) dvma_malloc(XDFM_BPS); 920 xa.fullmode = XD_SUB_WAIT; 921 xa.booting = 0; 922 xdattach((struct device *) xd->parent, (struct device *) xd, &xa); 923 dvma_free(xa.dvmabuf, XDFM_BPS); 924 if (xd->state == XD_DRIVE_UNKNOWN) { 925 return (EIO); 926 } 927 } 928 /* check for partition */ 929 930 if (part != RAW_PART && 931 (part >= xd->sc_dk.dk_label->d_npartitions || 932 xd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) { 933 return (ENXIO); 934 } 935 /* set open masks */ 936 937 switch (fmt) { 938 case S_IFCHR: 939 xd->sc_dk.dk_copenmask |= (1 << part); 940 break; 941 case S_IFBLK: 942 xd->sc_dk.dk_bopenmask |= (1 << part); 943 break; 944 } 945 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 946 947 return 0; 948 } 949 950 int 951 xdread(dev, uio) 952 dev_t dev; 953 struct uio *uio; 954 { 955 956 return (physio(xdstrategy, NULL, dev, B_READ, minphys, uio)); 957 } 958 959 int 960 xdwrite(dev, uio) 961 dev_t dev; 962 struct uio *uio; 963 { 964 965 return (physio(xdstrategy, NULL, dev, B_WRITE, minphys, uio)); 966 } 967 968 969 /* 970 * xdsize: return size of a partition for a dump 971 */ 972 973 int 974 xdsize(dev) 975 dev_t dev; 976 977 { 978 struct xd_softc *xdsc; 979 int unit, part, size, omask; 980 981 /* valid unit? */ 982 unit = DISKUNIT(dev); 983 if (unit >= xd_cd.cd_ndevs || (xdsc = xd_cd.cd_devs[unit]) == NULL) 984 return (-1); 985 986 part = DISKPART(dev); 987 omask = xdsc->sc_dk.dk_openmask & (1 << part); 988 989 if (omask == 0 && xdopen(dev, 0, S_IFBLK) != 0) 990 return (-1); 991 992 /* do it */ 993 if (xdsc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP) 994 size = -1; /* only give valid size for swap partitions */ 995 else 996 size = xdsc->sc_dk.dk_label->d_partitions[part].p_size * 997 (xdsc->sc_dk.dk_label->d_secsize / DEV_BSIZE); 998 if (omask == 0 && xdclose(dev, 0, S_IFBLK) != 0) 999 return (-1); 1000 return (size); 1001 } 1002 /* 1003 * xdstrategy: buffering system interface to xd. 1004 */ 1005 1006 void 1007 xdstrategy(bp) 1008 struct buf *bp; 1009 1010 { 1011 struct xd_softc *xd; 1012 struct xdc_softc *parent; 1013 struct buf *wq; 1014 int s, unit; 1015 struct xdc_attach_args xa; 1016 1017 unit = DISKUNIT(bp->b_dev); 1018 1019 /* check for live device */ 1020 1021 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == 0 || 1022 bp->b_blkno < 0 || 1023 (bp->b_bcount % xd->sc_dk.dk_label->d_secsize) != 0) { 1024 bp->b_error = EINVAL; 1025 goto bad; 1026 } 1027 /* do we need to attach the drive? */ 1028 1029 if (xd->state == XD_DRIVE_UNKNOWN) { 1030 xa.driveno = xd->xd_drive; 1031 xa.dvmabuf = (char *) dvma_malloc(XDFM_BPS); 1032 xa.fullmode = XD_SUB_WAIT; 1033 xa.booting = 0; 1034 xdattach((struct device *)xd->parent, (struct device *)xd, &xa); 1035 dvma_free(xa.dvmabuf, XDFM_BPS); 1036 if (xd->state == XD_DRIVE_UNKNOWN) { 1037 bp->b_error = EIO; 1038 goto bad; 1039 } 1040 } 1041 if (xd->state != XD_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) { 1042 /* no I/O to unlabeled disks, unless raw partition */ 1043 bp->b_error = EIO; 1044 goto bad; 1045 } 1046 /* short circuit zero length request */ 1047 1048 if (bp->b_bcount == 0) 1049 goto done; 1050 1051 /* check bounds with label (disksubr.c). Determine the size of the 1052 * transfer, and make sure it is within the boundaries of the 1053 * partition. Adjust transfer if needed, and signal errors or early 1054 * completion. 
*/ 1055 1056 if (bounds_check_with_label(bp, xd->sc_dk.dk_label, 1057 (xd->flags & XD_WLABEL) != 0) <= 0) 1058 goto done; 1059 1060 /* 1061 * now we know we have a valid buf structure that we need to do I/O 1062 * on. 1063 * 1064 * note that we don't disksort because the controller has a sorting 1065 * algorithm built into the hardware. 1066 */ 1067 1068 s = splbio(); /* protect the queues */ 1069 1070 /* first, give jobs in front of us a chance */ 1071 1072 parent = xd->parent; 1073 while (parent->nfree > 0 && parent->sc_wq.b_actf) 1074 if (xdc_startbuf(parent, NULL, NULL) != XD_ERR_AOK) 1075 break; 1076 1077 /* if there are no free iorq's, then we just queue and return. the 1078 * buffs will get picked up later by xdcintr(). */ 1079 1080 if (parent->nfree == 0) { 1081 wq = &xd->parent->sc_wq; 1082 bp->b_actf = 0; 1083 bp->b_actb = wq->b_actb; 1084 *wq->b_actb = bp; 1085 wq->b_actb = &bp->b_actf; 1086 splx(s); 1087 return; 1088 } 1089 /* now we have free iopb's and we are at splbio... start 'em up */ 1090 1091 if (xdc_startbuf(parent, xd, bp) != XD_ERR_AOK) { 1092 return; 1093 } 1094 1095 /* done! */ 1096 1097 splx(s); 1098 return; 1099 1100 bad: /* tells upper layers we have an error */ 1101 bp->b_flags |= B_ERROR; 1102 done: /* tells upper layers we are done with this 1103 * buf */ 1104 bp->b_resid = bp->b_bcount; 1105 biodone(bp); 1106 } 1107 /* 1108 * end of {b,c}devsw functions 1109 */ 1110 1111 /* 1112 * i n t e r r u p t f u n c t i o n 1113 * 1114 * xdcintr: hardware interrupt. 1115 */ 1116 int 1117 xdcintr(v) 1118 void *v; 1119 1120 { 1121 struct xdc_softc *xdcsc = v; 1122 struct xd_softc *xd; 1123 struct buf *bp; 1124 1125 /* kick the event counter */ 1126 1127 xdcsc->sc_intrcnt.ev_count++; 1128 1129 /* remove as many done IOPBs as possible */ 1130 1131 xdc_remove_iorq(xdcsc); 1132 1133 /* start any iorq's already waiting */ 1134 1135 xdc_start(xdcsc, XDC_MAXIOPB); 1136 1137 /* fill up any remaining iorq's with queue'd buffers */ 1138 1139 while (xdcsc->nfree > 0 && xdcsc->sc_wq.b_actf) 1140 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK) 1141 break; 1142 1143 return (1); 1144 } 1145 /* 1146 * end of interrupt function 1147 */ 1148 1149 /* 1150 * i n t e r n a l f u n c t i o n s 1151 */ 1152 1153 /* 1154 * xdc_rqinit: fill out the fields of an I/O request 1155 */ 1156 1157 inline void 1158 xdc_rqinit(rq, xdc, xd, md, blk, cnt, db, bp) 1159 struct xd_iorq *rq; 1160 struct xdc_softc *xdc; 1161 struct xd_softc *xd; 1162 int md; 1163 u_long blk; 1164 int cnt; 1165 caddr_t db; 1166 struct buf *bp; 1167 { 1168 rq->xdc = xdc; 1169 rq->xd = xd; 1170 rq->ttl = XDC_MAXTTL + 10; 1171 rq->mode = md; 1172 rq->tries = rq->errno = rq->lasterror = 0; 1173 rq->blockno = blk; 1174 rq->sectcnt = cnt; 1175 rq->dbuf = rq->dbufbase = db; 1176 rq->buf = bp; 1177 } 1178 /* 1179 * xdc_rqtopb: load up an IOPB based on an iorq 1180 */ 1181 1182 void 1183 xdc_rqtopb(iorq, iopb, cmd, subfun) 1184 struct xd_iorq *iorq; 1185 struct xd_iopb *iopb; 1186 int cmd, subfun; 1187 1188 { 1189 u_long block, dp; 1190 1191 /* standard stuff */ 1192 1193 iopb->errs = iopb->done = 0; 1194 iopb->comm = cmd; 1195 iopb->errno = iopb->status = 0; 1196 iopb->subfun = subfun; 1197 if (iorq->xd) 1198 iopb->unit = iorq->xd->xd_drive; 1199 else 1200 iopb->unit = 0; 1201 1202 /* check for alternate IOPB format */ 1203 1204 if (cmd == XDCMD_WRP) { 1205 switch (subfun) { 1206 case XDFUN_CTL:{ 1207 struct xd_iopb_ctrl *ctrl = 1208 (struct xd_iopb_ctrl *) iopb; 1209 iopb->lll = 0; 1210 iopb->intl = (XD_STATE(iorq->mode) == 
XD_SUB_POLL) 1211 ? 0 1212 : iorq->xdc->ipl; 1213 ctrl->param_a = XDPA_TMOD | XDPA_DACF; 1214 ctrl->param_b = XDPB_ROR | XDPB_TDT_3_2USEC; 1215 ctrl->param_c = XDPC_OVS | XDPC_COP | XDPC_ASR | 1216 XDPC_RBC | XDPC_ECC2; 1217 ctrl->throttle = XDC_THROTTLE; 1218 #ifdef sparc 1219 if (cputyp == CPU_SUN4 && cpumod == SUN4_300) 1220 ctrl->delay = XDC_DELAY_4_300; 1221 else 1222 ctrl->delay = XDC_DELAY_SPARC; 1223 #endif 1224 #ifdef sun3 1225 ctrl->delay = XDC_DELAY_SUN3; 1226 #endif 1227 break; 1228 } 1229 case XDFUN_DRV:{ 1230 struct xd_iopb_drive *drv = 1231 (struct xd_iopb_drive *)iopb; 1232 /* we assume that the disk label has the right 1233 * info */ 1234 if (XD_STATE(iorq->mode) == XD_SUB_POLL) 1235 drv->dparam_ipl = (XDC_DPARAM << 3); 1236 else 1237 drv->dparam_ipl = (XDC_DPARAM << 3) | 1238 iorq->xdc->ipl; 1239 drv->maxsect = iorq->xd->nsect - 1; 1240 drv->maxsector = drv->maxsect; 1241 /* note: maxsector != maxsect only if you are 1242 * doing cyl sparing */ 1243 drv->headoff = 0; 1244 drv->maxcyl = iorq->xd->pcyl - 1; 1245 drv->maxhead = iorq->xd->nhead - 1; 1246 break; 1247 } 1248 case XDFUN_FMT:{ 1249 struct xd_iopb_format *form = 1250 (struct xd_iopb_format *) iopb; 1251 if (XD_STATE(iorq->mode) == XD_SUB_POLL) 1252 form->interleave_ipl = (XDC_INTERLEAVE << 3); 1253 else 1254 form->interleave_ipl = (XDC_INTERLEAVE << 3) | 1255 iorq->xdc->ipl; 1256 form->field1 = XDFM_FIELD1; 1257 form->field2 = XDFM_FIELD2; 1258 form->field3 = XDFM_FIELD3; 1259 form->field4 = XDFM_FIELD4; 1260 form->bytespersec = XDFM_BPS; 1261 form->field6 = XDFM_FIELD6; 1262 form->field7 = XDFM_FIELD7; 1263 break; 1264 } 1265 } 1266 } else { 1267 1268 /* normal IOPB case (harmless to RDP command) */ 1269 1270 iopb->lll = 0; 1271 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL) 1272 ? 0 1273 : iorq->xdc->ipl; 1274 iopb->sectcnt = iorq->sectcnt; 1275 block = iorq->blockno; 1276 if (iorq->xd == NULL || block == 0) { 1277 iopb->sectno = iopb->headno = iopb->cylno = 0; 1278 } else { 1279 iopb->sectno = block % iorq->xd->nsect; 1280 block = block / iorq->xd->nsect; 1281 iopb->headno = block % iorq->xd->nhead; 1282 block = block / iorq->xd->nhead; 1283 iopb->cylno = block; 1284 } 1285 iopb->daddr = dp = (iorq->dbuf == NULL) ? 0 : 1286 dvma_kvtopa((long)iorq->dbuf, BUS_VME32); 1287 iopb->addrmod = XDC_ADDRMOD; 1288 } 1289 } 1290 1291 /* 1292 * xdc_cmd: front end for POLL'd and WAIT'd commands. Returns rqno. 1293 * If you've already got an IORQ, you can call submit directly (currently 1294 * there is no need to do this). NORM requests are handled seperately. 
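 *
 * illustrative call (copied from xdcattach above, nothing new): a polled
 * "read controller parameters" command looks like
 *
 *	rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
 *	if (rqno == XD_ERR_FAIL)
 *		... could not even get an iorq ...
 *	ctl = (struct xd_iopb_ctrl *) &xdc->iopbase[rqno];
 *	XDC_DONE(xdc, rqno, err);
 *
 * xdc_cmd does not free the iorq; the caller hands the returned rqno to
 * XDC_DONE, which copes with XD_ERR_FAIL, leaves the error code in "err",
 * and puts the iorq back on the free list.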
1295 */ 1296 int 1297 xdc_cmd(xdcsc, cmd, subfn, unit, block, scnt, dptr, fullmode) 1298 struct xdc_softc *xdcsc; 1299 int cmd, subfn, unit, block, scnt; 1300 char *dptr; 1301 int fullmode; 1302 1303 { 1304 int rqno, submode = XD_STATE(fullmode), retry; 1305 u_long dp; 1306 struct xd_iorq *iorq; 1307 struct xd_iopb *iopb; 1308 1309 /* get iorq/iopb */ 1310 switch (submode) { 1311 case XD_SUB_POLL: 1312 while (xdcsc->nfree == 0) { 1313 if (xdc_piodriver(xdcsc, 0, 1) != XD_ERR_AOK) 1314 return (XD_ERR_FAIL); 1315 } 1316 break; 1317 case XD_SUB_WAIT: 1318 retry = 1; 1319 while (retry) { 1320 while (xdcsc->nfree == 0) { 1321 if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0)) 1322 return (XD_ERR_FAIL); 1323 } 1324 while (xdcsc->ndone > XDC_SUBWAITLIM) { 1325 if (tsleep(&xdcsc->ndone, PRIBIO, "xdsubwait", 0)) 1326 return (XD_ERR_FAIL); 1327 } 1328 if (xdcsc->nfree) 1329 retry = 0; /* got it */ 1330 } 1331 break; 1332 default: 1333 return (XD_ERR_FAIL); /* illegal */ 1334 } 1335 if (xdcsc->nfree == 0) 1336 panic("xdcmd nfree"); 1337 rqno = XDC_RQALLOC(xdcsc); 1338 iorq = &xdcsc->reqs[rqno]; 1339 iopb = iorq->iopb; 1340 1341 1342 /* init iorq/iopb */ 1343 1344 xdc_rqinit(iorq, xdcsc, 1345 (unit == XDC_NOUNIT) ? NULL : xdcsc->sc_drives[unit], 1346 fullmode, block, scnt, dptr, NULL); 1347 1348 /* load IOPB from iorq */ 1349 1350 xdc_rqtopb(iorq, iopb, cmd, subfn); 1351 1352 /* submit it for processing */ 1353 1354 xdc_submit_iorq(xdcsc, rqno, fullmode); /* error code will be in iorq */ 1355 1356 return (rqno); 1357 } 1358 /* 1359 * xdc_startbuf 1360 * start a buffer running, assumes nfree > 0 1361 */ 1362 1363 int 1364 xdc_startbuf(xdcsc, xdsc, bp) 1365 struct xdc_softc *xdcsc; 1366 struct xd_softc *xdsc; 1367 struct buf *bp; 1368 1369 { 1370 int rqno, partno; 1371 struct xd_iorq *iorq; 1372 struct xd_iopb *iopb; 1373 struct buf *wq; 1374 u_long block, dp; 1375 caddr_t dbuf; 1376 1377 if (!xdcsc->nfree) 1378 panic("xdc_startbuf free"); 1379 rqno = XDC_RQALLOC(xdcsc); 1380 iorq = &xdcsc->reqs[rqno]; 1381 iopb = iorq->iopb; 1382 1383 /* get buf */ 1384 1385 if (bp == NULL) { 1386 bp = xdcsc->sc_wq.b_actf; 1387 if (!bp) 1388 panic("xdc_startbuf bp"); 1389 wq = bp->b_actf; 1390 if (wq) 1391 wq->b_actb = bp->b_actb; 1392 else 1393 xdcsc->sc_wq.b_actb = bp->b_actb; 1394 *bp->b_actb = wq; 1395 xdsc = xdcsc->sc_drives[DISKUNIT(bp->b_dev)]; 1396 } 1397 partno = DISKPART(bp->b_dev); 1398 #ifdef XDC_DEBUG 1399 printf("xdc_startbuf: %s%c: %s block %d\n", xdsc->sc_dev.dv_xname, 1400 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno); 1401 printf("xdc_startbuf: b_bcount %d, b_data 0x%x\n", 1402 bp->b_bcount, bp->b_data); 1403 #endif 1404 1405 /* 1406 * load request. we have to calculate the correct block number based 1407 * on partition info. 1408 * 1409 * also, note that there are two kinds of buf structures, those with 1410 * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is 1411 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users' 1412 * buffer which has already been mapped into DVMA space. (Not on sun3) 1413 * However, if B_PHYS is not set, then the buffer is a normal system 1414 * buffer which does *not* live in DVMA space. In that case we call 1415 * dvma_mapin to map it into DVMA space so we can do the DMA to it. 1416 * 1417 * in cases where we do a dvma_mapin, note that iorq points to the buffer 1418 * as mapped into DVMA space, where as the bp->b_data points to its 1419 * non-DVMA mapping. 
1420 * 1421 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped 1422 * into dvma space, only that it was remapped into the kernel. 1423 * We ALWAYS have to remap the kernel buf into DVMA space. 1424 * (It is done inexpensively, using whole segments!) 1425 */ 1426 1427 block = bp->b_blkno + ((partno == RAW_PART) ? 0 : 1428 xdsc->sc_dk.dk_label->d_partitions[partno].p_offset); 1429 1430 dbuf = dvma_mapin(bp->b_data, bp->b_bcount); 1431 if (dbuf == NULL) { /* out of DVMA space */ 1432 printf("%s: warning: out of DVMA space\n", xdcsc->sc_dev.dv_xname); 1433 XDC_FREE(xdcsc, rqno); 1434 wq = &xdcsc->sc_wq; /* put at end of queue */ 1435 bp->b_actf = 0; 1436 bp->b_actb = wq->b_actb; 1437 *wq->b_actb = bp; 1438 wq->b_actb = &bp->b_actf; 1439 return (XD_ERR_FAIL); /* XXX: need some sort of 1440 * call-back scheme here? */ 1441 } 1442 1443 /* init iorq and load iopb from it */ 1444 1445 xdc_rqinit(iorq, xdcsc, xdsc, XD_SUB_NORM | XD_MODE_VERBO, block, 1446 bp->b_bcount / XDFM_BPS, dbuf, bp); 1447 1448 xdc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XDCMD_RD : XDCMD_WR, 0); 1449 1450 /* Instrumentation. */ 1451 disk_busy(&xdsc->sc_dk); 1452 1453 /* now submit [note that xdc_submit_iorq can never fail on NORM reqs] */ 1454 1455 xdc_submit_iorq(xdcsc, rqno, XD_SUB_NORM); 1456 return (XD_ERR_AOK); 1457 } 1458 1459 1460 /* 1461 * xdc_submit_iorq: submit an iorq for processing. returns XD_ERR_AOK 1462 * if ok. if it fail returns an error code. type is XD_SUB_*. 1463 * 1464 * note: caller frees iorq in all cases except NORM 1465 * 1466 * return value: 1467 * NORM: XD_AOK (req pending), XD_FAIL (couldn't submit request) 1468 * WAIT: XD_AOK (success), <error-code> (failed) 1469 * POLL: <same as WAIT> 1470 * NOQ : <same as NORM> 1471 * 1472 * there are three sources for i/o requests: 1473 * [1] xdstrategy: normal block I/O, using "struct buf" system. 1474 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts. 1475 * [3] open/ioctl: these are I/O requests done in the context of a process, 1476 * and the process should block until they are done. 1477 * 1478 * software state is stored in the iorq structure. each iorq has an 1479 * iopb structure. the hardware understands the iopb structure. 1480 * every command must go through an iopb. a 7053 can only handle 1481 * XDC_MAXIOPB (31) active iopbs at one time. iopbs are allocated in 1482 * DVMA space at boot up time. what happens if we run out of iopb's? 1483 * for i/o type [1], the buffers are queued at the "buff" layer and 1484 * picked up later by the interrupt routine. for case [2] the 1485 * programmed i/o driver is called with a special flag that says 1486 * return when one iopb is free. for case [3] the process can sleep 1487 * on the iorq free list until some iopbs are avaliable. 
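 *
 * (illustrative excerpt, matching the XD_SUB_WAIT path in xdc_cmd below:)
 *
 *	while (xdcsc->nfree == 0) {
 *		if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0))
 *			return (XD_ERR_FAIL);
 *	}
 *
 * the matching wakeup(&(SC)->nfree) is issued by XDC_FREE when the free
 * list goes from empty to one entry, and xdc_remove_iorq()'s wakeup(iorq)
 * is what ends the per-request sleep in xdc_submit_iorq().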
1488 */ 1489 1490 1491 int 1492 xdc_submit_iorq(xdcsc, iorqno, type) 1493 struct xdc_softc *xdcsc; 1494 int iorqno; 1495 int type; 1496 1497 { 1498 u_long iopbaddr; 1499 struct xd_iorq *iorq = &xdcsc->reqs[iorqno]; 1500 1501 #ifdef XDC_DEBUG 1502 printf("xdc_submit_iorq(%s, no=%d, type=%d)\n", xdcsc->sc_dev.dv_xname, 1503 iorqno, type); 1504 #endif 1505 1506 /* first check and see if controller is busy */ 1507 if (xdcsc->xdc->xdc_csr & XDC_ADDING) { 1508 #ifdef XDC_DEBUG 1509 printf("xdc_submit_iorq: XDC not ready (ADDING)\n"); 1510 #endif 1511 if (type == XD_SUB_NOQ) 1512 return (XD_ERR_FAIL); /* failed */ 1513 XDC_TWAIT(xdcsc, iorqno); /* put at end of waitq */ 1514 switch (type) { 1515 case XD_SUB_NORM: 1516 return XD_ERR_AOK; /* success */ 1517 case XD_SUB_WAIT: 1518 while (iorq->iopb->done == 0) { 1519 sleep(iorq, PRIBIO); 1520 } 1521 return (iorq->errno); 1522 case XD_SUB_POLL: 1523 return (xdc_piodriver(xdcsc, iorqno, 0)); 1524 default: 1525 panic("xdc_submit_iorq adding"); 1526 } 1527 } 1528 #ifdef XDC_DEBUG 1529 { 1530 u_char *rio = (u_char *) iorq->iopb; 1531 int sz = sizeof(struct xd_iopb), lcv; 1532 printf("%s: aio #%d [", 1533 xdcsc->sc_dev.dv_xname, iorq - xdcsc->reqs); 1534 for (lcv = 0; lcv < sz; lcv++) 1535 printf(" %02x", rio[lcv]); 1536 printf("]\n"); 1537 } 1538 #endif /* XDC_DEBUG */ 1539 1540 /* controller not busy, start command */ 1541 iopbaddr = dvma_kvtopa((long) iorq->iopb, BUS_VME32); 1542 XDC_GO(xdcsc->xdc, iopbaddr); /* go! */ 1543 xdcsc->nrun++; 1544 /* command now running, wrap it up */ 1545 switch (type) { 1546 case XD_SUB_NORM: 1547 case XD_SUB_NOQ: 1548 return (XD_ERR_AOK); /* success */ 1549 case XD_SUB_WAIT: 1550 while (iorq->iopb->done == 0) { 1551 sleep(iorq, PRIBIO); 1552 } 1553 return (iorq->errno); 1554 case XD_SUB_POLL: 1555 return (xdc_piodriver(xdcsc, iorqno, 0)); 1556 default: 1557 panic("xdc_submit_iorq wrap up"); 1558 } 1559 panic("xdc_submit_iorq"); 1560 return 0; /* not reached */ 1561 } 1562 1563 1564 /* 1565 * xdc_piodriver 1566 * 1567 * programmed i/o driver. this function takes over the computer 1568 * and drains off all i/o requests. it returns the status of the iorq 1569 * the caller is interesting in. if freeone is true, then it returns 1570 * when there is a free iorq. 1571 */ 1572 int 1573 xdc_piodriver(xdcsc, iorqno, freeone) 1574 struct xdc_softc *xdcsc; 1575 char iorqno; 1576 int freeone; 1577 1578 { 1579 int nreset = 0; 1580 int retval = 0; 1581 u_long count; 1582 struct xdc *xdc = xdcsc->xdc; 1583 #ifdef XDC_DEBUG 1584 printf("xdc_piodriver(%s, %d, freeone=%d)\n", xdcsc->sc_dev.dv_xname, 1585 iorqno, freeone); 1586 #endif 1587 1588 while (xdcsc->nwait || xdcsc->nrun) { 1589 #ifdef XDC_DEBUG 1590 printf("xdc_piodriver: wait=%d, run=%d\n", 1591 xdcsc->nwait, xdcsc->nrun); 1592 #endif 1593 XDC_WAIT(xdc, count, XDC_MAXTIME, (XDC_REMIOPB | XDC_F_ERROR)); 1594 #ifdef XDC_DEBUG 1595 printf("xdc_piodriver: done wait with count = %d\n", count); 1596 #endif 1597 /* we expect some progress soon */ 1598 if (count == 0 && nreset >= 2) { 1599 xdc_reset(xdcsc, 0, XD_RSET_ALL, XD_ERR_FAIL, 0); 1600 #ifdef XDC_DEBUG 1601 printf("xdc_piodriver: timeout\n"); 1602 #endif 1603 return (XD_ERR_FAIL); 1604 } 1605 if (count == 0) { 1606 if (xdc_reset(xdcsc, 0, 1607 (nreset++ == 0) ? 
XD_RSET_NONE : iorqno, 1608 XD_ERR_FAIL, 1609 0) == XD_ERR_FAIL) 1610 return (XD_ERR_FAIL); /* flushes all but POLL 1611 * requests, resets */ 1612 continue; 1613 } 1614 xdc_remove_iorq(xdcsc); /* could resubmit request */ 1615 if (freeone) { 1616 if (xdcsc->nrun < XDC_MAXIOPB) { 1617 #ifdef XDC_DEBUG 1618 printf("xdc_piodriver: done: one free\n"); 1619 #endif 1620 return (XD_ERR_AOK); 1621 } 1622 continue; /* don't xdc_start */ 1623 } 1624 xdc_start(xdcsc, XDC_MAXIOPB); 1625 } 1626 1627 /* get return value */ 1628 1629 retval = xdcsc->reqs[iorqno].errno; 1630 1631 #ifdef XDC_DEBUG 1632 printf("xdc_piodriver: done, retval = 0x%x (%s)\n", 1633 xdcsc->reqs[iorqno].errno, xdc_e2str(xdcsc->reqs[iorqno].errno)); 1634 #endif 1635 1636 /* now that we've drained everything, start up any bufs that have 1637 * queued */ 1638 1639 while (xdcsc->nfree > 0 && xdcsc->sc_wq.b_actf) 1640 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK) 1641 break; 1642 1643 return (retval); 1644 } 1645 1646 /* 1647 * xdc_reset: reset one drive. NOTE: assumes xdc was just reset. 1648 * we steal iopb[0] for this, but we put it back when we are done. 1649 */ 1650 int 1651 xdc_xdreset(xdcsc, xdsc) 1652 struct xdc_softc *xdcsc; 1653 struct xd_softc *xdsc; 1654 1655 { 1656 struct xd_iopb tmpiopb; 1657 u_long addr; 1658 int del; 1659 bcopy(xdcsc->iopbase, &tmpiopb, sizeof(tmpiopb)); 1660 bzero(xdcsc->iopbase, sizeof(tmpiopb)); 1661 xdcsc->iopbase->comm = XDCMD_RST; 1662 xdcsc->iopbase->unit = xdsc->xd_drive; 1663 addr = (u_long) xdcsc->dvmaiopb; 1664 XDC_GO(xdcsc->xdc, addr); /* go! */ 1665 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_REMIOPB); 1666 if (del <= 0 || xdcsc->iopbase->errs) { 1667 printf("%s: off-line: %s\n", xdcsc->sc_dev.dv_xname, 1668 xdc_e2str(xdcsc->iopbase->errno)); 1669 xdcsc->xdc->xdc_csr = XDC_RESET; 1670 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET); 1671 if (del <= 0) 1672 panic("xdc_reset"); 1673 } else { 1674 xdcsc->xdc->xdc_csr = XDC_CLRRIO; /* clear RIO */ 1675 } 1676 bcopy(&tmpiopb, xdcsc->iopbase, sizeof(tmpiopb)); 1677 } 1678 1679 1680 /* 1681 * xdc_reset: reset everything: requests are marked as errors except 1682 * a polled request (which is resubmitted) 1683 */ 1684 int 1685 xdc_reset(xdcsc, quiet, blastmode, error, xdsc) 1686 struct xdc_softc *xdcsc; 1687 int quiet, blastmode, error; 1688 struct xd_softc *xdsc; 1689 1690 { 1691 int del = 0, lcv, poll = -1, retval = XD_ERR_AOK; 1692 int oldfree = xdcsc->nfree; 1693 struct xd_iorq *iorq; 1694 1695 /* soft reset hardware */ 1696 1697 if (!quiet) 1698 printf("%s: soft reset\n", xdcsc->sc_dev.dv_xname); 1699 xdcsc->xdc->xdc_csr = XDC_RESET; 1700 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET); 1701 if (del <= 0) { 1702 blastmode = XD_RSET_ALL; /* dead, flush all requests */ 1703 retval = XD_ERR_FAIL; 1704 } 1705 if (xdsc) 1706 xdc_xdreset(xdcsc, xdsc); 1707 1708 /* fix queues based on "blast-mode" */ 1709 1710 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 1711 iorq = &xdcsc->reqs[lcv]; 1712 1713 if (XD_STATE(iorq->mode) != XD_SUB_POLL && 1714 XD_STATE(iorq->mode) != XD_SUB_WAIT && 1715 XD_STATE(iorq->mode) != XD_SUB_NORM) 1716 /* is it active? 
*/ 1717 continue; 1718 1719 xdcsc->nrun--; /* it isn't running any more */ 1720 if (blastmode == XD_RSET_ALL || blastmode != lcv) { 1721 /* failed */ 1722 iorq->errno = error; 1723 xdcsc->iopbase[lcv].done = xdcsc->iopbase[lcv].errs = 1; 1724 switch (XD_STATE(iorq->mode)) { 1725 case XD_SUB_NORM: 1726 iorq->buf->b_error = EIO; 1727 iorq->buf->b_flags |= B_ERROR; 1728 iorq->buf->b_resid = 1729 iorq->sectcnt * XDFM_BPS; 1730 /* Sun3: map/unmap regardless of B_PHYS */ 1731 dvma_mapout(iorq->dbufbase, 1732 iorq->buf->b_bcount); 1733 disk_unbusy(&iorq->xd->sc_dk, 1734 (iorq->buf->b_bcount - iorq->buf->b_resid)); 1735 biodone(iorq->buf); 1736 XDC_FREE(xdcsc, lcv); /* add to free list */ 1737 break; 1738 case XD_SUB_WAIT: 1739 wakeup(iorq); 1740 case XD_SUB_POLL: 1741 xdcsc->ndone++; 1742 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1743 break; 1744 } 1745 1746 } else { 1747 1748 /* resubmit, put at front of wait queue */ 1749 XDC_HWAIT(xdcsc, lcv); 1750 } 1751 } 1752 1753 /* 1754 * now, if stuff is waiting, start it. 1755 * since we just reset it should go 1756 */ 1757 xdc_start(xdcsc, XDC_MAXIOPB); 1758 1759 /* ok, we did it */ 1760 if (oldfree == 0 && xdcsc->nfree) 1761 wakeup(&xdcsc->nfree); 1762 1763 #ifdef XDC_DIAG 1764 del = xdcsc->nwait + xdcsc->nrun + xdcsc->nfree + xdcsc->ndone; 1765 if (del != XDC_MAXIOPB) 1766 printf("%s: diag: xdc_reset miscount (%d should be %d)!\n", 1767 xdcsc->sc_dev.dv_xname, del, XDC_MAXIOPB); 1768 else 1769 if (xdcsc->ndone > XDC_MAXIOPB - XDC_SUBWAITLIM) 1770 printf("%s: diag: lots of done jobs (%d)\n", 1771 xdcsc->sc_dev.dv_xname, xdcsc->ndone); 1772 #endif 1773 printf("RESET DONE\n"); 1774 return (retval); 1775 } 1776 /* 1777 * xdc_start: start all waiting buffers 1778 */ 1779 1780 int 1781 xdc_start(xdcsc, maxio) 1782 struct xdc_softc *xdcsc; 1783 int maxio; 1784 1785 { 1786 int rqno; 1787 while (maxio && xdcsc->nwait && 1788 (xdcsc->xdc->xdc_csr & XDC_ADDING) == 0) { 1789 XDC_GET_WAITER(xdcsc, rqno); /* note: rqno is an "out" 1790 * param */ 1791 if (xdc_submit_iorq(xdcsc, rqno, XD_SUB_NOQ) != XD_ERR_AOK) 1792 panic("xdc_start"); /* should never happen */ 1793 maxio--; 1794 } 1795 } 1796 /* 1797 * xdc_remove_iorq: remove "done" IOPB's. 1798 */ 1799 1800 int 1801 xdc_remove_iorq(xdcsc) 1802 struct xdc_softc *xdcsc; 1803 1804 { 1805 int errno, rqno, comm, errs; 1806 struct xdc *xdc = xdcsc->xdc; 1807 u_long addr; 1808 struct xd_iopb *iopb; 1809 struct xd_iorq *iorq; 1810 struct buf *bp; 1811 1812 if (xdc->xdc_csr & XDC_F_ERROR) { 1813 /* 1814 * FATAL ERROR: should never happen under normal use. This 1815 * error is so bad, you can't even tell which IOPB is bad, so 1816 * we dump them all. 1817 */ 1818 errno = xdc->xdc_f_err; 1819 printf("%s: fatal error 0x%02x: %s\n", xdcsc->sc_dev.dv_xname, 1820 errno, xdc_e2str(errno)); 1821 if (xdc_reset(xdcsc, 0, XD_RSET_ALL, errno, 0) != XD_ERR_AOK) { 1822 printf("%s: soft reset failed!\n", 1823 xdcsc->sc_dev.dv_xname); 1824 panic("xdc_remove_iorq: controller DEAD"); 1825 } 1826 return (XD_ERR_AOK); 1827 } 1828 1829 /* 1830 * get iopb that is done 1831 * 1832 * hmm... I used to read the address of the done IOPB off the VME 1833 * registers and calculate the rqno directly from that. that worked 1834 * until I started putting a load on the controller. when loaded, i 1835 * would get interrupts but neither the REMIOPB or F_ERROR bits would 1836 * be set, even after DELAY'ing a while! later on the timeout 1837 * routine would detect IOPBs that were marked "running" but their 1838 * "done" bit was set. 
rather than dealing directly with this 1839 * problem, it is just easier to look at all running IOPB's for the 1840 * done bit. 1841 */ 1842 if (xdc->xdc_csr & XDC_REMIOPB) { 1843 xdc->xdc_csr = XDC_CLRRIO; 1844 } 1845 1846 for (rqno = 0; rqno < XDC_MAXIOPB; rqno++) { 1847 iorq = &xdcsc->reqs[rqno]; 1848 if (iorq->mode == 0 || XD_STATE(iorq->mode) == XD_SUB_DONE) 1849 continue; /* free, or done */ 1850 iopb = &xdcsc->iopbase[rqno]; 1851 if (iopb->done == 0) 1852 continue; /* not done yet */ 1853 1854 #ifdef XDC_DEBUG 1855 { 1856 u_char *rio = (u_char *) iopb; 1857 int sz = sizeof(struct xd_iopb), lcv; 1858 printf("%s: rio #%d [", xdcsc->sc_dev.dv_xname, rqno); 1859 for (lcv = 0; lcv < sz; lcv++) 1860 printf(" %02x", rio[lcv]); 1861 printf("]\n"); 1862 } 1863 #endif /* XDC_DEBUG */ 1864 1865 xdcsc->nrun--; 1866 1867 comm = iopb->comm; 1868 errs = iopb->errs; 1869 1870 if (errs) 1871 iorq->errno = iopb->errno; 1872 else 1873 iorq->errno = 0; 1874 1875 /* handle non-fatal errors */ 1876 1877 if (errs && 1878 xdc_error(xdcsc, iorq, iopb, rqno, comm) == XD_ERR_AOK) 1879 continue; /* AOK: we resubmitted it */ 1880 1881 1882 /* this iorq is now done (hasn't been restarted or anything) */ 1883 1884 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror) 1885 xdc_perror(iorq, iopb, 0); 1886 1887 /* now, if read/write check to make sure we got all the data 1888 * we needed. (this may not be the case if we got an error in 1889 * the middle of a multisector request). */ 1890 1891 if ((iorq->mode & XD_MODE_B144) != 0 && errs == 0 && 1892 (comm == XDCMD_RD || comm == XDCMD_WR)) { 1893 /* we just successfully processed a bad144 sector 1894 * note: if we are in bad 144 mode, the pointers have 1895 * been advanced already (see above) and are pointing 1896 * at the bad144 sector. to exit bad144 mode, we 1897 * must advance the pointers 1 sector and issue a new 1898 * request if there are still sectors left to process 1899 * 1900 */ 1901 XDC_ADVANCE(iorq, 1); /* advance 1 sector */ 1902 1903 /* exit b144 mode */ 1904 iorq->mode = iorq->mode & (~XD_MODE_B144); 1905 1906 if (iorq->sectcnt) { /* more to go! */ 1907 iorq->lasterror = iorq->errno = iopb->errno = 0; 1908 iopb->errs = iopb->done = 0; 1909 iorq->tries = 0; 1910 iopb->sectcnt = iorq->sectcnt; 1911 iopb->cylno = iorq->blockno / 1912 iorq->xd->sectpercyl; 1913 iopb->headno = 1914 (iorq->blockno / iorq->xd->nhead) % 1915 iorq->xd->nhead; 1916 iopb->sectno = iorq->blockno % XDFM_BPS; 1917 iopb->daddr = 1918 dvma_kvtopa((long)iorq->dbuf, BUS_VME32); 1919 XDC_HWAIT(xdcsc, rqno); 1920 xdc_start(xdcsc, 1); /* resubmit */ 1921 continue; 1922 } 1923 } 1924 /* final cleanup, totally done with this request */ 1925 1926 switch (XD_STATE(iorq->mode)) { 1927 case XD_SUB_NORM: 1928 bp = iorq->buf; 1929 if (errs) { 1930 bp->b_error = EIO; 1931 bp->b_flags |= B_ERROR; 1932 bp->b_resid = iorq->sectcnt * XDFM_BPS; 1933 } else { 1934 bp->b_resid = 0; /* done */ 1935 } 1936 /* Sun3: map/unmap regardless of B_PHYS */ 1937 dvma_mapout(iorq->dbufbase, 1938 iorq->buf->b_bcount); 1939 disk_unbusy(&iorq->xd->sc_dk, 1940 (bp->b_bcount - bp->b_resid)); 1941 XDC_FREE(xdcsc, rqno); 1942 biodone(bp); 1943 break; 1944 case XD_SUB_WAIT: 1945 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1946 xdcsc->ndone++; 1947 wakeup(iorq); 1948 break; 1949 case XD_SUB_POLL: 1950 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1951 xdcsc->ndone++; 1952 break; 1953 } 1954 } 1955 1956 return (XD_ERR_AOK); 1957 } 1958 1959 /* 1960 * xdc_perror: print error. 
1961 * - if still_trying is true: we got an error, retried and got a 1962 * different error. in that case lasterror is the old error, 1963 * and errno is the new one. 1964 * - if still_trying is not true, then if we ever had an error it 1965 * is in lasterror. also, if iorq->errno == 0, then we recovered 1966 * from that error (otherwise iorq->errno == iorq->lasterror). 1967 */ 1968 void 1969 xdc_perror(iorq, iopb, still_trying) 1970 struct xd_iorq *iorq; 1971 struct xd_iopb *iopb; 1972 int still_trying; 1973 1974 { 1975 1976 int error = iorq->lasterror; 1977 1978 printf("%s", (iorq->xd) ? 1979 iorq->xd->sc_dev.dv_xname : 1980 iorq->xdc->sc_dev.dv_xname); 1981 if (iorq->buf) 1982 printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev)); 1983 if (iopb->comm == XDCMD_RD || iopb->comm == XDCMD_WR) 1984 printf("%s %d/%d/%d: ", 1985 (iopb->comm == XDCMD_RD) ? "read" : "write", 1986 iopb->cylno, iopb->headno, iopb->sectno); 1987 printf("%s", xdc_e2str(error)); 1988 1989 if (still_trying) 1990 printf(" [still trying, new error=%s]", xdc_e2str(iorq->errno)); 1991 else 1992 if (iorq->errno == 0) 1993 printf(" [recovered in %d tries]", iorq->tries); 1994 1995 printf("\n"); 1996 } 1997 1998 /* 1999 * xdc_error: non-fatal error encountered... recover. 2000 * return AOK if resubmitted, return FAIL if this iopb is done 2001 */ 2002 int 2003 xdc_error(xdcsc, iorq, iopb, rqno, comm) 2004 struct xdc_softc *xdcsc; 2005 struct xd_iorq *iorq; 2006 struct xd_iopb *iopb; 2007 int rqno, comm; 2008 2009 { 2010 int errno = iorq->errno; 2011 int erract = errno & XD_ERA_MASK; 2012 int oldmode, advance, i; 2013 2014 if (erract == XD_ERA_RSET) { /* some errors require a reset */ 2015 oldmode = iorq->mode; 2016 iorq->mode = XD_SUB_DONE | (~XD_SUB_MASK & oldmode); 2017 xdcsc->ndone++; 2018 /* make xdc_start ignore us */ 2019 xdc_reset(xdcsc, 1, XD_RSET_NONE, errno, iorq->xd); 2020 iorq->mode = oldmode; 2021 xdcsc->ndone--; 2022 } 2023 /* check for read/write to a sector in bad144 table if bad: redirect 2024 * request to bad144 area */ 2025 2026 if ((comm == XDCMD_RD || comm == XDCMD_WR) && 2027 (iorq->mode & XD_MODE_B144) == 0) { 2028 advance = iorq->sectcnt - iopb->sectcnt; 2029 XDC_ADVANCE(iorq, advance); 2030 if ((i = isbad(&iorq->xd->dkb, iorq->blockno / iorq->xd->sectpercyl, 2031 (iorq->blockno / iorq->xd->nsect) % iorq->xd->nhead, 2032 iorq->blockno % iorq->xd->nsect)) != -1) { 2033 iorq->mode |= XD_MODE_B144; /* enter bad144 mode & 2034 * redirect */ 2035 iopb->errno = iopb->done = iopb->errs = 0; 2036 iopb->sectcnt = 1; 2037 iopb->cylno = (iorq->xd->ncyl + iorq->xd->acyl) - 2; 2038 /* second to last acyl */ 2039 i = iorq->xd->sectpercyl - 1 - i; /* follow bad144 2040 * standard */ 2041 iopb->headno = i / iorq->xd->nhead; 2042 iopb->sectno = i % iorq->xd->nhead; 2043 XDC_HWAIT(xdcsc, rqno); 2044 xdc_start(xdcsc, 1); /* resubmit */ 2045 return (XD_ERR_AOK); /* recovered! */ 2046 } 2047 } 2048 2049 /* 2050 * it isn't a bad144 sector, must be real error! see if we can retry 2051 * it? 2052 */ 2053 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror) 2054 xdc_perror(iorq, iopb, 1); /* inform of error state 2055 * change */ 2056 iorq->lasterror = errno; 2057 2058 if ((erract == XD_ERA_RSET || erract == XD_ERA_HARD) 2059 && iorq->tries < XDC_MAXTRIES) { /* retry? */ 2060 iorq->tries++; 2061 iorq->errno = iopb->errno = iopb->done = iopb->errs = 0; 2062 XDC_HWAIT(xdcsc, rqno); 2063 xdc_start(xdcsc, 1); /* restart */ 2064 return (XD_ERR_AOK); /* recovered! 
	}

	/* failed to recover from this error */
	return (XD_ERR_FAIL);
}

/*
 * xdc_tick: make sure xd is still alive and ticking (err, kicking).
 */
void
xdc_tick(arg)
	void *arg;

{
	struct xdc_softc *xdcsc = arg;
	int lcv, s, reset = 0;
#ifdef XDC_DIAG
	int wait, run, free, done, whd;
	u_char fqc[XDC_MAXIOPB], wqc[XDC_MAXIOPB], mark[XDC_MAXIOPB];
	s = splbio();
	wait = xdcsc->nwait;
	run = xdcsc->nrun;
	free = xdcsc->nfree;
	done = xdcsc->ndone;
	whd = xdcsc->waithead;	/* head of the wait queue, for the dump below */
	bcopy(xdcsc->waitq, wqc, sizeof(wqc));
	bcopy(xdcsc->freereq, fqc, sizeof(fqc));
	splx(s);
	if (wait + run + free + done != XDC_MAXIOPB) {
		printf("%s: diag: IOPB miscount (got w/f/r/d %d/%d/%d/%d, wanted %d)\n",
		    xdcsc->sc_dev.dv_xname, wait, free, run, done, XDC_MAXIOPB);
		bzero(mark, sizeof(mark));
		printf("FREE: ");
		for (lcv = free; lcv > 0; lcv--) {
			printf("%d ", fqc[lcv - 1]);
			mark[fqc[lcv - 1]] = 1;
		}
		printf("\nWAIT: ");
		lcv = wait;
		while (lcv > 0) {
			printf("%d ", wqc[whd]);
			mark[wqc[whd]] = 1;
			whd = (whd + 1) % XDC_MAXIOPB;
			lcv--;
		}
		printf("\n");
		for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
			if (mark[lcv] == 0)
				printf("MARK: running %d: mode %d done %d errs %d errno 0x%x ttl %d buf %x\n",
				    lcv, xdcsc->reqs[lcv].mode,
				    xdcsc->iopbase[lcv].done,
				    xdcsc->iopbase[lcv].errs,
				    xdcsc->iopbase[lcv].errno,
				    xdcsc->reqs[lcv].ttl, xdcsc->reqs[lcv].buf);
		}
	} else
		if (done > XDC_MAXIOPB - XDC_SUBWAITLIM)
			printf("%s: diag: lots of done jobs (%d)\n",
			    xdcsc->sc_dev.dv_xname, done);

#endif
#ifdef XDC_DEBUG
	printf("%s: tick: csr 0x%x, w/f/r/d %d/%d/%d/%d\n",
	    xdcsc->sc_dev.dv_xname,
	    xdcsc->xdc->xdc_csr, xdcsc->nwait, xdcsc->nfree, xdcsc->nrun,
	    xdcsc->ndone);
	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
		if (xdcsc->reqs[lcv].mode)
			printf("running %d: mode %d done %d errs %d errno 0x%x\n",
			    lcv,
			    xdcsc->reqs[lcv].mode, xdcsc->iopbase[lcv].done,
			    xdcsc->iopbase[lcv].errs, xdcsc->iopbase[lcv].errno);
	}
#endif

	/* reduce ttl for each request; if one goes to zero, reset the xdc */
	s = splbio();
	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
		if (xdcsc->reqs[lcv].mode == 0 ||
		    XD_STATE(xdcsc->reqs[lcv].mode) == XD_SUB_DONE)
			continue;
		xdcsc->reqs[lcv].ttl--;
		if (xdcsc->reqs[lcv].ttl == 0)
			reset = 1;
	}
	if (reset) {
		printf("%s: watchdog timeout\n", xdcsc->sc_dev.dv_xname);
		xdc_reset(xdcsc, 0, XD_RSET_NONE, XD_ERR_FAIL, NULL);
	}
	splx(s);

	/* until next time */

	timeout(xdc_tick, xdcsc, XDC_TICKCNT);
}

/*
 * xdc_ioctlcmd: this function provides a user level interface to the
 * controller via ioctl.   this allows "format" programs to be written
 * in user code, and is also useful for some debugging.  we return
 * an error code.  called at user priority.
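 *
 * an illustrative (non-compiled) sketch of a user program driving this
 * interface is appended at the end of this file.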
 */
int
xdc_ioctlcmd(xd, dev, xio)
	struct xd_softc *xd;
	dev_t dev;
	struct xd_iocmd *xio;

{
	int s, err, rqno, dummy;
	caddr_t dvmabuf = NULL;
	struct xdc_softc *xdcsc;

	/* check sanity of requested command */

	switch (xio->cmd) {

	case XDCMD_NOP:	/* no op: everything should be zero */
		if (xio->subfn || xio->dptr || xio->dlen ||
		    xio->block || xio->sectcnt)
			return (EINVAL);
		break;

	case XDCMD_RD:	/* read / write sectors (up to XD_IOCMD_MAXS) */
	case XDCMD_WR:
		if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS ||
		    xio->sectcnt * XDFM_BPS != xio->dlen || xio->dptr == NULL)
			return (EINVAL);
		break;

	case XDCMD_SK:	/* seek: doesn't seem useful to export this */
		return (EINVAL);

	case XDCMD_WRP:	/* write parameters */
		return (EINVAL);/* not useful, except maybe drive
				 * parameters... but drive parameters should
				 * go via disklabel changes */

	case XDCMD_RDP:	/* read parameters */
		if (xio->subfn != XDFUN_DRV ||
		    xio->dlen || xio->block || xio->dptr)
			return (EINVAL);	/* allow read drive params to
						 * get hw_spt */
		xio->sectcnt = xd->hw_spt;	/* we already know the answer */
		return (0);
		break;

	case XDCMD_XRD:	/* extended read/write */
	case XDCMD_XWR:

		switch (xio->subfn) {

		case XDFUN_THD:/* track headers */
			if (xio->sectcnt != xd->hw_spt ||
			    (xio->block % xd->nsect) != 0 ||
			    xio->dlen != XD_IOCMD_HSZ * xd->hw_spt ||
			    xio->dptr == NULL)
				return (EINVAL);
			xio->sectcnt = 0;
			break;

		case XDFUN_FMT:/* NOTE: also XDFUN_VFY */
			if (xio->cmd == XDCMD_XRD)
				return (EINVAL);	/* no XDFUN_VFY */
			if (xio->sectcnt || xio->dlen ||
			    (xio->block % xd->nsect) != 0 || xio->dptr)
				return (EINVAL);
			break;

		case XDFUN_HDR:/* header, header verify, data, data ECC */
			return (EINVAL);	/* not yet */

		case XDFUN_DM:	/* defect map */
		case XDFUN_DMX:/* defect map (alternate location) */
			if (xio->sectcnt || xio->dlen != XD_IOCMD_DMSZ ||
			    (xio->block % xd->nsect) != 0 || xio->dptr == NULL)
				return (EINVAL);
			break;

		default:
			return (EINVAL);
		}
		break;

	case XDCMD_TST:	/* diagnostics */
		return (EINVAL);

	default:
		return (EINVAL);/* ??? */
	}

	/* create DVMA buffer for request if needed */

	if (xio->dlen) {
		dvmabuf = dvma_malloc(xio->dlen);
		if (xio->cmd == XDCMD_WR || xio->cmd == XDCMD_XWR) {
			if ((err = copyin(xio->dptr, dvmabuf, xio->dlen)) != 0) {
				dvma_free(dvmabuf, xio->dlen);
				return (err);
			}
		}
	}
	/* do it! */
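	/*
	 * the request is submitted in XD_SUB_WAIT mode, so the submitter
	 * sleeps until the iopb completes (see the wakeup() in the done
	 * handler above); the resulting errno/tries are copied back into
	 * the xd_iocmd, the iorq is released with XDC_DONE, and read data
	 * is copied out of the DVMA buffer to the user's buffer.
	 */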

	err = 0;
	xdcsc = xd->parent;
	s = splbio();
	rqno = xdc_cmd(xdcsc, xio->cmd, xio->subfn, xd->xd_drive, xio->block,
	    xio->sectcnt, dvmabuf, XD_SUB_WAIT);
	if (rqno == XD_ERR_FAIL) {
		err = EIO;
		goto done;
	}
	xio->errno = xdcsc->reqs[rqno].errno;
	xio->tries = xdcsc->reqs[rqno].tries;
	XDC_DONE(xdcsc, rqno, dummy);

	if (xio->cmd == XDCMD_RD || xio->cmd == XDCMD_XRD)
		err = copyout(dvmabuf, xio->dptr, xio->dlen);

done:
	splx(s);
	if (dvmabuf)
		dvma_free(dvmabuf, xio->dlen);
	return (err);
}

/*
 * xdc_e2str: convert error code number into an error string
 */
char *
xdc_e2str(no)
	int no;
{
	switch (no) {
	case XD_ERR_FAIL:
		return ("Software fatal error");
	case XD_ERR_AOK:
		return ("Successful completion");
	case XD_ERR_ICYL:
		return ("Illegal cylinder address");
	case XD_ERR_IHD:
		return ("Illegal head address");
	case XD_ERR_ISEC:
		return ("Illegal sector address");
	case XD_ERR_CZER:
		return ("Count zero");
	case XD_ERR_UIMP:
		return ("Unimplemented command");
	case XD_ERR_IF1:
		return ("Illegal field length 1");
	case XD_ERR_IF2:
		return ("Illegal field length 2");
	case XD_ERR_IF3:
		return ("Illegal field length 3");
	case XD_ERR_IF4:
		return ("Illegal field length 4");
	case XD_ERR_IF5:
		return ("Illegal field length 5");
	case XD_ERR_IF6:
		return ("Illegal field length 6");
	case XD_ERR_IF7:
		return ("Illegal field length 7");
	case XD_ERR_ISG:
		return ("Illegal scatter/gather length");
	case XD_ERR_ISPT:
		return ("Not enough sectors per track");
	case XD_ERR_ALGN:
		return ("Next IOPB address alignment error");
	case XD_ERR_SGAL:
		return ("Scatter/gather address alignment error");
	case XD_ERR_SGEC:
		return ("Scatter/gather with auto-ECC");
	case XD_ERR_SECC:
		return ("Soft ECC corrected");
	case XD_ERR_SIGN:
		return ("ECC ignored");
	case XD_ERR_ASEK:
		return ("Auto-seek retry recovered");
	case XD_ERR_RTRY:
		return ("Soft retry recovered");
	case XD_ERR_HECC:
		return ("Hard data ECC");
	case XD_ERR_NHDR:
		return ("Header not found");
	case XD_ERR_NRDY:
		return ("Drive not ready");
	case XD_ERR_TOUT:
		return ("Operation timeout");
	case XD_ERR_VTIM:
		return ("VMEDMA timeout");
	case XD_ERR_DSEQ:
		return ("Disk sequencer error");
	case XD_ERR_HDEC:
		return ("Header ECC error");
	case XD_ERR_RVFY:
		return ("Read verify");
	case XD_ERR_VFER:
		return ("Fatal VMEDMA error");
	case XD_ERR_VBUS:
		return ("VMEbus error");
	case XD_ERR_DFLT:
		return ("Drive faulted");
	case XD_ERR_HECY:
		return ("Header error/cylinder");
	case XD_ERR_HEHD:
		return ("Header error/head");
	case XD_ERR_NOCY:
		return ("Drive not on-cylinder");
	case XD_ERR_SEEK:
		return ("Seek error");
	case XD_ERR_ILSS:
		return ("Illegal sector size");
	case XD_ERR_SEC:
		return ("Soft ECC");
	case XD_ERR_WPER:
		return ("Write-protect error");
	case XD_ERR_IRAM:
		return ("IRAM self test failure");
	case XD_ERR_MT3:
		return ("Maintenance test 3 failure (DSKCEL RAM)");
	case XD_ERR_MT4:
		return ("Maintenance test 4 failure (header shift reg)");
	case XD_ERR_MT5:
		return ("Maintenance test 5 failure (VMEDMA regs)");
	case XD_ERR_MT6:
		return ("Maintenance test 6 failure (REGCEL chip)");
	case XD_ERR_MT7:
		return ("Maintenance test 7 failure (buffer parity)");
	case XD_ERR_MT8:
		return ("Maintenance test 8 failure (disk FIFO)");
	case XD_ERR_IOCK:
		return ("IOPB checksum miscompare");
	case XD_ERR_IODM:
		return ("IOPB DMA fatal");
	case XD_ERR_IOAL:
		return ("IOPB address alignment error");
	case XD_ERR_FIRM:
		return ("Firmware error");
	case XD_ERR_MMOD:
		return ("Illegal maintenance mode test number");
	case XD_ERR_ACFL:
		return ("ACFAIL asserted");
	default:
		return ("Unknown error");
	}
}
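
/*
 * Illustrative sketch (kept under "#if 0", never compiled): how a
 * user-level "format"-style utility might use the ioctl interface
 * provided by xdc_ioctlcmd() above.  The struct xd_iocmd fields and the
 * XDCMD_RDP/XDFUN_DRV command pair are the ones checked in
 * xdc_ioctlcmd(); the ioctl request name (assumed here to be XDIOCCMD,
 * from <sun3/dev/xio.h>), the device path and the way the xd ioctl
 * entry point dispatches to xdc_ioctlcmd() are assumptions, and error
 * handling is minimal.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

#include <sun3/dev/xio.h>

int
main(void)
{
	struct xd_iocmd xio;
	int fd;

	/* raw character device for the whole disk (hypothetical path) */
	fd = open("/dev/rxd0c", O_RDONLY);
	if (fd == -1) {
		perror("open");
		return (1);
	}

	/*
	 * ask the driver for the hardware sectors-per-track value:
	 * XDCMD_RDP/XDFUN_DRV must be sent with dlen, block and dptr all
	 * zero (see the sanity checks in xdc_ioctlcmd); the answer comes
	 * back in xio.sectcnt.
	 */
	memset(&xio, 0, sizeof(xio));
	xio.cmd = XDCMD_RDP;
	xio.subfn = XDFUN_DRV;
	if (ioctl(fd, XDIOCCMD, &xio) == -1) {
		perror("XDIOCCMD");
		return (1);
	}
	printf("hw_spt = %d\n", xio.sectcnt);
	return (0);
}
#endif	/* 0 */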