/*	$NetBSD: xd.c,v 1.46 2003/09/29 09:50:22 wiz Exp $	*/

/*
 *
 * Copyright (c) 1995 Charles D. Cranor
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *
 * x d . c   x y l o g i c s   7 5 3 / 7 0 5 3   v m e / s m d   d r i v e r
 *
 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
 * id: &Id: xd.c,v 1.9 1995/09/25 20:12:44 chuck Exp &
 * started: 27-Feb-95
 * references: [1] Xylogics Model 753 User's Manual
 *                 part number: 166-753-001, Revision B, May 21, 1988.
 *                 "Your Partner For Performance"
 *             [2] other NetBSD disk device drivers
 *
 * Special thanks go to Scott E. Campbell of Xylogics, Inc. for taking
 * the time to answer some of my questions about the 753/7053.
 *
 * note: the 753 and the 7053 are programmed the same way, but are
 * different sizes.  the 753 is a 6U VME card, while the 7053 is a 9U
 * VME card (found in many VME based suns).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xd.c,v 1.46 2003/09/29 09:50:22 wiz Exp $");

#undef XDC_DEBUG		/* full debug */
#define XDC_DIAG		/* extra sanity checks */
#if defined(DIAGNOSTIC) && !defined(XDC_DIAG)
#define XDC_DIAG		/* link in with master DIAG option */
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/syslog.h>
#include <sys/dkbad.h>
#include <sys/conf.h>

#include <uvm/uvm_extern.h>

#include <machine/autoconf.h>
#include <machine/dvma.h>

#include <dev/sun/disklabel.h>

#include <sun3/dev/xdreg.h>
#include <sun3/dev/xdvar.h>
#include <sun3/dev/xio.h>

#include "locators.h"

/*
 * Print a complaint when no xd children were specified
 * in the config file.  Better than a link error...
 *
 * XXX: Some folks say this driver should be split in two,
 * but that seems pointless with ONLY one type of child.
 */
#include "xd.h"
#if NXD == 0
#error "xdc but no xd?"
#endif

/*
 * macros
 */

/*
 * XDC_TWAIT: add iorq "N" to tail of SC's wait queue
 */
#define XDC_TWAIT(SC, N) { \
	(SC)->waitq[(SC)->waitend] = (N); \
	(SC)->waitend = ((SC)->waitend + 1) % XDC_MAXIOPB; \
	(SC)->nwait++; \
}

/*
 * XDC_HWAIT: add iorq "N" to head of SC's wait queue
 */
#define XDC_HWAIT(SC, N) { \
	(SC)->waithead = ((SC)->waithead == 0) ? \
		(XDC_MAXIOPB - 1) : ((SC)->waithead - 1); \
	(SC)->waitq[(SC)->waithead] = (N); \
	(SC)->nwait++; \
}

/*
 * XDC_GET_WAITER: gets the first request waiting on the waitq
 * and removes it (so it can be submitted)
 */
#define XDC_GET_WAITER(XDCSC, RQ) { \
	(RQ) = (XDCSC)->waitq[(XDCSC)->waithead]; \
	(XDCSC)->waithead = ((XDCSC)->waithead + 1) % XDC_MAXIOPB; \
	xdcsc->nwait--; \
}

/*
 * XDC_FREE: add iorq "N" to SC's free list
 */
#define XDC_FREE(SC, N) { \
	(SC)->freereq[(SC)->nfree++] = (N); \
	(SC)->reqs[N].mode = 0; \
	if ((SC)->nfree == 1) wakeup(&(SC)->nfree); \
}


/*
 * XDC_RQALLOC: allocate an iorq off the free list (assume nfree > 0).
 */
#define XDC_RQALLOC(XDCSC) (XDCSC)->freereq[--((XDCSC)->nfree)]

/*
 * XDC_GO: start iopb ADDR (DVMA addr in a u_long) on XDC
 */
#define XDC_GO(XDC, ADDR) { \
	(XDC)->xdc_iopbaddr0 = ((ADDR) & 0xff); \
	(ADDR) = ((ADDR) >> 8); \
	(XDC)->xdc_iopbaddr1 = ((ADDR) & 0xff); \
	(ADDR) = ((ADDR) >> 8); \
	(XDC)->xdc_iopbaddr2 = ((ADDR) & 0xff); \
	(ADDR) = ((ADDR) >> 8); \
	(XDC)->xdc_iopbaddr3 = (ADDR); \
	(XDC)->xdc_iopbamod = XDC_ADDRMOD; \
	(XDC)->xdc_csr = XDC_ADDIOPB; /* go! */ \
}

/*
 * XDC_WAIT: wait for XDC's csr "BITS" to come on in "TIME".
 * LCV is a counter.  If it goes to zero then we timed out.
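 *
 * usage sketch, drawn from the callers later in this file (e.g.
 * xdc_piodriver and xdc_reset); nothing here is new behavior:
 *
 *	u_long count;
 *	XDC_WAIT(xdc, count, XDC_MAXTIME, (XDC_REMIOPB | XDC_F_ERROR));
 *	if (count == 0)
 *		... no bit came on within TIME iterations of DELAY(1),
 *		... so the caller treats it as a controller timeout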
171 */ 172 #define XDC_WAIT(XDC, LCV, TIME, BITS) { \ 173 (LCV) = (TIME); \ 174 while ((LCV) > 0) { \ 175 if ((XDC)->xdc_csr & (BITS)) break; \ 176 (LCV) = (LCV) - 1; \ 177 DELAY(1); \ 178 } \ 179 } 180 181 /* 182 * XDC_DONE: don't need IORQ, get error code and free (done after xdc_cmd) 183 */ 184 #define XDC_DONE(SC,RQ,ER) { \ 185 if ((RQ) == XD_ERR_FAIL) { \ 186 (ER) = (RQ); \ 187 } else { \ 188 if ((SC)->ndone-- == XDC_SUBWAITLIM) \ 189 wakeup(&(SC)->ndone); \ 190 (ER) = (SC)->reqs[RQ].errno; \ 191 XDC_FREE((SC), (RQ)); \ 192 } \ 193 } 194 195 /* 196 * XDC_ADVANCE: advance iorq's pointers by a number of sectors 197 */ 198 #define XDC_ADVANCE(IORQ, N) { \ 199 if (N) { \ 200 (IORQ)->sectcnt -= (N); \ 201 (IORQ)->blockno += (N); \ 202 (IORQ)->dbuf += ((N)*XDFM_BPS); \ 203 } \ 204 } 205 206 /* 207 * note - addresses you can sleep on: 208 * [1] & of xd_softc's "state" (waiting for a chance to attach a drive) 209 * [2] & of xdc_softc's "nfree" (waiting for a free iorq/iopb) 210 * [3] & of xdc_softc's "ndone" (waiting for number of done iorq/iopb's 211 * to drop below XDC_SUBWAITLIM) 212 * [4] & an iorq (waiting for an XD_SUB_WAIT iorq to finish) 213 */ 214 215 216 /* 217 * function prototypes 218 * "xdc_*" functions are internal, all others are external interfaces 219 */ 220 221 /* internals */ 222 int xdc_cmd __P((struct xdc_softc *, int, int, int, int, int, char *, int)); 223 char *xdc_e2str __P((int)); 224 int xdc_error __P((struct xdc_softc *, struct xd_iorq *, 225 struct xd_iopb *, int, int)); 226 int xdc_ioctlcmd __P((struct xd_softc *, dev_t dev, struct xd_iocmd *)); 227 void xdc_perror __P((struct xd_iorq *, struct xd_iopb *, int)); 228 int xdc_piodriver __P((struct xdc_softc *, int, int)); 229 int xdc_remove_iorq __P((struct xdc_softc *)); 230 int xdc_reset __P((struct xdc_softc *, int, int, int, struct xd_softc *)); 231 inline void xdc_rqinit __P((struct xd_iorq *, struct xdc_softc *, 232 struct xd_softc *, int, u_long, int, 233 caddr_t, struct buf *)); 234 void xdc_rqtopb __P((struct xd_iorq *, struct xd_iopb *, int, int)); 235 void xdc_start __P((struct xdc_softc *, int)); 236 int xdc_startbuf __P((struct xdc_softc *, struct xd_softc *, struct buf *)); 237 int xdc_submit_iorq __P((struct xdc_softc *, int, int)); 238 void xdc_tick __P((void *)); 239 void xdc_xdreset __P((struct xdc_softc *, struct xd_softc *)); 240 241 /* machine interrupt hook */ 242 int xdcintr __P((void *)); 243 244 /* autoconf */ 245 static int xdcmatch __P((struct device *, struct cfdata *, void *)); 246 static void xdcattach __P((struct device *, struct device *, void *)); 247 static int xdc_print __P((void *, const char *name)); 248 249 static int xdmatch __P((struct device *, struct cfdata *, void *)); 250 static void xdattach __P((struct device *, struct device *, void *)); 251 static void xd_init __P((struct xd_softc *)); 252 253 static void xddummystrat __P((struct buf *)); 254 int xdgetdisklabel __P((struct xd_softc *, void *)); 255 256 /* 257 * cfattach's: device driver interface to autoconfig 258 */ 259 260 CFATTACH_DECL(xdc, sizeof(struct xdc_softc), 261 xdcmatch, xdcattach, NULL, NULL); 262 263 CFATTACH_DECL(xd, sizeof(struct xd_softc), 264 xdmatch, xdattach, NULL, NULL); 265 266 extern struct cfdriver xd_cd; 267 268 struct xdc_attach_args { /* this is the "aux" args to xdattach */ 269 int driveno; /* unit number */ 270 char *dvmabuf; /* scratch buffer for reading disk label */ 271 int fullmode; /* submit mode */ 272 int booting; /* are we booting or not? 
*/ 273 }; 274 275 dev_type_open(xdopen); 276 dev_type_close(xdclose); 277 dev_type_read(xdread); 278 dev_type_write(xdwrite); 279 dev_type_ioctl(xdioctl); 280 dev_type_strategy(xdstrategy); 281 dev_type_dump(xddump); 282 dev_type_size(xdsize); 283 284 const struct bdevsw xd_bdevsw = { 285 xdopen, xdclose, xdstrategy, xdioctl, xddump, xdsize, D_DISK 286 }; 287 288 const struct cdevsw xd_cdevsw = { 289 xdopen, xdclose, xdread, xdwrite, xdioctl, 290 nostop, notty, nopoll, nommap, nokqfilter, D_DISK 291 }; 292 293 /* 294 * dkdriver 295 */ 296 297 struct dkdriver xddkdriver = {xdstrategy}; 298 299 /* 300 * start: disk label fix code (XXX) 301 */ 302 303 static void *xd_labeldata; 304 305 static void 306 xddummystrat(bp) 307 struct buf *bp; 308 { 309 if (bp->b_bcount != XDFM_BPS) 310 panic("xddummystrat"); 311 memcpy(bp->b_data, xd_labeldata, XDFM_BPS); 312 bp->b_flags |= B_DONE; 313 bp->b_flags &= ~B_BUSY; 314 } 315 316 int 317 xdgetdisklabel(xd, b) 318 struct xd_softc *xd; 319 void *b; 320 { 321 const char *err; 322 struct sun_disklabel *sdl; 323 324 /* We already have the label data in `b'; setup for dummy strategy */ 325 xd_labeldata = b; 326 327 /* Required parameter for readdisklabel() */ 328 xd->sc_dk.dk_label->d_secsize = XDFM_BPS; 329 330 err = readdisklabel(MAKEDISKDEV(0, xd->sc_dev.dv_unit, RAW_PART), 331 xddummystrat, 332 xd->sc_dk.dk_label, xd->sc_dk.dk_cpulabel); 333 if (err) { 334 printf("%s: %s\n", xd->sc_dev.dv_xname, err); 335 return(XD_ERR_FAIL); 336 } 337 338 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */ 339 sdl = (struct sun_disklabel *)xd->sc_dk.dk_cpulabel->cd_block; 340 if (sdl->sl_magic == SUN_DKMAGIC) 341 xd->pcyl = sdl->sl_pcyl; 342 else { 343 printf("%s: WARNING: no `pcyl' in disk label.\n", 344 xd->sc_dev.dv_xname); 345 xd->pcyl = xd->sc_dk.dk_label->d_ncylinders + 346 xd->sc_dk.dk_label->d_acylinders; 347 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n", 348 xd->sc_dev.dv_xname, xd->pcyl); 349 } 350 351 xd->ncyl = xd->sc_dk.dk_label->d_ncylinders; 352 xd->acyl = xd->sc_dk.dk_label->d_acylinders; 353 xd->nhead = xd->sc_dk.dk_label->d_ntracks; 354 xd->nsect = xd->sc_dk.dk_label->d_nsectors; 355 xd->sectpercyl = xd->nhead * xd->nsect; 356 xd->sc_dk.dk_label->d_secsize = XDFM_BPS; /* not handled by 357 * sun->bsd */ 358 return(XD_ERR_AOK); 359 } 360 361 /* 362 * end: disk label fix code (XXX) 363 */ 364 365 /* 366 * a u t o c o n f i g f u n c t i o n s 367 */ 368 369 /* 370 * xdcmatch: determine if xdc is present or not. we do a 371 * soft reset to detect the xdc. 372 */ 373 374 int xdcmatch(parent, cf, aux) 375 struct device *parent; 376 struct cfdata *cf; 377 void *aux; 378 { 379 struct confargs *ca = aux; 380 381 /* No default VME address. */ 382 if (ca->ca_paddr == -1) 383 return (0); 384 385 /* Make sure something is there... */ 386 if (bus_peek(ca->ca_bustype, ca->ca_paddr + 11, 1) == -1) 387 return (0); 388 389 /* Default interrupt priority. */ 390 if (ca->ca_intpri == -1) 391 ca->ca_intpri = 2; 392 393 return (1); 394 } 395 396 /* 397 * xdcattach: attach controller 398 */ 399 void 400 xdcattach(parent, self, aux) 401 struct device *parent, *self; 402 void *aux; 403 { 404 struct xdc_softc *xdc = (void *) self; 405 struct confargs *ca = aux; 406 struct xdc_attach_args xa; 407 int lcv, rqno, err; 408 struct xd_iopb_ctrl *ctl; 409 410 /* get addressing and intr level stuff from autoconfig and load it 411 * into our xdc_softc. 
*/ 412 413 xdc->xdc = (struct xdc *) 414 bus_mapin(ca->ca_bustype, ca->ca_paddr, sizeof(struct xdc)); 415 xdc->bustype = ca->ca_bustype; 416 xdc->ipl = ca->ca_intpri; 417 xdc->vector = ca->ca_intvec; 418 419 for (lcv = 0; lcv < XDC_MAXDEV; lcv++) 420 xdc->sc_drives[lcv] = (struct xd_softc *) 0; 421 422 /* allocate and zero buffers 423 * 424 * note: we simplify the code by allocating the max number of iopbs and 425 * iorq's up front. thus, we avoid linked lists and the costs 426 * associated with them in exchange for wasting a little memory. */ 427 428 xdc->iopbase = (struct xd_iopb *) 429 dvma_malloc(XDC_MAXIOPB * sizeof(struct xd_iopb)); /* KVA */ 430 memset(xdc->iopbase, 0, XDC_MAXIOPB * sizeof(struct xd_iopb)); 431 xdc->dvmaiopb = (struct xd_iopb *) 432 dvma_kvtopa(xdc->iopbase, xdc->bustype); 433 xdc->reqs = (struct xd_iorq *) 434 malloc(XDC_MAXIOPB * sizeof(struct xd_iorq), M_DEVBUF, M_NOWAIT); 435 if (xdc->reqs == NULL) 436 panic("xdc malloc"); 437 memset(xdc->reqs, 0, XDC_MAXIOPB * sizeof(struct xd_iorq)); 438 439 /* init free list, iorq to iopb pointers, and non-zero fields in the 440 * iopb which never change. */ 441 442 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 443 xdc->reqs[lcv].iopb = &xdc->iopbase[lcv]; 444 xdc->freereq[lcv] = lcv; 445 xdc->iopbase[lcv].fixd = 1; /* always the same */ 446 xdc->iopbase[lcv].naddrmod = XDC_ADDRMOD; /* always the same */ 447 xdc->iopbase[lcv].intr_vec = xdc->vector; /* always the same */ 448 } 449 xdc->nfree = XDC_MAXIOPB; 450 xdc->nrun = 0; 451 xdc->waithead = xdc->waitend = xdc->nwait = 0; 452 xdc->ndone = 0; 453 454 /* init queue of waiting bufs */ 455 456 bufq_alloc(&xdc->sc_wq, BUFQ_FCFS); 457 callout_init(&xdc->sc_tick_ch); 458 459 /* 460 * section 7 of the manual tells us how to init the controller: 461 * - read controller parameters (6/0) 462 * - write controller parameters (5/0) 463 */ 464 465 /* read controller parameters and insure we have a 753/7053 */ 466 467 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL); 468 if (rqno == XD_ERR_FAIL) { 469 printf(": couldn't read controller params\n"); 470 return; /* shouldn't ever happen */ 471 } 472 ctl = (struct xd_iopb_ctrl *) & xdc->iopbase[rqno]; 473 if (ctl->ctype != XDCT_753) { 474 if (xdc->reqs[rqno].errno) 475 printf(": %s: ", xdc_e2str(xdc->reqs[rqno].errno)); 476 printf(": doesn't identify as a 753/7053\n"); 477 XDC_DONE(xdc, rqno, err); 478 return; 479 } 480 printf(": Xylogics 753/7053, PROM=0x%x.%02x.%02x\n", 481 ctl->eprom_partno, ctl->eprom_lvl, ctl->eprom_rev); 482 XDC_DONE(xdc, rqno, err); 483 484 /* now write controller parameters (xdc_cmd sets all params for us) */ 485 486 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL); 487 XDC_DONE(xdc, rqno, err); 488 if (err) { 489 printf("%s: controller config error: %s\n", 490 xdc->sc_dev.dv_xname, xdc_e2str(err)); 491 return; 492 } 493 494 /* link in interrupt with higher level software */ 495 isr_add_vectored(xdcintr, (void *)xdc, 496 ca->ca_intpri, ca->ca_intvec); 497 evcnt_attach_dynamic(&xdc->sc_intrcnt, EVCNT_TYPE_INTR, NULL, 498 xdc->sc_dev.dv_xname, "intr"); 499 500 /* now we must look for disks using autoconfig */ 501 xa.booting = 1; 502 for (xa.driveno = 0; xa.driveno < XDC_MAXDEV; xa.driveno++) 503 (void) config_found(self, (void *) &xa, xdc_print); 504 505 /* start the watchdog clock */ 506 callout_reset(&xdc->sc_tick_ch, XDC_TICKCNT, xdc_tick, xdc); 507 } 508 509 int 510 xdc_print(aux, name) 511 void *aux; 512 const char *name; 513 { 514 struct xdc_attach_args *xa = aux; 515 516 if (name 
!= NULL) 517 aprint_normal("%s: ", name); 518 519 if (xa->driveno != -1) 520 aprint_normal(" drive %d", xa->driveno); 521 522 return UNCONF; 523 } 524 525 /* 526 * xdmatch: probe for disk. 527 * 528 * note: we almost always say disk is present. this allows us to 529 * spin up and configure a disk after the system is booted (we can 530 * call xdattach!). Also, wire down the relationship between the 531 * xd* and xdc* devices, to simplify boot device identification. 532 */ 533 int 534 xdmatch(parent, cf, aux) 535 struct device *parent; 536 struct cfdata *cf; 537 void *aux; 538 { 539 struct xdc_attach_args *xa = aux; 540 int xd_unit; 541 542 /* Match only on the "wired-down" controller+disk. */ 543 xd_unit = parent->dv_unit * 2 + xa->driveno; 544 if (cf->cf_unit != xd_unit) 545 return (0); 546 547 return (1); 548 } 549 550 /* 551 * xdattach: attach a disk. 552 */ 553 void 554 xdattach(parent, self, aux) 555 struct device *parent, *self; 556 void *aux; 557 558 { 559 struct xd_softc *xd = (void *) self; 560 struct xdc_softc *xdc = (void *) parent; 561 struct xdc_attach_args *xa = aux; 562 563 printf("\n"); 564 565 /* 566 * Always re-initialize the disk structure. We want statistics 567 * to start with a clean slate. 568 */ 569 memset(&xd->sc_dk, 0, sizeof(xd->sc_dk)); 570 xd->sc_dk.dk_driver = &xddkdriver; 571 xd->sc_dk.dk_name = xd->sc_dev.dv_xname; 572 573 xd->state = XD_DRIVE_UNKNOWN; /* to start */ 574 xd->flags = 0; 575 xd->parent = xdc; 576 577 xd->xd_drive = xa->driveno; 578 xdc->sc_drives[xa->driveno] = xd; 579 580 /* Do init work common to attach and open. */ 581 xd_init(xd); 582 } 583 584 /* 585 * end of autoconfig functions 586 */ 587 588 /* 589 * Initialize a disk. This can be called from both autoconf and 590 * also from xdopen/xdstrategy. 591 */ 592 static void 593 xd_init(xd) 594 struct xd_softc *xd; 595 { 596 struct xdc_softc *xdc; 597 struct dkbad *dkb; 598 struct xd_iopb_drive *driopb; 599 void *dvmabuf; 600 int rqno, err, spt, mb, blk, lcv, fullmode, newstate; 601 602 xdc = xd->parent; 603 xd->state = XD_DRIVE_ATTACHING; 604 newstate = XD_DRIVE_UNKNOWN; 605 fullmode = (cold) ? XD_SUB_POLL : XD_SUB_WAIT; 606 dvmabuf = dvma_malloc(XDFM_BPS); 607 608 /* first try and reset the drive */ 609 rqno = xdc_cmd(xdc, XDCMD_RST, 0, xd->xd_drive, 0, 0, 0, fullmode); 610 XDC_DONE(xdc, rqno, err); 611 if (err == XD_ERR_NRDY) { 612 printf("%s: drive %d: off-line\n", 613 xd->sc_dev.dv_xname, xd->xd_drive); 614 goto done; 615 } 616 if (err) { 617 printf("%s: ERROR 0x%02x (%s)\n", 618 xd->sc_dev.dv_xname, err, xdc_e2str(err)); 619 goto done; 620 } 621 printf("%s: drive %d ready\n", 622 xd->sc_dev.dv_xname, xd->xd_drive); 623 624 /* now set format parameters */ 625 626 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_FMT, xd->xd_drive, 627 0, 0, 0, fullmode); 628 XDC_DONE(xdc, rqno, err); 629 if (err) { 630 printf("%s: write format parameters failed: %s\n", 631 xd->sc_dev.dv_xname, xdc_e2str(err)); 632 goto done; 633 } 634 635 /* get drive parameters */ 636 spt = 0; 637 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_DRV, xd->xd_drive, 638 0, 0, 0, fullmode); 639 if (rqno != XD_ERR_FAIL) { 640 driopb = (struct xd_iopb_drive *) & xdc->iopbase[rqno]; 641 spt = driopb->sectpertrk; 642 } 643 XDC_DONE(xdc, rqno, err); 644 if (err) { 645 printf("%s: read drive parameters failed: %s\n", 646 xd->sc_dev.dv_xname, xdc_e2str(err)); 647 goto done; 648 } 649 650 /* 651 * now set drive parameters (to semi-bogus values) so we can read the 652 * disk label. 
653 */ 654 xd->pcyl = xd->ncyl = 1; 655 xd->acyl = 0; 656 xd->nhead = 1; 657 xd->nsect = 1; 658 xd->sectpercyl = 1; 659 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */ 660 xd->dkb.bt_bad[lcv].bt_cyl = 661 xd->dkb.bt_bad[lcv].bt_trksec = 0xffff; 662 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 663 0, 0, 0, fullmode); 664 XDC_DONE(xdc, rqno, err); 665 if (err) { 666 printf("%s: write drive parameters failed: %s\n", 667 xd->sc_dev.dv_xname, xdc_e2str(err)); 668 goto done; 669 } 670 671 /* read disk label */ 672 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, 673 0, 1, dvmabuf, fullmode); 674 XDC_DONE(xdc, rqno, err); 675 if (err) { 676 printf("%s: reading disk label failed: %s\n", 677 xd->sc_dev.dv_xname, xdc_e2str(err)); 678 goto done; 679 } 680 newstate = XD_DRIVE_NOLABEL; 681 682 xd->hw_spt = spt; 683 /* Attach the disk: must be before getdisklabel to malloc label */ 684 disk_attach(&xd->sc_dk); 685 686 if (xdgetdisklabel(xd, dvmabuf) != XD_ERR_AOK) 687 goto done; 688 689 /* inform the user of what is up */ 690 printf("%s: <%s>, pcyl %d, hw_spt %d\n", 691 xd->sc_dev.dv_xname, 692 (char *)dvmabuf, xd->pcyl, spt); 693 mb = xd->ncyl * (xd->nhead * xd->nsect) / (1048576 / XDFM_BPS); 694 printf("%s: %dMB, %d cyl, %d head, %d sec\n", 695 xd->sc_dev.dv_xname, mb, 696 xd->ncyl, xd->nhead, xd->nsect); 697 698 /* now set the real drive parameters! */ 699 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 700 0, 0, 0, fullmode); 701 XDC_DONE(xdc, rqno, err); 702 if (err) { 703 printf("%s: write real drive parameters failed: %s\n", 704 xd->sc_dev.dv_xname, xdc_e2str(err)); 705 goto done; 706 } 707 newstate = XD_DRIVE_ONLINE; 708 709 /* 710 * read bad144 table. this table resides on the first sector of the 711 * last track of the disk (i.e. second cyl of "acyl" area). 
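 *
 * worked example of the arithmetic below (made-up geometry, not any
 * particular Xylogics drive): with ncyl = 842, acyl = 2, nhead = 15
 * and nsect = 67 the table starts at block
 *
 *	(842 + 2 - 1) * (15 * 67) + (15 - 1) * 67 = 843 * 1005 + 938
 *	                                          = 848153
 *
 * i.e. cylinder 843 (the second of the two alternate cylinders),
 * head 14, sector 0.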
712 */ 713 blk = (xd->ncyl + xd->acyl - 1) * (xd->nhead * xd->nsect) + /* last cyl */ 714 (xd->nhead - 1) * xd->nsect; /* last head */ 715 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, 716 blk, 1, dvmabuf, fullmode); 717 XDC_DONE(xdc, rqno, err); 718 if (err) { 719 printf("%s: reading bad144 failed: %s\n", 720 xd->sc_dev.dv_xname, xdc_e2str(err)); 721 goto done; 722 } 723 724 /* check dkbad for sanity */ 725 dkb = (struct dkbad *) dvmabuf; 726 for (lcv = 0; lcv < 126; lcv++) { 727 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff || 728 dkb->bt_bad[lcv].bt_cyl == 0) && 729 dkb->bt_bad[lcv].bt_trksec == 0xffff) 730 continue; /* blank */ 731 if (dkb->bt_bad[lcv].bt_cyl >= xd->ncyl) 732 break; 733 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xd->nhead) 734 break; 735 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xd->nsect) 736 break; 737 } 738 if (lcv != 126) { 739 printf("%s: warning: invalid bad144 sector!\n", 740 xd->sc_dev.dv_xname); 741 } else { 742 memcpy(&xd->dkb, dvmabuf, XDFM_BPS); 743 } 744 745 done: 746 xd->state = newstate; 747 dvma_free(dvmabuf, XDFM_BPS); 748 } 749 750 /* 751 * { b , c } d e v s w f u n c t i o n s 752 */ 753 754 /* 755 * xdclose: close device 756 */ 757 int 758 xdclose(dev, flag, fmt, p) 759 dev_t dev; 760 int flag, fmt; 761 struct proc *p; 762 { 763 struct xd_softc *xd = xd_cd.cd_devs[DISKUNIT(dev)]; 764 int part = DISKPART(dev); 765 766 /* clear mask bits */ 767 768 switch (fmt) { 769 case S_IFCHR: 770 xd->sc_dk.dk_copenmask &= ~(1 << part); 771 break; 772 case S_IFBLK: 773 xd->sc_dk.dk_bopenmask &= ~(1 << part); 774 break; 775 } 776 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 777 778 return 0; 779 } 780 781 /* 782 * xddump: crash dump system 783 */ 784 int 785 xddump(dev, blkno, va, sz) 786 dev_t dev; 787 daddr_t blkno; 788 caddr_t va; 789 size_t sz; 790 { 791 int unit, part; 792 struct xd_softc *xd; 793 794 unit = DISKUNIT(dev); 795 if (unit >= xd_cd.cd_ndevs) 796 return ENXIO; 797 part = DISKPART(dev); 798 799 xd = xd_cd.cd_devs[unit]; 800 801 printf("%s%c: crash dump not supported (yet)\n", 802 xd->sc_dev.dv_xname, 'a' + part); 803 804 return ENXIO; 805 806 /* outline: globals: "dumplo" == sector number of partition to start 807 * dump at (convert to physical sector with partition table) 808 * "dumpsize" == size of dump in clicks "physmem" == size of physical 809 * memory (clicks, ctob() to get bytes) (normal case: dumpsize == 810 * physmem) 811 * 812 * dump a copy of physical memory to the dump device starting at sector 813 * "dumplo" in the swap partition (make sure > 0). map in pages as 814 * we go. use polled I/O. 815 * 816 * XXX how to handle NON_CONTIG? 817 */ 818 } 819 820 /* 821 * xdioctl: ioctls on XD drives. based on ioctl's of other netbsd disks. 
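 *
 * quick index of what the switch below handles (a summary of the code,
 * not additional commands):
 *	DIOCSBAD	load a new in-core bad144 table (needs FWRITE)
 *	DIOCGDINFO	copy out the disk label
 *	DIOCGPART	copy out partition info for this minor device
 *	DIOCSDINFO	set the in-core disk label (needs FWRITE)
 *	DIOCWLABEL	allow/forbid writes to the on-disk label
 *	DIOCWDINFO	set the label and write it to disk (needs FWRITE)
 *	DIOSXDCMD	hand a raw xd_iocmd to the controller (root only)
 * anything else returns ENOTTY.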
822 */ 823 int 824 xdioctl(dev, command, addr, flag, p) 825 dev_t dev; 826 u_long command; 827 caddr_t addr; 828 int flag; 829 struct proc *p; 830 831 { 832 struct xd_softc *xd; 833 struct xd_iocmd *xio; 834 int error, s, unit; 835 836 unit = DISKUNIT(dev); 837 838 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL) 839 return (ENXIO); 840 841 /* switch on ioctl type */ 842 843 switch (command) { 844 case DIOCSBAD: /* set bad144 info */ 845 if ((flag & FWRITE) == 0) 846 return EBADF; 847 s = splbio(); 848 memcpy(&xd->dkb, addr, sizeof(xd->dkb)); 849 splx(s); 850 return 0; 851 852 case DIOCGDINFO: /* get disk label */ 853 memcpy(addr, xd->sc_dk.dk_label, sizeof(struct disklabel)); 854 return 0; 855 856 case DIOCGPART: /* get partition info */ 857 ((struct partinfo *) addr)->disklab = xd->sc_dk.dk_label; 858 ((struct partinfo *) addr)->part = 859 &xd->sc_dk.dk_label->d_partitions[DISKPART(dev)]; 860 return 0; 861 862 case DIOCSDINFO: /* set disk label */ 863 if ((flag & FWRITE) == 0) 864 return EBADF; 865 error = setdisklabel(xd->sc_dk.dk_label, 866 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0, 867 xd->sc_dk.dk_cpulabel); 868 if (error == 0) { 869 if (xd->state == XD_DRIVE_NOLABEL) 870 xd->state = XD_DRIVE_ONLINE; 871 } 872 return error; 873 874 case DIOCWLABEL: /* change write status of disk label */ 875 if ((flag & FWRITE) == 0) 876 return EBADF; 877 if (*(int *) addr) 878 xd->flags |= XD_WLABEL; 879 else 880 xd->flags &= ~XD_WLABEL; 881 return 0; 882 883 case DIOCWDINFO: /* write disk label */ 884 if ((flag & FWRITE) == 0) 885 return EBADF; 886 error = setdisklabel(xd->sc_dk.dk_label, 887 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0, 888 xd->sc_dk.dk_cpulabel); 889 if (error == 0) { 890 if (xd->state == XD_DRIVE_NOLABEL) 891 xd->state = XD_DRIVE_ONLINE; 892 893 /* Simulate opening partition 0 so write succeeds. */ 894 xd->sc_dk.dk_openmask |= (1 << 0); 895 error = writedisklabel(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART), 896 xdstrategy, xd->sc_dk.dk_label, 897 xd->sc_dk.dk_cpulabel); 898 xd->sc_dk.dk_openmask = 899 xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 900 } 901 return error; 902 903 case DIOSXDCMD: 904 xio = (struct xd_iocmd *) addr; 905 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) 906 return (error); 907 return (xdc_ioctlcmd(xd, dev, xio)); 908 909 default: 910 return ENOTTY; 911 } 912 } 913 914 /* 915 * xdopen: open drive 916 */ 917 int 918 xdopen(dev, flag, fmt, p) 919 dev_t dev; 920 int flag, fmt; 921 struct proc *p; 922 { 923 int err, unit, part, s; 924 struct xd_softc *xd; 925 926 /* first, could it be a valid target? */ 927 unit = DISKUNIT(dev); 928 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL) 929 return (ENXIO); 930 part = DISKPART(dev); 931 err = 0; 932 933 /* 934 * If some other processing is doing init, sleep. 935 */ 936 s = splbio(); 937 while (xd->state == XD_DRIVE_ATTACHING) { 938 if (tsleep(&xd->state, PRIBIO, "xdopen", 0)) { 939 err = EINTR; 940 goto done; 941 } 942 } 943 /* Do we need to init the drive? */ 944 if (xd->state == XD_DRIVE_UNKNOWN) { 945 xd_init(xd); 946 wakeup(&xd->state); 947 } 948 /* Was the init successful? 
*/ 949 if (xd->state == XD_DRIVE_UNKNOWN) { 950 err = EIO; 951 goto done; 952 } 953 954 /* check for partition */ 955 if (part != RAW_PART && 956 (part >= xd->sc_dk.dk_label->d_npartitions || 957 xd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) { 958 err = ENXIO; 959 goto done; 960 } 961 962 /* set open masks */ 963 switch (fmt) { 964 case S_IFCHR: 965 xd->sc_dk.dk_copenmask |= (1 << part); 966 break; 967 case S_IFBLK: 968 xd->sc_dk.dk_bopenmask |= (1 << part); 969 break; 970 } 971 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 972 973 done: 974 splx(s); 975 return (err); 976 } 977 978 int 979 xdread(dev, uio, flags) 980 dev_t dev; 981 struct uio *uio; 982 int flags; 983 { 984 985 return (physio(xdstrategy, NULL, dev, B_READ, minphys, uio)); 986 } 987 988 int 989 xdwrite(dev, uio, flags) 990 dev_t dev; 991 struct uio *uio; 992 int flags; 993 { 994 995 return (physio(xdstrategy, NULL, dev, B_WRITE, minphys, uio)); 996 } 997 998 999 /* 1000 * xdsize: return size of a partition for a dump 1001 */ 1002 int 1003 xdsize(dev) 1004 dev_t dev; 1005 1006 { 1007 struct xd_softc *xdsc; 1008 int unit, part, size, omask; 1009 1010 /* valid unit? */ 1011 unit = DISKUNIT(dev); 1012 if (unit >= xd_cd.cd_ndevs || (xdsc = xd_cd.cd_devs[unit]) == NULL) 1013 return (-1); 1014 1015 part = DISKPART(dev); 1016 omask = xdsc->sc_dk.dk_openmask & (1 << part); 1017 1018 if (omask == 0 && xdopen(dev, 0, S_IFBLK, NULL) != 0) 1019 return (-1); 1020 1021 /* do it */ 1022 if (xdsc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP) 1023 size = -1; /* only give valid size for swap partitions */ 1024 else 1025 size = xdsc->sc_dk.dk_label->d_partitions[part].p_size * 1026 (xdsc->sc_dk.dk_label->d_secsize / DEV_BSIZE); 1027 if (omask == 0 && xdclose(dev, 0, S_IFBLK, NULL) != 0) 1028 return (-1); 1029 return (size); 1030 } 1031 1032 /* 1033 * xdstrategy: buffering system interface to xd. 1034 */ 1035 void 1036 xdstrategy(bp) 1037 struct buf *bp; 1038 1039 { 1040 struct xd_softc *xd; 1041 struct xdc_softc *parent; 1042 int s, unit; 1043 1044 unit = DISKUNIT(bp->b_dev); 1045 1046 /* check for live device */ 1047 1048 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == 0 || 1049 bp->b_blkno < 0 || 1050 (bp->b_bcount % xd->sc_dk.dk_label->d_secsize) != 0) { 1051 bp->b_error = EINVAL; 1052 goto bad; 1053 } 1054 1055 /* There should always be an open first. */ 1056 if (xd->state == XD_DRIVE_UNKNOWN) { 1057 bp->b_error = EIO; 1058 goto bad; 1059 } 1060 1061 if (xd->state != XD_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) { 1062 /* no I/O to unlabeled disks, unless raw partition */ 1063 bp->b_error = EIO; 1064 goto bad; 1065 } 1066 /* short circuit zero length request */ 1067 1068 if (bp->b_bcount == 0) 1069 goto done; 1070 1071 /* check bounds with label (disksubr.c). Determine the size of the 1072 * transfer, and make sure it is within the boundaries of the 1073 * partition. Adjust transfer if needed, and signal errors or early 1074 * completion. */ 1075 1076 if (bounds_check_with_label(&xd->sc_dk, bp, 1077 (xd->flags & XD_WLABEL) != 0) <= 0) 1078 goto done; 1079 1080 /* 1081 * now we know we have a valid buf structure that we need to do I/O 1082 * on. 1083 * 1084 * note that we don't disksort because the controller has a sorting 1085 * algorithm built into the hardware. 
1086 */ 1087 1088 s = splbio(); /* protect the queues */ 1089 1090 /* first, give jobs in front of us a chance */ 1091 parent = xd->parent; 1092 while (parent->nfree > 0 && BUFQ_PEEK(&parent->sc_wq) != NULL) 1093 if (xdc_startbuf(parent, NULL, NULL) != XD_ERR_AOK) 1094 break; 1095 1096 /* 1097 * if there are no free iorq's, then we just queue and return. the 1098 * buffs will get picked up later by xdcintr(). 1099 */ 1100 if (parent->nfree == 0) { 1101 BUFQ_PUT(&parent->sc_wq, bp); 1102 splx(s); 1103 return; 1104 } 1105 1106 /* now we have free iopb's and we are at splbio... start 'em up */ 1107 if (xdc_startbuf(parent, xd, bp) != XD_ERR_AOK) { 1108 return; 1109 } 1110 1111 /* done! */ 1112 1113 splx(s); 1114 return; 1115 1116 bad: /* tells upper layers we have an error */ 1117 bp->b_flags |= B_ERROR; 1118 done: /* tells upper layers we are done with this 1119 * buf */ 1120 bp->b_resid = bp->b_bcount; 1121 biodone(bp); 1122 } 1123 /* 1124 * end of {b,c}devsw functions 1125 */ 1126 1127 /* 1128 * i n t e r r u p t f u n c t i o n 1129 * 1130 * xdcintr: hardware interrupt. 1131 */ 1132 int 1133 xdcintr(v) 1134 void *v; 1135 1136 { 1137 struct xdc_softc *xdcsc = v; 1138 1139 /* kick the event counter */ 1140 xdcsc->sc_intrcnt.ev_count++; 1141 1142 /* remove as many done IOPBs as possible */ 1143 xdc_remove_iorq(xdcsc); 1144 1145 /* start any iorq's already waiting */ 1146 xdc_start(xdcsc, XDC_MAXIOPB); 1147 1148 /* fill up any remaining iorq's with queue'd buffers */ 1149 while (xdcsc->nfree > 0 && BUFQ_PEEK(&xdcsc->sc_wq) != NULL) 1150 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK) 1151 break; 1152 1153 return (1); 1154 } 1155 /* 1156 * end of interrupt function 1157 */ 1158 1159 /* 1160 * i n t e r n a l f u n c t i o n s 1161 */ 1162 1163 /* 1164 * xdc_rqinit: fill out the fields of an I/O request 1165 */ 1166 1167 inline void 1168 xdc_rqinit(rq, xdc, xd, md, blk, cnt, db, bp) 1169 struct xd_iorq *rq; 1170 struct xdc_softc *xdc; 1171 struct xd_softc *xd; 1172 int md; 1173 u_long blk; 1174 int cnt; 1175 caddr_t db; 1176 struct buf *bp; 1177 { 1178 rq->xdc = xdc; 1179 rq->xd = xd; 1180 rq->ttl = XDC_MAXTTL + 10; 1181 rq->mode = md; 1182 rq->tries = rq->errno = rq->lasterror = 0; 1183 rq->blockno = blk; 1184 rq->sectcnt = cnt; 1185 rq->dbuf = rq->dbufbase = db; 1186 rq->buf = bp; 1187 } 1188 1189 /* 1190 * xdc_rqtopb: load up an IOPB based on an iorq 1191 */ 1192 void 1193 xdc_rqtopb(iorq, iopb, cmd, subfun) 1194 struct xd_iorq *iorq; 1195 struct xd_iopb *iopb; 1196 int cmd, subfun; 1197 1198 { 1199 u_long block, dp; 1200 1201 /* standard stuff */ 1202 1203 iopb->errs = iopb->done = 0; 1204 iopb->comm = cmd; 1205 iopb->errno = iopb->status = 0; 1206 iopb->subfun = subfun; 1207 if (iorq->xd) 1208 iopb->unit = iorq->xd->xd_drive; 1209 else 1210 iopb->unit = 0; 1211 1212 /* check for alternate IOPB format */ 1213 1214 if (cmd == XDCMD_WRP) { 1215 switch (subfun) { 1216 case XDFUN_CTL:{ 1217 struct xd_iopb_ctrl *ctrl = 1218 (struct xd_iopb_ctrl *) iopb; 1219 iopb->lll = 0; 1220 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL) 1221 ? 
0 1222 : iorq->xdc->ipl; 1223 ctrl->param_a = XDPA_TMOD | XDPA_DACF; 1224 ctrl->param_b = XDPB_ROR | XDPB_TDT_3_2USEC; 1225 ctrl->param_c = XDPC_OVS | XDPC_COP | XDPC_ASR | 1226 XDPC_RBC | XDPC_ECC2; 1227 ctrl->throttle = XDC_THROTTLE; 1228 #ifdef sparc 1229 if (CPU_ISSUN4 && cpuinfo.cpu_type == CPUTYP_4_300) 1230 ctrl->delay = XDC_DELAY_4_300; 1231 else 1232 ctrl->delay = XDC_DELAY_SPARC; 1233 #endif 1234 #ifdef sun3 1235 ctrl->delay = XDC_DELAY_SUN3; 1236 #endif 1237 break; 1238 } 1239 case XDFUN_DRV:{ 1240 struct xd_iopb_drive *drv = 1241 (struct xd_iopb_drive *)iopb; 1242 /* we assume that the disk label has the right 1243 * info */ 1244 if (XD_STATE(iorq->mode) == XD_SUB_POLL) 1245 drv->dparam_ipl = (XDC_DPARAM << 3); 1246 else 1247 drv->dparam_ipl = (XDC_DPARAM << 3) | 1248 iorq->xdc->ipl; 1249 drv->maxsect = iorq->xd->nsect - 1; 1250 drv->maxsector = drv->maxsect; 1251 /* note: maxsector != maxsect only if you are 1252 * doing cyl sparing */ 1253 drv->headoff = 0; 1254 drv->maxcyl = iorq->xd->pcyl - 1; 1255 drv->maxhead = iorq->xd->nhead - 1; 1256 break; 1257 } 1258 case XDFUN_FMT:{ 1259 struct xd_iopb_format *form = 1260 (struct xd_iopb_format *) iopb; 1261 if (XD_STATE(iorq->mode) == XD_SUB_POLL) 1262 form->interleave_ipl = (XDC_INTERLEAVE << 3); 1263 else 1264 form->interleave_ipl = (XDC_INTERLEAVE << 3) | 1265 iorq->xdc->ipl; 1266 form->field1 = XDFM_FIELD1; 1267 form->field2 = XDFM_FIELD2; 1268 form->field3 = XDFM_FIELD3; 1269 form->field4 = XDFM_FIELD4; 1270 form->bytespersec = XDFM_BPS; 1271 form->field6 = XDFM_FIELD6; 1272 form->field7 = XDFM_FIELD7; 1273 break; 1274 } 1275 } 1276 } else { 1277 1278 /* normal IOPB case (harmless to RDP command) */ 1279 1280 iopb->lll = 0; 1281 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL) 1282 ? 0 1283 : iorq->xdc->ipl; 1284 iopb->sectcnt = iorq->sectcnt; 1285 block = iorq->blockno; 1286 if (iorq->xd == NULL || block == 0) { 1287 iopb->sectno = iopb->headno = iopb->cylno = 0; 1288 } else { 1289 iopb->sectno = block % iorq->xd->nsect; 1290 block = block / iorq->xd->nsect; 1291 iopb->headno = block % iorq->xd->nhead; 1292 block = block / iorq->xd->nhead; 1293 iopb->cylno = block; 1294 } 1295 iopb->daddr = dp = (iorq->dbuf == NULL) ? 0 : 1296 dvma_kvtopa(iorq->dbuf, iorq->xdc->bustype); 1297 iopb->addrmod = XDC_ADDRMOD; 1298 } 1299 } 1300 1301 /* 1302 * xdc_cmd: front end for POLL'd and WAIT'd commands. Returns rqno. 1303 * If you've already got an IORQ, you can call submit directly (currently 1304 * there is no need to do this). NORM requests are handled separately. 
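 *
 * typical usage, as in the drive reset step of xd_init above (shown
 * here only as a reminder, not a new code path):
 *
 *	rqno = xdc_cmd(xdc, XDCMD_RST, 0, xd->xd_drive, 0, 0, 0, fullmode);
 *	XDC_DONE(xdc, rqno, err);
 *	if (err) ...
 *
 * i.e. the return value is either an iorq number or XD_ERR_FAIL, and
 * XDC_DONE() converts that into an error code and frees the iorq.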
1305 */ 1306 int 1307 xdc_cmd(xdcsc, cmd, subfn, unit, block, scnt, dptr, fullmode) 1308 struct xdc_softc *xdcsc; 1309 int cmd, subfn, unit, block, scnt; 1310 char *dptr; 1311 int fullmode; 1312 1313 { 1314 struct xd_iorq *iorq; 1315 struct xd_iopb *iopb; 1316 int rqno, retry; 1317 int submode = XD_STATE(fullmode); 1318 1319 /* get iorq/iopb */ 1320 switch (submode) { 1321 case XD_SUB_POLL: 1322 while (xdcsc->nfree == 0) { 1323 if (xdc_piodriver(xdcsc, 0, 1) != XD_ERR_AOK) 1324 return (XD_ERR_FAIL); 1325 } 1326 break; 1327 case XD_SUB_WAIT: 1328 retry = 1; 1329 while (retry) { 1330 while (xdcsc->nfree == 0) { 1331 if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0)) 1332 return (XD_ERR_FAIL); 1333 } 1334 while (xdcsc->ndone > XDC_SUBWAITLIM) { 1335 if (tsleep(&xdcsc->ndone, PRIBIO, "xdsubwait", 0)) 1336 return (XD_ERR_FAIL); 1337 } 1338 if (xdcsc->nfree) 1339 retry = 0; /* got it */ 1340 } 1341 break; 1342 default: 1343 return (XD_ERR_FAIL); /* illegal */ 1344 } 1345 if (xdcsc->nfree == 0) 1346 panic("xdcmd nfree"); 1347 rqno = XDC_RQALLOC(xdcsc); 1348 iorq = &xdcsc->reqs[rqno]; 1349 iopb = iorq->iopb; 1350 1351 1352 /* init iorq/iopb */ 1353 xdc_rqinit(iorq, xdcsc, 1354 (unit == XDC_NOUNIT) ? NULL : xdcsc->sc_drives[unit], 1355 fullmode, block, scnt, dptr, NULL); 1356 1357 /* load IOPB from iorq */ 1358 xdc_rqtopb(iorq, iopb, cmd, subfn); 1359 1360 /* submit it for processing */ 1361 xdc_submit_iorq(xdcsc, rqno, fullmode); /* error code will be in iorq */ 1362 1363 return (rqno); 1364 } 1365 1366 /* 1367 * xdc_startbuf 1368 * start a buffer running, assumes nfree > 0 1369 */ 1370 int 1371 xdc_startbuf(xdcsc, xdsc, bp) 1372 struct xdc_softc *xdcsc; 1373 struct xd_softc *xdsc; 1374 struct buf *bp; 1375 1376 { 1377 int rqno, partno; 1378 struct xd_iorq *iorq; 1379 struct xd_iopb *iopb; 1380 u_long block; 1381 caddr_t dbuf; 1382 1383 if (!xdcsc->nfree) 1384 panic("xdc_startbuf free"); 1385 rqno = XDC_RQALLOC(xdcsc); 1386 iorq = &xdcsc->reqs[rqno]; 1387 iopb = iorq->iopb; 1388 1389 /* get buf */ 1390 1391 if (bp == NULL) { 1392 bp = BUFQ_GET(&xdcsc->sc_wq); 1393 if (bp == NULL) 1394 panic("xdc_startbuf bp"); 1395 xdsc = xdcsc->sc_drives[DISKUNIT(bp->b_dev)]; 1396 } 1397 partno = DISKPART(bp->b_dev); 1398 #ifdef XDC_DEBUG 1399 printf("xdc_startbuf: %s%c: %s block %d\n", xdsc->sc_dev.dv_xname, 1400 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno); 1401 printf("xdc_startbuf: b_bcount %d, b_data 0x%x\n", 1402 bp->b_bcount, bp->b_data); 1403 #endif 1404 1405 /* 1406 * load request. we have to calculate the correct block number based 1407 * on partition info. 1408 * 1409 * also, note that there are two kinds of buf structures, those with 1410 * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is 1411 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users' 1412 * buffer which has already been mapped into DVMA space. (Not on sun3) 1413 * However, if B_PHYS is not set, then the buffer is a normal system 1414 * buffer which does *not* live in DVMA space. In that case we call 1415 * dvma_mapin to map it into DVMA space so we can do the DMA to it. 1416 * 1417 * in cases where we do a dvma_mapin, note that iorq points to the buffer 1418 * as mapped into DVMA space, where as the bp->b_data points to its 1419 * non-DVMA mapping. 1420 * 1421 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped 1422 * into dvma space, only that it was remapped into the kernel. 1423 * We ALWAYS have to remap the kernel buf into DVMA space. 
1424 * (It is done inexpensively, using whole segments!) 1425 */ 1426 1427 block = bp->b_blkno + ((partno == RAW_PART) ? 0 : 1428 xdsc->sc_dk.dk_label->d_partitions[partno].p_offset); 1429 1430 dbuf = dvma_mapin(bp->b_data, bp->b_bcount, 0); 1431 if (dbuf == NULL) { /* out of DVMA space */ 1432 printf("%s: warning: out of DVMA space\n", 1433 xdcsc->sc_dev.dv_xname); 1434 XDC_FREE(xdcsc, rqno); 1435 BUFQ_PUT(&xdcsc->sc_wq, bp); 1436 return (XD_ERR_FAIL); /* XXX: need some sort of 1437 * call-back scheme here? */ 1438 } 1439 1440 /* init iorq and load iopb from it */ 1441 1442 xdc_rqinit(iorq, xdcsc, xdsc, XD_SUB_NORM | XD_MODE_VERBO, block, 1443 bp->b_bcount / XDFM_BPS, dbuf, bp); 1444 1445 xdc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XDCMD_RD : XDCMD_WR, 0); 1446 1447 /* Instrumentation. */ 1448 disk_busy(&xdsc->sc_dk); 1449 1450 /* now submit [note that xdc_submit_iorq can never fail on NORM reqs] */ 1451 1452 xdc_submit_iorq(xdcsc, rqno, XD_SUB_NORM); 1453 return (XD_ERR_AOK); 1454 } 1455 1456 1457 /* 1458 * xdc_submit_iorq: submit an iorq for processing. returns XD_ERR_AOK 1459 * if ok. if it fail returns an error code. type is XD_SUB_*. 1460 * 1461 * note: caller frees iorq in all cases except NORM 1462 * 1463 * return value: 1464 * NORM: XD_AOK (req pending), XD_FAIL (couldn't submit request) 1465 * WAIT: XD_AOK (success), <error-code> (failed) 1466 * POLL: <same as WAIT> 1467 * NOQ : <same as NORM> 1468 * 1469 * there are three sources for i/o requests: 1470 * [1] xdstrategy: normal block I/O, using "struct buf" system. 1471 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts. 1472 * [3] open/ioctl: these are I/O requests done in the context of a process, 1473 * and the process should block until they are done. 1474 * 1475 * software state is stored in the iorq structure. each iorq has an 1476 * iopb structure. the hardware understands the iopb structure. 1477 * every command must go through an iopb. a 7053 can only handle 1478 * XDC_MAXIOPB (31) active iopbs at one time. iopbs are allocated in 1479 * DVMA space at boot up time. what happens if we run out of iopb's? 1480 * for i/o type [1], the buffers are queued at the "buff" layer and 1481 * picked up later by the interrupt routine. for case [2] the 1482 * programmed i/o driver is called with a special flag that says 1483 * return when one iopb is free. for case [3] the process can sleep 1484 * on the iorq free list until some iopbs are available. 
1485 */ 1486 1487 1488 int 1489 xdc_submit_iorq(xdcsc, iorqno, type) 1490 struct xdc_softc *xdcsc; 1491 int iorqno; 1492 int type; 1493 1494 { 1495 u_long iopbaddr; 1496 struct xd_iorq *iorq = &xdcsc->reqs[iorqno]; 1497 1498 #ifdef XDC_DEBUG 1499 printf("xdc_submit_iorq(%s, no=%d, type=%d)\n", xdcsc->sc_dev.dv_xname, 1500 iorqno, type); 1501 #endif 1502 1503 /* first check and see if controller is busy */ 1504 if (xdcsc->xdc->xdc_csr & XDC_ADDING) { 1505 #ifdef XDC_DEBUG 1506 printf("xdc_submit_iorq: XDC not ready (ADDING)\n"); 1507 #endif 1508 if (type == XD_SUB_NOQ) 1509 return (XD_ERR_FAIL); /* failed */ 1510 XDC_TWAIT(xdcsc, iorqno); /* put at end of waitq */ 1511 switch (type) { 1512 case XD_SUB_NORM: 1513 return XD_ERR_AOK; /* success */ 1514 case XD_SUB_WAIT: 1515 while (iorq->iopb->done == 0) { 1516 (void) tsleep(iorq, PRIBIO, "xdciorq", 0); 1517 } 1518 return (iorq->errno); 1519 case XD_SUB_POLL: 1520 return (xdc_piodriver(xdcsc, iorqno, 0)); 1521 default: 1522 panic("xdc_submit_iorq adding"); 1523 } 1524 } 1525 #ifdef XDC_DEBUG 1526 { 1527 u_char *rio = (u_char *) iorq->iopb; 1528 int sz = sizeof(struct xd_iopb), lcv; 1529 printf("%s: aio #%d [", 1530 xdcsc->sc_dev.dv_xname, iorq - xdcsc->reqs); 1531 for (lcv = 0; lcv < sz; lcv++) 1532 printf(" %02x", rio[lcv]); 1533 printf("]\n"); 1534 } 1535 #endif /* XDC_DEBUG */ 1536 1537 /* controller not busy, start command */ 1538 iopbaddr = dvma_kvtopa(iorq->iopb, xdcsc->bustype); 1539 XDC_GO(xdcsc->xdc, iopbaddr); /* go! */ 1540 xdcsc->nrun++; 1541 /* command now running, wrap it up */ 1542 switch (type) { 1543 case XD_SUB_NORM: 1544 case XD_SUB_NOQ: 1545 return (XD_ERR_AOK); /* success */ 1546 case XD_SUB_WAIT: 1547 while (iorq->iopb->done == 0) { 1548 (void) tsleep(iorq, PRIBIO, "xdciorq", 0); 1549 } 1550 return (iorq->errno); 1551 case XD_SUB_POLL: 1552 return (xdc_piodriver(xdcsc, iorqno, 0)); 1553 default: 1554 panic("xdc_submit_iorq wrap up"); 1555 } 1556 panic("xdc_submit_iorq"); 1557 return 0; /* not reached */ 1558 } 1559 1560 1561 /* 1562 * xdc_piodriver 1563 * 1564 * programmed i/o driver. this function takes over the computer 1565 * and drains off all i/o requests. it returns the status of the iorq 1566 * the caller is interesting in. if freeone is true, then it returns 1567 * when there is a free iorq. 1568 */ 1569 int 1570 xdc_piodriver(xdcsc, iorqno, freeone) 1571 struct xdc_softc *xdcsc; 1572 int iorqno; 1573 int freeone; 1574 1575 { 1576 int nreset = 0; 1577 int retval = 0; 1578 u_long count; 1579 struct xdc *xdc = xdcsc->xdc; 1580 #ifdef XDC_DEBUG 1581 printf("xdc_piodriver(%s, %d, freeone=%d)\n", xdcsc->sc_dev.dv_xname, 1582 iorqno, freeone); 1583 #endif 1584 1585 while (xdcsc->nwait || xdcsc->nrun) { 1586 #ifdef XDC_DEBUG 1587 printf("xdc_piodriver: wait=%d, run=%d\n", 1588 xdcsc->nwait, xdcsc->nrun); 1589 #endif 1590 XDC_WAIT(xdc, count, XDC_MAXTIME, (XDC_REMIOPB | XDC_F_ERROR)); 1591 #ifdef XDC_DEBUG 1592 printf("xdc_piodriver: done wait with count = %d\n", count); 1593 #endif 1594 /* we expect some progress soon */ 1595 if (count == 0 && nreset >= 2) { 1596 xdc_reset(xdcsc, 0, XD_RSET_ALL, XD_ERR_FAIL, 0); 1597 #ifdef XDC_DEBUG 1598 printf("xdc_piodriver: timeout\n"); 1599 #endif 1600 return (XD_ERR_FAIL); 1601 } 1602 if (count == 0) { 1603 if (xdc_reset(xdcsc, 0, 1604 (nreset++ == 0) ? 
XD_RSET_NONE : iorqno, 1605 XD_ERR_FAIL, 1606 0) == XD_ERR_FAIL) 1607 return (XD_ERR_FAIL); /* flushes all but POLL 1608 * requests, resets */ 1609 continue; 1610 } 1611 xdc_remove_iorq(xdcsc); /* could resubmit request */ 1612 if (freeone) { 1613 if (xdcsc->nrun < XDC_MAXIOPB) { 1614 #ifdef XDC_DEBUG 1615 printf("xdc_piodriver: done: one free\n"); 1616 #endif 1617 return (XD_ERR_AOK); 1618 } 1619 continue; /* don't xdc_start */ 1620 } 1621 xdc_start(xdcsc, XDC_MAXIOPB); 1622 } 1623 1624 /* get return value */ 1625 1626 retval = xdcsc->reqs[iorqno].errno; 1627 1628 #ifdef XDC_DEBUG 1629 printf("xdc_piodriver: done, retval = 0x%x (%s)\n", 1630 xdcsc->reqs[iorqno].errno, xdc_e2str(xdcsc->reqs[iorqno].errno)); 1631 #endif 1632 1633 /* now that we've drained everything, start up any bufs that have 1634 * queued */ 1635 1636 while (xdcsc->nfree > 0 && BUFQ_PEEK(&xdcsc->sc_wq) != NULL) 1637 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK) 1638 break; 1639 1640 return (retval); 1641 } 1642 1643 /* 1644 * xdc_reset: reset one drive. NOTE: assumes xdc was just reset. 1645 * we steal iopb[0] for this, but we put it back when we are done. 1646 */ 1647 void 1648 xdc_xdreset(xdcsc, xdsc) 1649 struct xdc_softc *xdcsc; 1650 struct xd_softc *xdsc; 1651 1652 { 1653 struct xd_iopb tmpiopb; 1654 u_long addr; 1655 int del; 1656 memcpy(&tmpiopb, xdcsc->iopbase, sizeof(tmpiopb)); 1657 memset(xdcsc->iopbase, 0, sizeof(tmpiopb)); 1658 xdcsc->iopbase->comm = XDCMD_RST; 1659 xdcsc->iopbase->unit = xdsc->xd_drive; 1660 addr = (u_long) xdcsc->dvmaiopb; 1661 XDC_GO(xdcsc->xdc, addr); /* go! */ 1662 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_REMIOPB); 1663 if (del <= 0 || xdcsc->iopbase->errs) { 1664 printf("%s: off-line: %s\n", xdcsc->sc_dev.dv_xname, 1665 xdc_e2str(xdcsc->iopbase->errno)); 1666 xdcsc->xdc->xdc_csr = XDC_RESET; 1667 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET); 1668 if (del <= 0) 1669 panic("xdc_reset"); 1670 } else { 1671 xdcsc->xdc->xdc_csr = XDC_CLRRIO; /* clear RIO */ 1672 } 1673 memcpy(xdcsc->iopbase, &tmpiopb, sizeof(tmpiopb)); 1674 } 1675 1676 1677 /* 1678 * xdc_reset: reset everything: requests are marked as errors except 1679 * a polled request (which is resubmitted) 1680 */ 1681 int 1682 xdc_reset(xdcsc, quiet, blastmode, error, xdsc) 1683 struct xdc_softc *xdcsc; 1684 int quiet, blastmode, error; 1685 struct xd_softc *xdsc; 1686 1687 { 1688 int del = 0, lcv, retval = XD_ERR_AOK; 1689 int oldfree = xdcsc->nfree; 1690 struct xd_iorq *iorq; 1691 1692 /* soft reset hardware */ 1693 1694 if (!quiet) 1695 printf("%s: soft reset\n", xdcsc->sc_dev.dv_xname); 1696 xdcsc->xdc->xdc_csr = XDC_RESET; 1697 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET); 1698 if (del <= 0) { 1699 blastmode = XD_RSET_ALL; /* dead, flush all requests */ 1700 retval = XD_ERR_FAIL; 1701 } 1702 if (xdsc) 1703 xdc_xdreset(xdcsc, xdsc); 1704 1705 /* fix queues based on "blast-mode" */ 1706 1707 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 1708 iorq = &xdcsc->reqs[lcv]; 1709 1710 if (XD_STATE(iorq->mode) != XD_SUB_POLL && 1711 XD_STATE(iorq->mode) != XD_SUB_WAIT && 1712 XD_STATE(iorq->mode) != XD_SUB_NORM) 1713 /* is it active? 
*/ 1714 continue; 1715 1716 xdcsc->nrun--; /* it isn't running any more */ 1717 if (blastmode == XD_RSET_ALL || blastmode != lcv) { 1718 /* failed */ 1719 iorq->errno = error; 1720 xdcsc->iopbase[lcv].done = xdcsc->iopbase[lcv].errs = 1; 1721 switch (XD_STATE(iorq->mode)) { 1722 case XD_SUB_NORM: 1723 iorq->buf->b_error = EIO; 1724 iorq->buf->b_flags |= B_ERROR; 1725 iorq->buf->b_resid = 1726 iorq->sectcnt * XDFM_BPS; 1727 /* Sun3: map/unmap regardless of B_PHYS */ 1728 dvma_mapout(iorq->dbufbase, 1729 iorq->buf->b_bcount); 1730 disk_unbusy(&iorq->xd->sc_dk, 1731 (iorq->buf->b_bcount - iorq->buf->b_resid), 1732 (iorq->buf->b_flags & B_READ)); 1733 biodone(iorq->buf); 1734 XDC_FREE(xdcsc, lcv); /* add to free list */ 1735 break; 1736 case XD_SUB_WAIT: 1737 wakeup(iorq); 1738 case XD_SUB_POLL: 1739 xdcsc->ndone++; 1740 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1741 break; 1742 } 1743 1744 } else { 1745 1746 /* resubmit, put at front of wait queue */ 1747 XDC_HWAIT(xdcsc, lcv); 1748 } 1749 } 1750 1751 /* 1752 * now, if stuff is waiting, start it. 1753 * since we just reset it should go 1754 */ 1755 xdc_start(xdcsc, XDC_MAXIOPB); 1756 1757 /* ok, we did it */ 1758 if (oldfree == 0 && xdcsc->nfree) 1759 wakeup(&xdcsc->nfree); 1760 1761 #ifdef XDC_DIAG 1762 del = xdcsc->nwait + xdcsc->nrun + xdcsc->nfree + xdcsc->ndone; 1763 if (del != XDC_MAXIOPB) 1764 printf("%s: diag: xdc_reset miscount (%d should be %d)!\n", 1765 xdcsc->sc_dev.dv_xname, del, XDC_MAXIOPB); 1766 else 1767 if (xdcsc->ndone > XDC_MAXIOPB - XDC_SUBWAITLIM) 1768 printf("%s: diag: lots of done jobs (%d)\n", 1769 xdcsc->sc_dev.dv_xname, xdcsc->ndone); 1770 #endif 1771 printf("RESET DONE\n"); 1772 return (retval); 1773 } 1774 1775 /* 1776 * xdc_start: start all waiting buffers 1777 */ 1778 void 1779 xdc_start(xdcsc, maxio) 1780 struct xdc_softc *xdcsc; 1781 int maxio; 1782 1783 { 1784 int rqno; 1785 while (maxio && xdcsc->nwait && 1786 (xdcsc->xdc->xdc_csr & XDC_ADDING) == 0) { 1787 XDC_GET_WAITER(xdcsc, rqno); /* note: rqno is an "out" 1788 * param */ 1789 if (xdc_submit_iorq(xdcsc, rqno, XD_SUB_NOQ) != XD_ERR_AOK) 1790 panic("xdc_start"); /* should never happen */ 1791 maxio--; 1792 } 1793 } 1794 1795 /* 1796 * xdc_remove_iorq: remove "done" IOPB's. 1797 */ 1798 int 1799 xdc_remove_iorq(xdcsc) 1800 struct xdc_softc *xdcsc; 1801 1802 { 1803 int errno, rqno, comm, errs; 1804 struct xdc *xdc = xdcsc->xdc; 1805 struct xd_iopb *iopb; 1806 struct xd_iorq *iorq; 1807 struct buf *bp; 1808 1809 if (xdc->xdc_csr & XDC_F_ERROR) { 1810 /* 1811 * FATAL ERROR: should never happen under normal use. This 1812 * error is so bad, you can't even tell which IOPB is bad, so 1813 * we dump them all. 1814 */ 1815 errno = xdc->xdc_f_err; 1816 printf("%s: fatal error 0x%02x: %s\n", xdcsc->sc_dev.dv_xname, 1817 errno, xdc_e2str(errno)); 1818 if (xdc_reset(xdcsc, 0, XD_RSET_ALL, errno, 0) != XD_ERR_AOK) { 1819 printf("%s: soft reset failed!\n", 1820 xdcsc->sc_dev.dv_xname); 1821 panic("xdc_remove_iorq: controller DEAD"); 1822 } 1823 return (XD_ERR_AOK); 1824 } 1825 1826 /* 1827 * get iopb that is done 1828 * 1829 * hmm... I used to read the address of the done IOPB off the VME 1830 * registers and calculate the rqno directly from that. that worked 1831 * until I started putting a load on the controller. when loaded, i 1832 * would get interrupts but neither the REMIOPB or F_ERROR bits would 1833 * be set, even after DELAY'ing a while! 
later on the timeout 1834 * routine would detect IOPBs that were marked "running" but their 1835 * "done" bit was set. rather than dealing directly with this 1836 * problem, it is just easier to look at all running IOPB's for the 1837 * done bit. 1838 */ 1839 if (xdc->xdc_csr & XDC_REMIOPB) { 1840 xdc->xdc_csr = XDC_CLRRIO; 1841 } 1842 1843 for (rqno = 0; rqno < XDC_MAXIOPB; rqno++) { 1844 iorq = &xdcsc->reqs[rqno]; 1845 if (iorq->mode == 0 || XD_STATE(iorq->mode) == XD_SUB_DONE) 1846 continue; /* free, or done */ 1847 iopb = &xdcsc->iopbase[rqno]; 1848 if (iopb->done == 0) 1849 continue; /* not done yet */ 1850 1851 #ifdef XDC_DEBUG 1852 { 1853 u_char *rio = (u_char *) iopb; 1854 int sz = sizeof(struct xd_iopb), lcv; 1855 printf("%s: rio #%d [", xdcsc->sc_dev.dv_xname, rqno); 1856 for (lcv = 0; lcv < sz; lcv++) 1857 printf(" %02x", rio[lcv]); 1858 printf("]\n"); 1859 } 1860 #endif /* XDC_DEBUG */ 1861 1862 xdcsc->nrun--; 1863 1864 comm = iopb->comm; 1865 errs = iopb->errs; 1866 1867 if (errs) 1868 iorq->errno = iopb->errno; 1869 else 1870 iorq->errno = 0; 1871 1872 /* handle non-fatal errors */ 1873 1874 if (errs && 1875 xdc_error(xdcsc, iorq, iopb, rqno, comm) == XD_ERR_AOK) 1876 continue; /* AOK: we resubmitted it */ 1877 1878 1879 /* this iorq is now done (hasn't been restarted or anything) */ 1880 1881 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror) 1882 xdc_perror(iorq, iopb, 0); 1883 1884 /* now, if read/write check to make sure we got all the data 1885 * we needed. (this may not be the case if we got an error in 1886 * the middle of a multisector request). */ 1887 1888 if ((iorq->mode & XD_MODE_B144) != 0 && errs == 0 && 1889 (comm == XDCMD_RD || comm == XDCMD_WR)) { 1890 /* we just successfully processed a bad144 sector 1891 * note: if we are in bad 144 mode, the pointers have 1892 * been advanced already (see above) and are pointing 1893 * at the bad144 sector. to exit bad144 mode, we 1894 * must advance the pointers 1 sector and issue a new 1895 * request if there are still sectors left to process 1896 * 1897 */ 1898 XDC_ADVANCE(iorq, 1); /* advance 1 sector */ 1899 1900 /* exit b144 mode */ 1901 iorq->mode = iorq->mode & (~XD_MODE_B144); 1902 1903 if (iorq->sectcnt) { /* more to go! 
*/ 1904 iorq->lasterror = iorq->errno = iopb->errno = 0; 1905 iopb->errs = iopb->done = 0; 1906 iorq->tries = 0; 1907 iopb->sectcnt = iorq->sectcnt; 1908 iopb->cylno = iorq->blockno / 1909 iorq->xd->sectpercyl; 1910 iopb->headno = 1911 (iorq->blockno / iorq->xd->nhead) % 1912 iorq->xd->nhead; 1913 iopb->sectno = iorq->blockno % XDFM_BPS; 1914 iopb->daddr = 1915 dvma_kvtopa(iorq->dbuf, xdcsc->bustype); 1916 XDC_HWAIT(xdcsc, rqno); 1917 xdc_start(xdcsc, 1); /* resubmit */ 1918 continue; 1919 } 1920 } 1921 /* final cleanup, totally done with this request */ 1922 1923 switch (XD_STATE(iorq->mode)) { 1924 case XD_SUB_NORM: 1925 bp = iorq->buf; 1926 if (errs) { 1927 bp->b_error = EIO; 1928 bp->b_flags |= B_ERROR; 1929 bp->b_resid = iorq->sectcnt * XDFM_BPS; 1930 } else { 1931 bp->b_resid = 0; /* done */ 1932 } 1933 /* Sun3: map/unmap regardless of B_PHYS */ 1934 dvma_mapout(iorq->dbufbase, 1935 iorq->buf->b_bcount); 1936 disk_unbusy(&iorq->xd->sc_dk, 1937 (bp->b_bcount - bp->b_resid), 1938 (bp->b_flags & B_READ)); 1939 XDC_FREE(xdcsc, rqno); 1940 biodone(bp); 1941 break; 1942 case XD_SUB_WAIT: 1943 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1944 xdcsc->ndone++; 1945 wakeup(iorq); 1946 break; 1947 case XD_SUB_POLL: 1948 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1949 xdcsc->ndone++; 1950 break; 1951 } 1952 } 1953 1954 return (XD_ERR_AOK); 1955 } 1956 1957 /* 1958 * xdc_perror: print error. 1959 * - if still_trying is true: we got an error, retried and got a 1960 * different error. in that case lasterror is the old error, 1961 * and errno is the new one. 1962 * - if still_trying is not true, then if we ever had an error it 1963 * is in lasterror. also, if iorq->errno == 0, then we recovered 1964 * from that error (otherwise iorq->errno == iorq->lasterror). 1965 */ 1966 void 1967 xdc_perror(iorq, iopb, still_trying) 1968 struct xd_iorq *iorq; 1969 struct xd_iopb *iopb; 1970 int still_trying; 1971 1972 { 1973 1974 int error = iorq->lasterror; 1975 1976 printf("%s", (iorq->xd) ? 1977 iorq->xd->sc_dev.dv_xname : 1978 iorq->xdc->sc_dev.dv_xname); 1979 if (iorq->buf) 1980 printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev)); 1981 if (iopb->comm == XDCMD_RD || iopb->comm == XDCMD_WR) 1982 printf("%s %d/%d/%d: ", 1983 (iopb->comm == XDCMD_RD) ? "read" : "write", 1984 iopb->cylno, iopb->headno, iopb->sectno); 1985 printf("%s", xdc_e2str(error)); 1986 1987 if (still_trying) 1988 printf(" [still trying, new error=%s]", xdc_e2str(iorq->errno)); 1989 else 1990 if (iorq->errno == 0) 1991 printf(" [recovered in %d tries]", iorq->tries); 1992 1993 printf("\n"); 1994 } 1995 1996 /* 1997 * xdc_error: non-fatal error encountered... recover. 
 * return AOK if resubmitted, return FAIL if this iopb is done
 */
int
xdc_error(xdcsc, iorq, iopb, rqno, comm)
	struct xdc_softc *xdcsc;
	struct xd_iorq *iorq;
	struct xd_iopb *iopb;
	int rqno, comm;
{
	int errno = iorq->errno;
	int erract = errno & XD_ERA_MASK;
	int oldmode, advance, i;

	if (erract == XD_ERA_RSET) {	/* some errors require a reset */
		oldmode = iorq->mode;
		iorq->mode = XD_SUB_DONE | (~XD_SUB_MASK & oldmode);
		xdcsc->ndone++;
		/* make xdc_start ignore us */
		xdc_reset(xdcsc, 1, XD_RSET_NONE, errno, iorq->xd);
		iorq->mode = oldmode;
		xdcsc->ndone--;
	}

	/*
	 * check for a read/write to a sector in the bad144 table; if bad,
	 * redirect the request to the bad144 area
	 */

	if ((comm == XDCMD_RD || comm == XDCMD_WR) &&
	    (iorq->mode & XD_MODE_B144) == 0) {
		advance = iorq->sectcnt - iopb->sectcnt;
		XDC_ADVANCE(iorq, advance);
		if ((i = isbad(&iorq->xd->dkb,
		    iorq->blockno / iorq->xd->sectpercyl,
		    (iorq->blockno / iorq->xd->nsect) % iorq->xd->nhead,
		    iorq->blockno % iorq->xd->nsect)) != -1) {
			iorq->mode |= XD_MODE_B144;	/* enter bad144 mode &
							 * redirect */
			iopb->errno = iopb->done = iopb->errs = 0;
			iopb->sectcnt = 1;
			iopb->cylno = (iorq->xd->ncyl + iorq->xd->acyl) - 2;
			/* second to last acyl */
			i = iorq->xd->sectpercyl - 1 - i;	/* follow bad144
								 * standard */
			iopb->headno = i / iorq->xd->nsect;
			iopb->sectno = i % iorq->xd->nsect;
			XDC_HWAIT(xdcsc, rqno);
			xdc_start(xdcsc, 1);	/* resubmit */
			return (XD_ERR_AOK);	/* recovered! */
		}
	}

	/*
	 * it isn't a bad144 sector, so it must be a real error.  see if
	 * we can retry it.
	 */
	if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror)
		xdc_perror(iorq, iopb, 1);	/* inform of error state
						 * change */
	iorq->lasterror = errno;

	if ((erract == XD_ERA_RSET || erract == XD_ERA_HARD)
	    && iorq->tries < XDC_MAXTRIES) {	/* retry? */
		iorq->tries++;
		iorq->errno = iopb->errno = iopb->done = iopb->errs = 0;
		XDC_HWAIT(xdcsc, rqno);
		xdc_start(xdcsc, 1);	/* restart */
		return (XD_ERR_AOK);	/* recovered! */
	}

	/* failed to recover from this error */
	return (XD_ERR_FAIL);
}

/*
 * xdc_tick: make sure xd is still alive and ticking (err, kicking).
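 *
 * each active iorq carries a "ttl" which is decremented once per tick;
 * if any of them reaches zero the controller is assumed to be wedged
 * and is reset.  the routine re-arms itself with callout_reset(), and
 * is presumably primed the same way when the controller is attached,
 * along the lines of (sketch only):
 *
 *	callout_reset(&xdcsc->sc_tick_ch, XDC_TICKCNT, xdc_tick, xdcsc);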
 */
void
xdc_tick(arg)
	void *arg;
{
	struct xdc_softc *xdcsc = arg;
	int lcv, s, reset = 0;
#ifdef XDC_DIAG
	int wait, run, free, done, whd = 0;
	u_char fqc[XDC_MAXIOPB], wqc[XDC_MAXIOPB], mark[XDC_MAXIOPB];

	s = splbio();
	wait = xdcsc->nwait;
	run = xdcsc->nrun;
	free = xdcsc->nfree;
	done = xdcsc->ndone;
	memcpy(wqc, xdcsc->waitq, sizeof(wqc));
	memcpy(fqc, xdcsc->freereq, sizeof(fqc));
	splx(s);
	if (wait + run + free + done != XDC_MAXIOPB) {
		printf("%s: diag: IOPB miscount (got w/f/r/d %d/%d/%d/%d, wanted %d)\n",
		    xdcsc->sc_dev.dv_xname, wait, free, run, done,
		    XDC_MAXIOPB);
		memset(mark, 0, sizeof(mark));
		printf("FREE: ");
		for (lcv = free; lcv > 0; lcv--) {
			printf("%d ", fqc[lcv - 1]);
			mark[fqc[lcv - 1]] = 1;
		}
		printf("\nWAIT: ");
		lcv = wait;
		while (lcv > 0) {
			printf("%d ", wqc[whd]);
			mark[wqc[whd]] = 1;
			whd = (whd + 1) % XDC_MAXIOPB;
			lcv--;
		}
		printf("\n");
		for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
			if (mark[lcv] == 0)
				printf("MARK: running %d: mode %d done %d errs %d errno 0x%x ttl %d buf %p\n",
				    lcv, xdcsc->reqs[lcv].mode,
				    xdcsc->iopbase[lcv].done,
				    xdcsc->iopbase[lcv].errs,
				    xdcsc->iopbase[lcv].errno,
				    xdcsc->reqs[lcv].ttl,
				    xdcsc->reqs[lcv].buf);
		}
	} else
		if (done > XDC_MAXIOPB - XDC_SUBWAITLIM)
			printf("%s: diag: lots of done jobs (%d)\n",
			    xdcsc->sc_dev.dv_xname, done);
#endif
#ifdef XDC_DEBUG
	printf("%s: tick: csr 0x%x, w/f/r/d %d/%d/%d/%d\n",
	    xdcsc->sc_dev.dv_xname,
	    xdcsc->xdc->xdc_csr, xdcsc->nwait, xdcsc->nfree, xdcsc->nrun,
	    xdcsc->ndone);
	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
		if (xdcsc->reqs[lcv].mode)
			printf("running %d: mode %d done %d errs %d errno 0x%x\n",
			    lcv,
			    xdcsc->reqs[lcv].mode, xdcsc->iopbase[lcv].done,
			    xdcsc->iopbase[lcv].errs, xdcsc->iopbase[lcv].errno);
	}
#endif

	/* reduce ttl for each request; if one goes to zero, reset the xdc */
	s = splbio();
	for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) {
		if (xdcsc->reqs[lcv].mode == 0 ||
		    XD_STATE(xdcsc->reqs[lcv].mode) == XD_SUB_DONE)
			continue;
		xdcsc->reqs[lcv].ttl--;
		if (xdcsc->reqs[lcv].ttl == 0)
			reset = 1;
	}
	if (reset) {
		printf("%s: watchdog timeout\n", xdcsc->sc_dev.dv_xname);
		xdc_reset(xdcsc, 0, XD_RSET_NONE, XD_ERR_FAIL, NULL);
	}
	splx(s);

	/* until next time */

	callout_reset(&xdcsc->sc_tick_ch, XDC_TICKCNT, xdc_tick, xdcsc);
}

/*
 * xdc_ioctlcmd: this function provides a user level interface to the
 * controller via ioctl.  this allows "format" programs to be written
 * in user code, and is also useful for some debugging.  we return
 * an error code.  called at user priority.
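 *
 * a user-level "format" program would fill in a struct xd_iocmd and
 * hand it to the raw device with an ioctl, roughly as follows (a
 * sketch only; the DIOSXDCMD request name is assumed to come from
 * xio.h, and error handling is omitted):
 *
 *	struct xd_iocmd xio;
 *	char buf[XDFM_BPS];
 *
 *	memset(&xio, 0, sizeof(xio));
 *	xio.cmd = XDCMD_RD;
 *	xio.block = 0;
 *	xio.sectcnt = 1;
 *	xio.dlen = XDFM_BPS;
 *	xio.dptr = buf;
 *	if (ioctl(fd, DIOSXDCMD, &xio) == -1)
 *		err(1, "DIOSXDCMD");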
 */
int
xdc_ioctlcmd(xd, dev, xio)
	struct xd_softc *xd;
	dev_t dev;
	struct xd_iocmd *xio;
{
	int s, err, rqno;
	caddr_t dvmabuf = NULL;
	struct xdc_softc *xdcsc;

	/* check sanity of requested command */

	switch (xio->cmd) {

	case XDCMD_NOP:	/* no op: everything should be zero */
		if (xio->subfn || xio->dptr || xio->dlen ||
		    xio->block || xio->sectcnt)
			return (EINVAL);
		break;

	case XDCMD_RD:	/* read / write sectors (up to XD_IOCMD_MAXS) */
	case XDCMD_WR:
		if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS ||
		    xio->sectcnt * XDFM_BPS != xio->dlen || xio->dptr == NULL)
			return (EINVAL);
		break;

	case XDCMD_SK:	/* seek: doesn't seem useful to export this */
		return (EINVAL);

	case XDCMD_WRP:	/* write parameters */
		return (EINVAL);	/* not useful, except maybe drive
					 * parameters... but drive parameters
					 * should go via disklabel changes */

	case XDCMD_RDP:	/* read parameters */
		if (xio->subfn != XDFUN_DRV ||
		    xio->dlen || xio->block || xio->dptr)
			return (EINVAL);	/* allow read drive params to
						 * get hw_spt */
		xio->sectcnt = xd->hw_spt;	/* we already know the answer */
		return (0);
		break;

	case XDCMD_XRD:	/* extended read/write */
	case XDCMD_XWR:

		switch (xio->subfn) {

		case XDFUN_THD:	/* track headers */
			if (xio->sectcnt != xd->hw_spt ||
			    (xio->block % xd->nsect) != 0 ||
			    xio->dlen != XD_IOCMD_HSZ * xd->hw_spt ||
			    xio->dptr == NULL)
				return (EINVAL);
			xio->sectcnt = 0;
			break;

		case XDFUN_FMT:	/* NOTE: also XDFUN_VFY */
			if (xio->cmd == XDCMD_XRD)
				return (EINVAL);	/* no XDFUN_VFY */
			if (xio->sectcnt || xio->dlen ||
			    (xio->block % xd->nsect) != 0 || xio->dptr)
				return (EINVAL);
			break;

		case XDFUN_HDR:	/* header, header verify, data, data ECC */
			return (EINVAL);	/* not yet */

		case XDFUN_DM:	/* defect map */
		case XDFUN_DMX:	/* defect map (alternate location) */
			if (xio->sectcnt || xio->dlen != XD_IOCMD_DMSZ ||
			    (xio->block % xd->nsect) != 0 || xio->dptr == NULL)
				return (EINVAL);
			break;

		default:
			return (EINVAL);
		}
		break;

	case XDCMD_TST:	/* diagnostics */
		return (EINVAL);

	default:
		return (EINVAL);	/* ??? */
	}

	/* create DVMA buffer for request if needed */

	if (xio->dlen) {
		dvmabuf = dvma_malloc(xio->dlen);
		if (xio->cmd == XDCMD_WR || xio->cmd == XDCMD_XWR) {
			err = copyin(xio->dptr, dvmabuf, xio->dlen);
			if (err) {
				dvma_free(dvmabuf, xio->dlen);
				return (err);
			}
		}
	}

	/* do it! */
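	/*
	 * the command goes in with XD_SUB_WAIT, i.e. the submitter sleeps
	 * until xdc_remove_iorq() marks the request done and does the
	 * wakeup; the resulting error code and retry count are then copied
	 * back into the caller's xd_iocmd.
	 */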

	err = 0;
	xdcsc = xd->parent;
	s = splbio();
	rqno = xdc_cmd(xdcsc, xio->cmd, xio->subfn, xd->xd_drive, xio->block,
	    xio->sectcnt, dvmabuf, XD_SUB_WAIT);
	if (rqno == XD_ERR_FAIL) {
		err = EIO;
		goto done;
	}
	xio->errno = xdcsc->reqs[rqno].errno;
	xio->tries = xdcsc->reqs[rqno].tries;
	XDC_DONE(xdcsc, rqno, err);

	if (xio->cmd == XDCMD_RD || xio->cmd == XDCMD_XRD)
		err = copyout(dvmabuf, xio->dptr, xio->dlen);

done:
	splx(s);
	if (dvmabuf)
		dvma_free(dvmabuf, xio->dlen);
	return (err);
}

/*
 * xdc_e2str: convert error code number into an error string
 */
char *
xdc_e2str(no)
	int no;
{
	switch (no) {
	case XD_ERR_FAIL:
		return ("Software fatal error");
	case XD_ERR_AOK:
		return ("Successful completion");
	case XD_ERR_ICYL:
		return ("Illegal cylinder address");
	case XD_ERR_IHD:
		return ("Illegal head address");
	case XD_ERR_ISEC:
		return ("Illegal sector address");
	case XD_ERR_CZER:
		return ("Count zero");
	case XD_ERR_UIMP:
		return ("Unimplemented command");
	case XD_ERR_IF1:
		return ("Illegal field length 1");
	case XD_ERR_IF2:
		return ("Illegal field length 2");
	case XD_ERR_IF3:
		return ("Illegal field length 3");
	case XD_ERR_IF4:
		return ("Illegal field length 4");
	case XD_ERR_IF5:
		return ("Illegal field length 5");
	case XD_ERR_IF6:
		return ("Illegal field length 6");
	case XD_ERR_IF7:
		return ("Illegal field length 7");
	case XD_ERR_ISG:
		return ("Illegal scatter/gather length");
	case XD_ERR_ISPT:
		return ("Not enough sectors per track");
	case XD_ERR_ALGN:
		return ("Next IOPB address alignment error");
	case XD_ERR_SGAL:
		return ("Scatter/gather address alignment error");
	case XD_ERR_SGEC:
		return ("Scatter/gather with auto-ECC");
	case XD_ERR_SECC:
		return ("Soft ECC corrected");
	case XD_ERR_SIGN:
		return ("ECC ignored");
	case XD_ERR_ASEK:
		return ("Auto-seek retry recovered");
	case XD_ERR_RTRY:
		return ("Soft retry recovered");
	case XD_ERR_HECC:
		return ("Hard data ECC");
	case XD_ERR_NHDR:
		return ("Header not found");
	case XD_ERR_NRDY:
		return ("Drive not ready");
	case XD_ERR_TOUT:
		return ("Operation timeout");
	case XD_ERR_VTIM:
		return ("VMEDMA timeout");
	case XD_ERR_DSEQ:
		return ("Disk sequencer error");
	case XD_ERR_HDEC:
		return ("Header ECC error");
	case XD_ERR_RVFY:
		return ("Read verify");
	case XD_ERR_VFER:
		return ("Fatal VMEDMA error");
	case XD_ERR_VBUS:
		return ("VMEbus error");
	case XD_ERR_DFLT:
		return ("Drive faulted");
	case XD_ERR_HECY:
		return ("Header error/cylinder");
	case XD_ERR_HEHD:
		return ("Header error/head");
	case XD_ERR_NOCY:
		return ("Drive not on-cylinder");
	case XD_ERR_SEEK:
		return ("Seek error");
	case XD_ERR_ILSS:
		return ("Illegal sector size");
	case XD_ERR_SEC:
		return ("Soft ECC");
	case XD_ERR_WPER:
		return ("Write-protect error");
	case XD_ERR_IRAM:
		return ("IRAM self test failure");
	case XD_ERR_MT3:
		return ("Maintenance test 3 failure (DSKCEL RAM)");
	case XD_ERR_MT4:
		return ("Maintenance test 4 failure (header shift reg)");
	case XD_ERR_MT5:
		return ("Maintenance test 5 failure (VMEDMA regs)");
	case XD_ERR_MT6:
		return ("Maintenance test 6 failure (REGCEL chip)");
	case XD_ERR_MT7:
		return ("Maintenance test 7 failure (buffer parity)");
	case XD_ERR_MT8:
		return ("Maintenance test 8 failure (disk FIFO)");
	case XD_ERR_IOCK:
		return ("IOPB checksum miscompare");
	case XD_ERR_IODM:
		return ("IOPB DMA fatal");
	case XD_ERR_IOAL:
		return ("IOPB address alignment error");
	case XD_ERR_FIRM:
		return ("Firmware error");
	case XD_ERR_MMOD:
		return ("Illegal maintenance mode test number");
	case XD_ERR_ACFL:
		return ("ACFAIL asserted");
	default:
		return ("Unknown error");
	}
}