1 /* $NetBSD: xd.c,v 1.47 2004/10/28 07:07:38 yamt Exp $ */ 2 3 /* 4 * 5 * Copyright (c) 1995 Charles D. Cranor 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Charles D. Cranor. 19 * 4. The name of the author may not be used to endorse or promote products 20 * derived from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * 36 * x d . c x y l o g i c s 7 5 3 / 7 0 5 3 v m e / s m d d r i v e r 37 * 38 * author: Chuck Cranor <chuck@ccrc.wustl.edu> 39 * id: &Id: xd.c,v 1.9 1995/09/25 20:12:44 chuck Exp & 40 * started: 27-Feb-95 41 * references: [1] Xylogics Model 753 User's Manual 42 * part number: 166-753-001, Revision B, May 21, 1988. 43 * "Your Partner For Performance" 44 * [2] other NetBSD disk device drivers 45 * 46 * Special thanks go to Scott E. Campbell of Xylogics, Inc. for taking 47 * the time to answer some of my questions about the 753/7053. 48 * 49 * note: the 753 and the 7053 are programmed the same way, but are 50 * different sizes. the 753 is a 6U VME card, while the 7053 is a 9U 51 * VME card (found in many VME based suns). 
52 */ 53 54 #include <sys/cdefs.h> 55 __KERNEL_RCSID(0, "$NetBSD: xd.c,v 1.47 2004/10/28 07:07:38 yamt Exp $"); 56 57 #undef XDC_DEBUG /* full debug */ 58 #define XDC_DIAG /* extra sanity checks */ 59 #if defined(DIAGNOSTIC) && !defined(XDC_DIAG) 60 #define XDC_DIAG /* link in with master DIAG option */ 61 #endif 62 63 #include <sys/param.h> 64 #include <sys/proc.h> 65 #include <sys/systm.h> 66 #include <sys/kernel.h> 67 #include <sys/file.h> 68 #include <sys/stat.h> 69 #include <sys/ioctl.h> 70 #include <sys/buf.h> 71 #include <sys/bufq.h> 72 #include <sys/uio.h> 73 #include <sys/malloc.h> 74 #include <sys/device.h> 75 #include <sys/disklabel.h> 76 #include <sys/disk.h> 77 #include <sys/syslog.h> 78 #include <sys/dkbad.h> 79 #include <sys/conf.h> 80 81 #include <uvm/uvm_extern.h> 82 83 #include <machine/autoconf.h> 84 #include <machine/dvma.h> 85 86 #include <dev/sun/disklabel.h> 87 88 #include <sun3/dev/xdreg.h> 89 #include <sun3/dev/xdvar.h> 90 #include <sun3/dev/xio.h> 91 92 #include "locators.h" 93 94 /* 95 * Print a complaint when no xd children were specified 96 * in the config file. Better than a link error... 97 * 98 * XXX: Some folks say this driver should be split in two, 99 * but that seems pointless with ONLY one type of child. 100 */ 101 #include "xd.h" 102 #if NXD == 0 103 #error "xdc but no xd?" 104 #endif 105 106 /* 107 * macros 108 */ 109 110 /* 111 * XDC_TWAIT: add iorq "N" to tail of SC's wait queue 112 */ 113 #define XDC_TWAIT(SC, N) { \ 114 (SC)->waitq[(SC)->waitend] = (N); \ 115 (SC)->waitend = ((SC)->waitend + 1) % XDC_MAXIOPB; \ 116 (SC)->nwait++; \ 117 } 118 119 /* 120 * XDC_HWAIT: add iorq "N" to head of SC's wait queue 121 */ 122 #define XDC_HWAIT(SC, N) { \ 123 (SC)->waithead = ((SC)->waithead == 0) ? \ 124 (XDC_MAXIOPB - 1) : ((SC)->waithead - 1); \ 125 (SC)->waitq[(SC)->waithead] = (N); \ 126 (SC)->nwait++; \ 127 } 128 129 /* 130 * XDC_GET_WAITER: gets the first request waiting on the waitq 131 * and removes it (so it can be submitted) 132 */ 133 #define XDC_GET_WAITER(XDCSC, RQ) { \ 134 (RQ) = (XDCSC)->waitq[(XDCSC)->waithead]; \ 135 (XDCSC)->waithead = ((XDCSC)->waithead + 1) % XDC_MAXIOPB; \ 136 xdcsc->nwait--; \ 137 } 138 139 /* 140 * XDC_FREE: add iorq "N" to SC's free list 141 */ 142 #define XDC_FREE(SC, N) { \ 143 (SC)->freereq[(SC)->nfree++] = (N); \ 144 (SC)->reqs[N].mode = 0; \ 145 if ((SC)->nfree == 1) wakeup(&(SC)->nfree); \ 146 } 147 148 149 /* 150 * XDC_RQALLOC: allocate an iorq off the free list (assume nfree > 0). 151 */ 152 #define XDC_RQALLOC(XDCSC) (XDCSC)->freereq[--((XDCSC)->nfree)] 153 154 /* 155 * XDC_GO: start iopb ADDR (DVMA addr in a u_long) on XDC 156 */ 157 #define XDC_GO(XDC, ADDR) { \ 158 (XDC)->xdc_iopbaddr0 = ((ADDR) & 0xff); \ 159 (ADDR) = ((ADDR) >> 8); \ 160 (XDC)->xdc_iopbaddr1 = ((ADDR) & 0xff); \ 161 (ADDR) = ((ADDR) >> 8); \ 162 (XDC)->xdc_iopbaddr2 = ((ADDR) & 0xff); \ 163 (ADDR) = ((ADDR) >> 8); \ 164 (XDC)->xdc_iopbaddr3 = (ADDR); \ 165 (XDC)->xdc_iopbamod = XDC_ADDRMOD; \ 166 (XDC)->xdc_csr = XDC_ADDIOPB; /* go! */ \ 167 } 168 169 /* 170 * XDC_WAIT: wait for XDC's csr "BITS" to come on in "TIME". 171 * LCV is a counter. If it goes to zero then we timed out. 
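 *
 * for illustration (a sketch, not extra driver code): a typical caller
 * busy-waits on a csr bit and then looks at the counter to see whether
 * the loop timed out, e.g. the reset path below does roughly
 *
 *	XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET);
 *	if (del <= 0)
 *		... timed out, the bit never came on ...
 *
 * the counter is left greater than zero on success and zero on timeout.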
172 */ 173 #define XDC_WAIT(XDC, LCV, TIME, BITS) { \ 174 (LCV) = (TIME); \ 175 while ((LCV) > 0) { \ 176 if ((XDC)->xdc_csr & (BITS)) break; \ 177 (LCV) = (LCV) - 1; \ 178 DELAY(1); \ 179 } \ 180 } 181 182 /* 183 * XDC_DONE: don't need IORQ, get error code and free (done after xdc_cmd) 184 */ 185 #define XDC_DONE(SC,RQ,ER) { \ 186 if ((RQ) == XD_ERR_FAIL) { \ 187 (ER) = (RQ); \ 188 } else { \ 189 if ((SC)->ndone-- == XDC_SUBWAITLIM) \ 190 wakeup(&(SC)->ndone); \ 191 (ER) = (SC)->reqs[RQ].errno; \ 192 XDC_FREE((SC), (RQ)); \ 193 } \ 194 } 195 196 /* 197 * XDC_ADVANCE: advance iorq's pointers by a number of sectors 198 */ 199 #define XDC_ADVANCE(IORQ, N) { \ 200 if (N) { \ 201 (IORQ)->sectcnt -= (N); \ 202 (IORQ)->blockno += (N); \ 203 (IORQ)->dbuf += ((N)*XDFM_BPS); \ 204 } \ 205 } 206 207 /* 208 * note - addresses you can sleep on: 209 * [1] & of xd_softc's "state" (waiting for a chance to attach a drive) 210 * [2] & of xdc_softc's "nfree" (waiting for a free iorq/iopb) 211 * [3] & of xdc_softc's "ndone" (waiting for number of done iorq/iopb's 212 * to drop below XDC_SUBWAITLIM) 213 * [4] & an iorq (waiting for an XD_SUB_WAIT iorq to finish) 214 */ 215 216 217 /* 218 * function prototypes 219 * "xdc_*" functions are internal, all others are external interfaces 220 */ 221 222 /* internals */ 223 int xdc_cmd __P((struct xdc_softc *, int, int, int, int, int, char *, int)); 224 char *xdc_e2str __P((int)); 225 int xdc_error __P((struct xdc_softc *, struct xd_iorq *, 226 struct xd_iopb *, int, int)); 227 int xdc_ioctlcmd __P((struct xd_softc *, dev_t dev, struct xd_iocmd *)); 228 void xdc_perror __P((struct xd_iorq *, struct xd_iopb *, int)); 229 int xdc_piodriver __P((struct xdc_softc *, int, int)); 230 int xdc_remove_iorq __P((struct xdc_softc *)); 231 int xdc_reset __P((struct xdc_softc *, int, int, int, struct xd_softc *)); 232 inline void xdc_rqinit __P((struct xd_iorq *, struct xdc_softc *, 233 struct xd_softc *, int, u_long, int, 234 caddr_t, struct buf *)); 235 void xdc_rqtopb __P((struct xd_iorq *, struct xd_iopb *, int, int)); 236 void xdc_start __P((struct xdc_softc *, int)); 237 int xdc_startbuf __P((struct xdc_softc *, struct xd_softc *, struct buf *)); 238 int xdc_submit_iorq __P((struct xdc_softc *, int, int)); 239 void xdc_tick __P((void *)); 240 void xdc_xdreset __P((struct xdc_softc *, struct xd_softc *)); 241 242 /* machine interrupt hook */ 243 int xdcintr __P((void *)); 244 245 /* autoconf */ 246 static int xdcmatch __P((struct device *, struct cfdata *, void *)); 247 static void xdcattach __P((struct device *, struct device *, void *)); 248 static int xdc_print __P((void *, const char *name)); 249 250 static int xdmatch __P((struct device *, struct cfdata *, void *)); 251 static void xdattach __P((struct device *, struct device *, void *)); 252 static void xd_init __P((struct xd_softc *)); 253 254 static void xddummystrat __P((struct buf *)); 255 int xdgetdisklabel __P((struct xd_softc *, void *)); 256 257 /* 258 * cfattach's: device driver interface to autoconfig 259 */ 260 261 CFATTACH_DECL(xdc, sizeof(struct xdc_softc), 262 xdcmatch, xdcattach, NULL, NULL); 263 264 CFATTACH_DECL(xd, sizeof(struct xd_softc), 265 xdmatch, xdattach, NULL, NULL); 266 267 extern struct cfdriver xd_cd; 268 269 struct xdc_attach_args { /* this is the "aux" args to xdattach */ 270 int driveno; /* unit number */ 271 char *dvmabuf; /* scratch buffer for reading disk label */ 272 int fullmode; /* submit mode */ 273 int booting; /* are we booting or not? 
*/ 274 }; 275 276 dev_type_open(xdopen); 277 dev_type_close(xdclose); 278 dev_type_read(xdread); 279 dev_type_write(xdwrite); 280 dev_type_ioctl(xdioctl); 281 dev_type_strategy(xdstrategy); 282 dev_type_dump(xddump); 283 dev_type_size(xdsize); 284 285 const struct bdevsw xd_bdevsw = { 286 xdopen, xdclose, xdstrategy, xdioctl, xddump, xdsize, D_DISK 287 }; 288 289 const struct cdevsw xd_cdevsw = { 290 xdopen, xdclose, xdread, xdwrite, xdioctl, 291 nostop, notty, nopoll, nommap, nokqfilter, D_DISK 292 }; 293 294 /* 295 * dkdriver 296 */ 297 298 struct dkdriver xddkdriver = {xdstrategy}; 299 300 /* 301 * start: disk label fix code (XXX) 302 */ 303 304 static void *xd_labeldata; 305 306 static void 307 xddummystrat(bp) 308 struct buf *bp; 309 { 310 if (bp->b_bcount != XDFM_BPS) 311 panic("xddummystrat"); 312 memcpy(bp->b_data, xd_labeldata, XDFM_BPS); 313 bp->b_flags |= B_DONE; 314 bp->b_flags &= ~B_BUSY; 315 } 316 317 int 318 xdgetdisklabel(xd, b) 319 struct xd_softc *xd; 320 void *b; 321 { 322 const char *err; 323 struct sun_disklabel *sdl; 324 325 /* We already have the label data in `b'; setup for dummy strategy */ 326 xd_labeldata = b; 327 328 /* Required parameter for readdisklabel() */ 329 xd->sc_dk.dk_label->d_secsize = XDFM_BPS; 330 331 err = readdisklabel(MAKEDISKDEV(0, xd->sc_dev.dv_unit, RAW_PART), 332 xddummystrat, 333 xd->sc_dk.dk_label, xd->sc_dk.dk_cpulabel); 334 if (err) { 335 printf("%s: %s\n", xd->sc_dev.dv_xname, err); 336 return(XD_ERR_FAIL); 337 } 338 339 /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */ 340 sdl = (struct sun_disklabel *)xd->sc_dk.dk_cpulabel->cd_block; 341 if (sdl->sl_magic == SUN_DKMAGIC) 342 xd->pcyl = sdl->sl_pcyl; 343 else { 344 printf("%s: WARNING: no `pcyl' in disk label.\n", 345 xd->sc_dev.dv_xname); 346 xd->pcyl = xd->sc_dk.dk_label->d_ncylinders + 347 xd->sc_dk.dk_label->d_acylinders; 348 printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n", 349 xd->sc_dev.dv_xname, xd->pcyl); 350 } 351 352 xd->ncyl = xd->sc_dk.dk_label->d_ncylinders; 353 xd->acyl = xd->sc_dk.dk_label->d_acylinders; 354 xd->nhead = xd->sc_dk.dk_label->d_ntracks; 355 xd->nsect = xd->sc_dk.dk_label->d_nsectors; 356 xd->sectpercyl = xd->nhead * xd->nsect; 357 xd->sc_dk.dk_label->d_secsize = XDFM_BPS; /* not handled by 358 * sun->bsd */ 359 return(XD_ERR_AOK); 360 } 361 362 /* 363 * end: disk label fix code (XXX) 364 */ 365 366 /* 367 * a u t o c o n f i g f u n c t i o n s 368 */ 369 370 /* 371 * xdcmatch: determine if xdc is present or not. we do a 372 * soft reset to detect the xdc. 373 */ 374 375 int xdcmatch(parent, cf, aux) 376 struct device *parent; 377 struct cfdata *cf; 378 void *aux; 379 { 380 struct confargs *ca = aux; 381 382 /* No default VME address. */ 383 if (ca->ca_paddr == -1) 384 return (0); 385 386 /* Make sure something is there... */ 387 if (bus_peek(ca->ca_bustype, ca->ca_paddr + 11, 1) == -1) 388 return (0); 389 390 /* Default interrupt priority. */ 391 if (ca->ca_intpri == -1) 392 ca->ca_intpri = 2; 393 394 return (1); 395 } 396 397 /* 398 * xdcattach: attach controller 399 */ 400 void 401 xdcattach(parent, self, aux) 402 struct device *parent, *self; 403 void *aux; 404 { 405 struct xdc_softc *xdc = (void *) self; 406 struct confargs *ca = aux; 407 struct xdc_attach_args xa; 408 int lcv, rqno, err; 409 struct xd_iopb_ctrl *ctl; 410 411 /* get addressing and intr level stuff from autoconfig and load it 412 * into our xdc_softc. 
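 *
 * roughly (a summary of the assignments just below, nothing new):
 *
 *	xdc->xdc     = bus_mapin(ca->ca_bustype, ca->ca_paddr,
 *	                         sizeof(struct xdc));	csr/iopb registers
 *	xdc->bustype = ca->ca_bustype;
 *	xdc->ipl     = ca->ca_intpri;			interrupt priority
 *	xdc->vector  = ca->ca_intvec;			VME interrupt vector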
*/ 413 414 xdc->xdc = (struct xdc *) 415 bus_mapin(ca->ca_bustype, ca->ca_paddr, sizeof(struct xdc)); 416 xdc->bustype = ca->ca_bustype; 417 xdc->ipl = ca->ca_intpri; 418 xdc->vector = ca->ca_intvec; 419 420 for (lcv = 0; lcv < XDC_MAXDEV; lcv++) 421 xdc->sc_drives[lcv] = (struct xd_softc *) 0; 422 423 /* allocate and zero buffers 424 * 425 * note: we simplify the code by allocating the max number of iopbs and 426 * iorq's up front. thus, we avoid linked lists and the costs 427 * associated with them in exchange for wasting a little memory. */ 428 429 xdc->iopbase = (struct xd_iopb *) 430 dvma_malloc(XDC_MAXIOPB * sizeof(struct xd_iopb)); /* KVA */ 431 memset(xdc->iopbase, 0, XDC_MAXIOPB * sizeof(struct xd_iopb)); 432 xdc->dvmaiopb = (struct xd_iopb *) 433 dvma_kvtopa(xdc->iopbase, xdc->bustype); 434 xdc->reqs = (struct xd_iorq *) 435 malloc(XDC_MAXIOPB * sizeof(struct xd_iorq), M_DEVBUF, M_NOWAIT); 436 if (xdc->reqs == NULL) 437 panic("xdc malloc"); 438 memset(xdc->reqs, 0, XDC_MAXIOPB * sizeof(struct xd_iorq)); 439 440 /* init free list, iorq to iopb pointers, and non-zero fields in the 441 * iopb which never change. */ 442 443 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 444 xdc->reqs[lcv].iopb = &xdc->iopbase[lcv]; 445 xdc->freereq[lcv] = lcv; 446 xdc->iopbase[lcv].fixd = 1; /* always the same */ 447 xdc->iopbase[lcv].naddrmod = XDC_ADDRMOD; /* always the same */ 448 xdc->iopbase[lcv].intr_vec = xdc->vector; /* always the same */ 449 } 450 xdc->nfree = XDC_MAXIOPB; 451 xdc->nrun = 0; 452 xdc->waithead = xdc->waitend = xdc->nwait = 0; 453 xdc->ndone = 0; 454 455 /* init queue of waiting bufs */ 456 457 bufq_alloc(&xdc->sc_wq, BUFQ_FCFS); 458 callout_init(&xdc->sc_tick_ch); 459 460 /* 461 * section 7 of the manual tells us how to init the controller: 462 * - read controller parameters (6/0) 463 * - write controller parameters (5/0) 464 */ 465 466 /* read controller parameters and insure we have a 753/7053 */ 467 468 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL); 469 if (rqno == XD_ERR_FAIL) { 470 printf(": couldn't read controller params\n"); 471 return; /* shouldn't ever happen */ 472 } 473 ctl = (struct xd_iopb_ctrl *) & xdc->iopbase[rqno]; 474 if (ctl->ctype != XDCT_753) { 475 if (xdc->reqs[rqno].errno) 476 printf(": %s: ", xdc_e2str(xdc->reqs[rqno].errno)); 477 printf(": doesn't identify as a 753/7053\n"); 478 XDC_DONE(xdc, rqno, err); 479 return; 480 } 481 printf(": Xylogics 753/7053, PROM=0x%x.%02x.%02x\n", 482 ctl->eprom_partno, ctl->eprom_lvl, ctl->eprom_rev); 483 XDC_DONE(xdc, rqno, err); 484 485 /* now write controller parameters (xdc_cmd sets all params for us) */ 486 487 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL); 488 XDC_DONE(xdc, rqno, err); 489 if (err) { 490 printf("%s: controller config error: %s\n", 491 xdc->sc_dev.dv_xname, xdc_e2str(err)); 492 return; 493 } 494 495 /* link in interrupt with higher level software */ 496 isr_add_vectored(xdcintr, (void *)xdc, 497 ca->ca_intpri, ca->ca_intvec); 498 evcnt_attach_dynamic(&xdc->sc_intrcnt, EVCNT_TYPE_INTR, NULL, 499 xdc->sc_dev.dv_xname, "intr"); 500 501 /* now we must look for disks using autoconfig */ 502 xa.booting = 1; 503 for (xa.driveno = 0; xa.driveno < XDC_MAXDEV; xa.driveno++) 504 (void) config_found(self, (void *) &xa, xdc_print); 505 506 /* start the watchdog clock */ 507 callout_reset(&xdc->sc_tick_ch, XDC_TICKCNT, xdc_tick, xdc); 508 } 509 510 int 511 xdc_print(aux, name) 512 void *aux; 513 const char *name; 514 { 515 struct xdc_attach_args *xa = aux; 516 517 if (name 
!= NULL) 518 aprint_normal("%s: ", name); 519 520 if (xa->driveno != -1) 521 aprint_normal(" drive %d", xa->driveno); 522 523 return UNCONF; 524 } 525 526 /* 527 * xdmatch: probe for disk. 528 * 529 * note: we almost always say disk is present. this allows us to 530 * spin up and configure a disk after the system is booted (we can 531 * call xdattach!). Also, wire down the relationship between the 532 * xd* and xdc* devices, to simplify boot device identification. 533 */ 534 int 535 xdmatch(parent, cf, aux) 536 struct device *parent; 537 struct cfdata *cf; 538 void *aux; 539 { 540 struct xdc_attach_args *xa = aux; 541 int xd_unit; 542 543 /* Match only on the "wired-down" controller+disk. */ 544 xd_unit = parent->dv_unit * 2 + xa->driveno; 545 if (cf->cf_unit != xd_unit) 546 return (0); 547 548 return (1); 549 } 550 551 /* 552 * xdattach: attach a disk. 553 */ 554 void 555 xdattach(parent, self, aux) 556 struct device *parent, *self; 557 void *aux; 558 559 { 560 struct xd_softc *xd = (void *) self; 561 struct xdc_softc *xdc = (void *) parent; 562 struct xdc_attach_args *xa = aux; 563 564 printf("\n"); 565 566 /* 567 * Always re-initialize the disk structure. We want statistics 568 * to start with a clean slate. 569 */ 570 memset(&xd->sc_dk, 0, sizeof(xd->sc_dk)); 571 xd->sc_dk.dk_driver = &xddkdriver; 572 xd->sc_dk.dk_name = xd->sc_dev.dv_xname; 573 574 xd->state = XD_DRIVE_UNKNOWN; /* to start */ 575 xd->flags = 0; 576 xd->parent = xdc; 577 578 xd->xd_drive = xa->driveno; 579 xdc->sc_drives[xa->driveno] = xd; 580 581 /* Do init work common to attach and open. */ 582 xd_init(xd); 583 } 584 585 /* 586 * end of autoconfig functions 587 */ 588 589 /* 590 * Initialize a disk. This can be called from both autoconf and 591 * also from xdopen/xdstrategy. 592 */ 593 static void 594 xd_init(xd) 595 struct xd_softc *xd; 596 { 597 struct xdc_softc *xdc; 598 struct dkbad *dkb; 599 struct xd_iopb_drive *driopb; 600 void *dvmabuf; 601 int rqno, err, spt, mb, blk, lcv, fullmode, newstate; 602 603 xdc = xd->parent; 604 xd->state = XD_DRIVE_ATTACHING; 605 newstate = XD_DRIVE_UNKNOWN; 606 fullmode = (cold) ? XD_SUB_POLL : XD_SUB_WAIT; 607 dvmabuf = dvma_malloc(XDFM_BPS); 608 609 /* first try and reset the drive */ 610 rqno = xdc_cmd(xdc, XDCMD_RST, 0, xd->xd_drive, 0, 0, 0, fullmode); 611 XDC_DONE(xdc, rqno, err); 612 if (err == XD_ERR_NRDY) { 613 printf("%s: drive %d: off-line\n", 614 xd->sc_dev.dv_xname, xd->xd_drive); 615 goto done; 616 } 617 if (err) { 618 printf("%s: ERROR 0x%02x (%s)\n", 619 xd->sc_dev.dv_xname, err, xdc_e2str(err)); 620 goto done; 621 } 622 printf("%s: drive %d ready\n", 623 xd->sc_dev.dv_xname, xd->xd_drive); 624 625 /* now set format parameters */ 626 627 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_FMT, xd->xd_drive, 628 0, 0, 0, fullmode); 629 XDC_DONE(xdc, rqno, err); 630 if (err) { 631 printf("%s: write format parameters failed: %s\n", 632 xd->sc_dev.dv_xname, xdc_e2str(err)); 633 goto done; 634 } 635 636 /* get drive parameters */ 637 spt = 0; 638 rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_DRV, xd->xd_drive, 639 0, 0, 0, fullmode); 640 if (rqno != XD_ERR_FAIL) { 641 driopb = (struct xd_iopb_drive *) & xdc->iopbase[rqno]; 642 spt = driopb->sectpertrk; 643 } 644 XDC_DONE(xdc, rqno, err); 645 if (err) { 646 printf("%s: read drive parameters failed: %s\n", 647 xd->sc_dev.dv_xname, xdc_e2str(err)); 648 goto done; 649 } 650 651 /* 652 * now set drive parameters (to semi-bogus values) so we can read the 653 * disk label. 
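 *
 * a 1 cyl / 1 head / 1 sector geometry is enough to address block 0,
 * which is all we need until the real label has been read. in outline,
 * the rest of xd_init() goes:
 *
 *	write bogus geometry + empty bad144 table   (XDCMD_WRP/XDFUN_DRV)
 *	read block 0 into dvmabuf                   (XDCMD_RD)
 *	xdgetdisklabel()  -> pcyl/ncyl/acyl/nhead/nsect
 *	write the real drive parameters             (XDCMD_WRP/XDFUN_DRV)
 *	read + sanity-check the bad144 table        -> xd->dkb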
654 */ 655 xd->pcyl = xd->ncyl = 1; 656 xd->acyl = 0; 657 xd->nhead = 1; 658 xd->nsect = 1; 659 xd->sectpercyl = 1; 660 for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */ 661 xd->dkb.bt_bad[lcv].bt_cyl = 662 xd->dkb.bt_bad[lcv].bt_trksec = 0xffff; 663 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 664 0, 0, 0, fullmode); 665 XDC_DONE(xdc, rqno, err); 666 if (err) { 667 printf("%s: write drive parameters failed: %s\n", 668 xd->sc_dev.dv_xname, xdc_e2str(err)); 669 goto done; 670 } 671 672 /* read disk label */ 673 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, 674 0, 1, dvmabuf, fullmode); 675 XDC_DONE(xdc, rqno, err); 676 if (err) { 677 printf("%s: reading disk label failed: %s\n", 678 xd->sc_dev.dv_xname, xdc_e2str(err)); 679 goto done; 680 } 681 newstate = XD_DRIVE_NOLABEL; 682 683 xd->hw_spt = spt; 684 /* Attach the disk: must be before getdisklabel to malloc label */ 685 disk_attach(&xd->sc_dk); 686 687 if (xdgetdisklabel(xd, dvmabuf) != XD_ERR_AOK) 688 goto done; 689 690 /* inform the user of what is up */ 691 printf("%s: <%s>, pcyl %d, hw_spt %d\n", 692 xd->sc_dev.dv_xname, 693 (char *)dvmabuf, xd->pcyl, spt); 694 mb = xd->ncyl * (xd->nhead * xd->nsect) / (1048576 / XDFM_BPS); 695 printf("%s: %dMB, %d cyl, %d head, %d sec\n", 696 xd->sc_dev.dv_xname, mb, 697 xd->ncyl, xd->nhead, xd->nsect); 698 699 /* now set the real drive parameters! */ 700 rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 701 0, 0, 0, fullmode); 702 XDC_DONE(xdc, rqno, err); 703 if (err) { 704 printf("%s: write real drive parameters failed: %s\n", 705 xd->sc_dev.dv_xname, xdc_e2str(err)); 706 goto done; 707 } 708 newstate = XD_DRIVE_ONLINE; 709 710 /* 711 * read bad144 table. this table resides on the first sector of the 712 * last track of the disk (i.e. second cyl of "acyl" area). 
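 *
 * worked example with a made-up geometry (the numbers are invented,
 * purely to show how "blk" below is formed): ncyl = 823, acyl = 2,
 * nhead = 15, nsect = 67 gives
 *
 *	blk = (823 + 2 - 1) * (15 * 67) + (15 - 1) * 67
 *	    = 824 * 1005 + 938
 *	    = 829058
 *
 * i.e. the first sector of the last track of the last (second
 * alternate) cylinder.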
713 */ 714 blk = (xd->ncyl + xd->acyl - 1) * (xd->nhead * xd->nsect) + /* last cyl */ 715 (xd->nhead - 1) * xd->nsect; /* last head */ 716 rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, 717 blk, 1, dvmabuf, fullmode); 718 XDC_DONE(xdc, rqno, err); 719 if (err) { 720 printf("%s: reading bad144 failed: %s\n", 721 xd->sc_dev.dv_xname, xdc_e2str(err)); 722 goto done; 723 } 724 725 /* check dkbad for sanity */ 726 dkb = (struct dkbad *) dvmabuf; 727 for (lcv = 0; lcv < 126; lcv++) { 728 if ((dkb->bt_bad[lcv].bt_cyl == 0xffff || 729 dkb->bt_bad[lcv].bt_cyl == 0) && 730 dkb->bt_bad[lcv].bt_trksec == 0xffff) 731 continue; /* blank */ 732 if (dkb->bt_bad[lcv].bt_cyl >= xd->ncyl) 733 break; 734 if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xd->nhead) 735 break; 736 if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xd->nsect) 737 break; 738 } 739 if (lcv != 126) { 740 printf("%s: warning: invalid bad144 sector!\n", 741 xd->sc_dev.dv_xname); 742 } else { 743 memcpy(&xd->dkb, dvmabuf, XDFM_BPS); 744 } 745 746 done: 747 xd->state = newstate; 748 dvma_free(dvmabuf, XDFM_BPS); 749 } 750 751 /* 752 * { b , c } d e v s w f u n c t i o n s 753 */ 754 755 /* 756 * xdclose: close device 757 */ 758 int 759 xdclose(dev, flag, fmt, p) 760 dev_t dev; 761 int flag, fmt; 762 struct proc *p; 763 { 764 struct xd_softc *xd = xd_cd.cd_devs[DISKUNIT(dev)]; 765 int part = DISKPART(dev); 766 767 /* clear mask bits */ 768 769 switch (fmt) { 770 case S_IFCHR: 771 xd->sc_dk.dk_copenmask &= ~(1 << part); 772 break; 773 case S_IFBLK: 774 xd->sc_dk.dk_bopenmask &= ~(1 << part); 775 break; 776 } 777 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 778 779 return 0; 780 } 781 782 /* 783 * xddump: crash dump system 784 */ 785 int 786 xddump(dev, blkno, va, sz) 787 dev_t dev; 788 daddr_t blkno; 789 caddr_t va; 790 size_t sz; 791 { 792 int unit, part; 793 struct xd_softc *xd; 794 795 unit = DISKUNIT(dev); 796 if (unit >= xd_cd.cd_ndevs) 797 return ENXIO; 798 part = DISKPART(dev); 799 800 xd = xd_cd.cd_devs[unit]; 801 802 printf("%s%c: crash dump not supported (yet)\n", 803 xd->sc_dev.dv_xname, 'a' + part); 804 805 return ENXIO; 806 807 /* outline: globals: "dumplo" == sector number of partition to start 808 * dump at (convert to physical sector with partition table) 809 * "dumpsize" == size of dump in clicks "physmem" == size of physical 810 * memory (clicks, ctob() to get bytes) (normal case: dumpsize == 811 * physmem) 812 * 813 * dump a copy of physical memory to the dump device starting at sector 814 * "dumplo" in the swap partition (make sure > 0). map in pages as 815 * we go. use polled I/O. 816 * 817 * XXX how to handle NON_CONTIG? 818 */ 819 } 820 821 /* 822 * xdioctl: ioctls on XD drives. based on ioctl's of other netbsd disks. 
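 *
 * the cases handled below are DIOCSBAD, DIOCGDINFO, DIOCGPART,
 * DIOCSDINFO, DIOCWLABEL, DIOCWDINFO and DIOSXDCMD.  as a sketch of how
 * a user-level format program might reach xdc_ioctlcmd() through
 * DIOSXDCMD (the xd_iocmd field names here are assumed -- check
 * <sun3/dev/xio.h> for the real layout):
 *
 *	struct xd_iocmd xio;
 *	memset(&xio, 0, sizeof(xio));
 *	xio.cmd = XDCMD_RDP;		(assumed field name)
 *	xio.subfn = XDFUN_CTL;		(assumed field name)
 *	if (ioctl(fd, DIOSXDCMD, &xio) == -1)
 *		err(1, "DIOSXDCMD");
 *
 * note that DIOSXDCMD is root-only (see the suser() check below).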
823 */ 824 int 825 xdioctl(dev, command, addr, flag, p) 826 dev_t dev; 827 u_long command; 828 caddr_t addr; 829 int flag; 830 struct proc *p; 831 832 { 833 struct xd_softc *xd; 834 struct xd_iocmd *xio; 835 int error, s, unit; 836 837 unit = DISKUNIT(dev); 838 839 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL) 840 return (ENXIO); 841 842 /* switch on ioctl type */ 843 844 switch (command) { 845 case DIOCSBAD: /* set bad144 info */ 846 if ((flag & FWRITE) == 0) 847 return EBADF; 848 s = splbio(); 849 memcpy(&xd->dkb, addr, sizeof(xd->dkb)); 850 splx(s); 851 return 0; 852 853 case DIOCGDINFO: /* get disk label */ 854 memcpy(addr, xd->sc_dk.dk_label, sizeof(struct disklabel)); 855 return 0; 856 857 case DIOCGPART: /* get partition info */ 858 ((struct partinfo *) addr)->disklab = xd->sc_dk.dk_label; 859 ((struct partinfo *) addr)->part = 860 &xd->sc_dk.dk_label->d_partitions[DISKPART(dev)]; 861 return 0; 862 863 case DIOCSDINFO: /* set disk label */ 864 if ((flag & FWRITE) == 0) 865 return EBADF; 866 error = setdisklabel(xd->sc_dk.dk_label, 867 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0, 868 xd->sc_dk.dk_cpulabel); 869 if (error == 0) { 870 if (xd->state == XD_DRIVE_NOLABEL) 871 xd->state = XD_DRIVE_ONLINE; 872 } 873 return error; 874 875 case DIOCWLABEL: /* change write status of disk label */ 876 if ((flag & FWRITE) == 0) 877 return EBADF; 878 if (*(int *) addr) 879 xd->flags |= XD_WLABEL; 880 else 881 xd->flags &= ~XD_WLABEL; 882 return 0; 883 884 case DIOCWDINFO: /* write disk label */ 885 if ((flag & FWRITE) == 0) 886 return EBADF; 887 error = setdisklabel(xd->sc_dk.dk_label, 888 (struct disklabel *) addr, /* xd->sc_dk.dk_openmask : */ 0, 889 xd->sc_dk.dk_cpulabel); 890 if (error == 0) { 891 if (xd->state == XD_DRIVE_NOLABEL) 892 xd->state = XD_DRIVE_ONLINE; 893 894 /* Simulate opening partition 0 so write succeeds. */ 895 xd->sc_dk.dk_openmask |= (1 << 0); 896 error = writedisklabel(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART), 897 xdstrategy, xd->sc_dk.dk_label, 898 xd->sc_dk.dk_cpulabel); 899 xd->sc_dk.dk_openmask = 900 xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 901 } 902 return error; 903 904 case DIOSXDCMD: 905 xio = (struct xd_iocmd *) addr; 906 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) 907 return (error); 908 return (xdc_ioctlcmd(xd, dev, xio)); 909 910 default: 911 return ENOTTY; 912 } 913 } 914 915 /* 916 * xdopen: open drive 917 */ 918 int 919 xdopen(dev, flag, fmt, p) 920 dev_t dev; 921 int flag, fmt; 922 struct proc *p; 923 { 924 int err, unit, part, s; 925 struct xd_softc *xd; 926 927 /* first, could it be a valid target? */ 928 unit = DISKUNIT(dev); 929 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == NULL) 930 return (ENXIO); 931 part = DISKPART(dev); 932 err = 0; 933 934 /* 935 * If some other processing is doing init, sleep. 936 */ 937 s = splbio(); 938 while (xd->state == XD_DRIVE_ATTACHING) { 939 if (tsleep(&xd->state, PRIBIO, "xdopen", 0)) { 940 err = EINTR; 941 goto done; 942 } 943 } 944 /* Do we need to init the drive? */ 945 if (xd->state == XD_DRIVE_UNKNOWN) { 946 xd_init(xd); 947 wakeup(&xd->state); 948 } 949 /* Was the init successful? 
*/ 950 if (xd->state == XD_DRIVE_UNKNOWN) { 951 err = EIO; 952 goto done; 953 } 954 955 /* check for partition */ 956 if (part != RAW_PART && 957 (part >= xd->sc_dk.dk_label->d_npartitions || 958 xd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) { 959 err = ENXIO; 960 goto done; 961 } 962 963 /* set open masks */ 964 switch (fmt) { 965 case S_IFCHR: 966 xd->sc_dk.dk_copenmask |= (1 << part); 967 break; 968 case S_IFBLK: 969 xd->sc_dk.dk_bopenmask |= (1 << part); 970 break; 971 } 972 xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 973 974 done: 975 splx(s); 976 return (err); 977 } 978 979 int 980 xdread(dev, uio, flags) 981 dev_t dev; 982 struct uio *uio; 983 int flags; 984 { 985 986 return (physio(xdstrategy, NULL, dev, B_READ, minphys, uio)); 987 } 988 989 int 990 xdwrite(dev, uio, flags) 991 dev_t dev; 992 struct uio *uio; 993 int flags; 994 { 995 996 return (physio(xdstrategy, NULL, dev, B_WRITE, minphys, uio)); 997 } 998 999 1000 /* 1001 * xdsize: return size of a partition for a dump 1002 */ 1003 int 1004 xdsize(dev) 1005 dev_t dev; 1006 1007 { 1008 struct xd_softc *xdsc; 1009 int unit, part, size, omask; 1010 1011 /* valid unit? */ 1012 unit = DISKUNIT(dev); 1013 if (unit >= xd_cd.cd_ndevs || (xdsc = xd_cd.cd_devs[unit]) == NULL) 1014 return (-1); 1015 1016 part = DISKPART(dev); 1017 omask = xdsc->sc_dk.dk_openmask & (1 << part); 1018 1019 if (omask == 0 && xdopen(dev, 0, S_IFBLK, NULL) != 0) 1020 return (-1); 1021 1022 /* do it */ 1023 if (xdsc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP) 1024 size = -1; /* only give valid size for swap partitions */ 1025 else 1026 size = xdsc->sc_dk.dk_label->d_partitions[part].p_size * 1027 (xdsc->sc_dk.dk_label->d_secsize / DEV_BSIZE); 1028 if (omask == 0 && xdclose(dev, 0, S_IFBLK, NULL) != 0) 1029 return (-1); 1030 return (size); 1031 } 1032 1033 /* 1034 * xdstrategy: buffering system interface to xd. 1035 */ 1036 void 1037 xdstrategy(bp) 1038 struct buf *bp; 1039 1040 { 1041 struct xd_softc *xd; 1042 struct xdc_softc *parent; 1043 int s, unit; 1044 1045 unit = DISKUNIT(bp->b_dev); 1046 1047 /* check for live device */ 1048 1049 if (unit >= xd_cd.cd_ndevs || (xd = xd_cd.cd_devs[unit]) == 0 || 1050 bp->b_blkno < 0 || 1051 (bp->b_bcount % xd->sc_dk.dk_label->d_secsize) != 0) { 1052 bp->b_error = EINVAL; 1053 goto bad; 1054 } 1055 1056 /* There should always be an open first. */ 1057 if (xd->state == XD_DRIVE_UNKNOWN) { 1058 bp->b_error = EIO; 1059 goto bad; 1060 } 1061 1062 if (xd->state != XD_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) { 1063 /* no I/O to unlabeled disks, unless raw partition */ 1064 bp->b_error = EIO; 1065 goto bad; 1066 } 1067 /* short circuit zero length request */ 1068 1069 if (bp->b_bcount == 0) 1070 goto done; 1071 1072 /* check bounds with label (disksubr.c). Determine the size of the 1073 * transfer, and make sure it is within the boundaries of the 1074 * partition. Adjust transfer if needed, and signal errors or early 1075 * completion. */ 1076 1077 if (bounds_check_with_label(&xd->sc_dk, bp, 1078 (xd->flags & XD_WLABEL) != 0) <= 0) 1079 goto done; 1080 1081 /* 1082 * now we know we have a valid buf structure that we need to do I/O 1083 * on. 1084 * 1085 * note that we don't disksort because the controller has a sorting 1086 * algorithm built into the hardware. 
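 *
 * from here the flow is (an outline of the code below, no new logic):
 *
 *	s = splbio();
 *	while (free iorqs && bufs already queued)
 *		xdc_startbuf(parent, NULL, NULL);   start earlier bufs first
 *	if (no free iorq left) {
 *		BUFQ_PUT(&parent->sc_wq, bp);       xdcintr() picks it up
 *		splx(s); return;
 *	}
 *	xdc_startbuf(parent, xd, bp);               start this buf now
 *	splx(s);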
1087 */ 1088 1089 s = splbio(); /* protect the queues */ 1090 1091 /* first, give jobs in front of us a chance */ 1092 parent = xd->parent; 1093 while (parent->nfree > 0 && BUFQ_PEEK(&parent->sc_wq) != NULL) 1094 if (xdc_startbuf(parent, NULL, NULL) != XD_ERR_AOK) 1095 break; 1096 1097 /* 1098 * if there are no free iorq's, then we just queue and return. the 1099 * buffs will get picked up later by xdcintr(). 1100 */ 1101 if (parent->nfree == 0) { 1102 BUFQ_PUT(&parent->sc_wq, bp); 1103 splx(s); 1104 return; 1105 } 1106 1107 /* now we have free iopb's and we are at splbio... start 'em up */ 1108 if (xdc_startbuf(parent, xd, bp) != XD_ERR_AOK) { 1109 return; 1110 } 1111 1112 /* done! */ 1113 1114 splx(s); 1115 return; 1116 1117 bad: /* tells upper layers we have an error */ 1118 bp->b_flags |= B_ERROR; 1119 done: /* tells upper layers we are done with this 1120 * buf */ 1121 bp->b_resid = bp->b_bcount; 1122 biodone(bp); 1123 } 1124 /* 1125 * end of {b,c}devsw functions 1126 */ 1127 1128 /* 1129 * i n t e r r u p t f u n c t i o n 1130 * 1131 * xdcintr: hardware interrupt. 1132 */ 1133 int 1134 xdcintr(v) 1135 void *v; 1136 1137 { 1138 struct xdc_softc *xdcsc = v; 1139 1140 /* kick the event counter */ 1141 xdcsc->sc_intrcnt.ev_count++; 1142 1143 /* remove as many done IOPBs as possible */ 1144 xdc_remove_iorq(xdcsc); 1145 1146 /* start any iorq's already waiting */ 1147 xdc_start(xdcsc, XDC_MAXIOPB); 1148 1149 /* fill up any remaining iorq's with queue'd buffers */ 1150 while (xdcsc->nfree > 0 && BUFQ_PEEK(&xdcsc->sc_wq) != NULL) 1151 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK) 1152 break; 1153 1154 return (1); 1155 } 1156 /* 1157 * end of interrupt function 1158 */ 1159 1160 /* 1161 * i n t e r n a l f u n c t i o n s 1162 */ 1163 1164 /* 1165 * xdc_rqinit: fill out the fields of an I/O request 1166 */ 1167 1168 inline void 1169 xdc_rqinit(rq, xdc, xd, md, blk, cnt, db, bp) 1170 struct xd_iorq *rq; 1171 struct xdc_softc *xdc; 1172 struct xd_softc *xd; 1173 int md; 1174 u_long blk; 1175 int cnt; 1176 caddr_t db; 1177 struct buf *bp; 1178 { 1179 rq->xdc = xdc; 1180 rq->xd = xd; 1181 rq->ttl = XDC_MAXTTL + 10; 1182 rq->mode = md; 1183 rq->tries = rq->errno = rq->lasterror = 0; 1184 rq->blockno = blk; 1185 rq->sectcnt = cnt; 1186 rq->dbuf = rq->dbufbase = db; 1187 rq->buf = bp; 1188 } 1189 1190 /* 1191 * xdc_rqtopb: load up an IOPB based on an iorq 1192 */ 1193 void 1194 xdc_rqtopb(iorq, iopb, cmd, subfun) 1195 struct xd_iorq *iorq; 1196 struct xd_iopb *iopb; 1197 int cmd, subfun; 1198 1199 { 1200 u_long block, dp; 1201 1202 /* standard stuff */ 1203 1204 iopb->errs = iopb->done = 0; 1205 iopb->comm = cmd; 1206 iopb->errno = iopb->status = 0; 1207 iopb->subfun = subfun; 1208 if (iorq->xd) 1209 iopb->unit = iorq->xd->xd_drive; 1210 else 1211 iopb->unit = 0; 1212 1213 /* check for alternate IOPB format */ 1214 1215 if (cmd == XDCMD_WRP) { 1216 switch (subfun) { 1217 case XDFUN_CTL:{ 1218 struct xd_iopb_ctrl *ctrl = 1219 (struct xd_iopb_ctrl *) iopb; 1220 iopb->lll = 0; 1221 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL) 1222 ? 
0 1223 : iorq->xdc->ipl; 1224 ctrl->param_a = XDPA_TMOD | XDPA_DACF; 1225 ctrl->param_b = XDPB_ROR | XDPB_TDT_3_2USEC; 1226 ctrl->param_c = XDPC_OVS | XDPC_COP | XDPC_ASR | 1227 XDPC_RBC | XDPC_ECC2; 1228 ctrl->throttle = XDC_THROTTLE; 1229 #ifdef sparc 1230 if (CPU_ISSUN4 && cpuinfo.cpu_type == CPUTYP_4_300) 1231 ctrl->delay = XDC_DELAY_4_300; 1232 else 1233 ctrl->delay = XDC_DELAY_SPARC; 1234 #endif 1235 #ifdef sun3 1236 ctrl->delay = XDC_DELAY_SUN3; 1237 #endif 1238 break; 1239 } 1240 case XDFUN_DRV:{ 1241 struct xd_iopb_drive *drv = 1242 (struct xd_iopb_drive *)iopb; 1243 /* we assume that the disk label has the right 1244 * info */ 1245 if (XD_STATE(iorq->mode) == XD_SUB_POLL) 1246 drv->dparam_ipl = (XDC_DPARAM << 3); 1247 else 1248 drv->dparam_ipl = (XDC_DPARAM << 3) | 1249 iorq->xdc->ipl; 1250 drv->maxsect = iorq->xd->nsect - 1; 1251 drv->maxsector = drv->maxsect; 1252 /* note: maxsector != maxsect only if you are 1253 * doing cyl sparing */ 1254 drv->headoff = 0; 1255 drv->maxcyl = iorq->xd->pcyl - 1; 1256 drv->maxhead = iorq->xd->nhead - 1; 1257 break; 1258 } 1259 case XDFUN_FMT:{ 1260 struct xd_iopb_format *form = 1261 (struct xd_iopb_format *) iopb; 1262 if (XD_STATE(iorq->mode) == XD_SUB_POLL) 1263 form->interleave_ipl = (XDC_INTERLEAVE << 3); 1264 else 1265 form->interleave_ipl = (XDC_INTERLEAVE << 3) | 1266 iorq->xdc->ipl; 1267 form->field1 = XDFM_FIELD1; 1268 form->field2 = XDFM_FIELD2; 1269 form->field3 = XDFM_FIELD3; 1270 form->field4 = XDFM_FIELD4; 1271 form->bytespersec = XDFM_BPS; 1272 form->field6 = XDFM_FIELD6; 1273 form->field7 = XDFM_FIELD7; 1274 break; 1275 } 1276 } 1277 } else { 1278 1279 /* normal IOPB case (harmless to RDP command) */ 1280 1281 iopb->lll = 0; 1282 iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL) 1283 ? 0 1284 : iorq->xdc->ipl; 1285 iopb->sectcnt = iorq->sectcnt; 1286 block = iorq->blockno; 1287 if (iorq->xd == NULL || block == 0) { 1288 iopb->sectno = iopb->headno = iopb->cylno = 0; 1289 } else { 1290 iopb->sectno = block % iorq->xd->nsect; 1291 block = block / iorq->xd->nsect; 1292 iopb->headno = block % iorq->xd->nhead; 1293 block = block / iorq->xd->nhead; 1294 iopb->cylno = block; 1295 } 1296 iopb->daddr = dp = (iorq->dbuf == NULL) ? 0 : 1297 dvma_kvtopa(iorq->dbuf, iorq->xdc->bustype); 1298 iopb->addrmod = XDC_ADDRMOD; 1299 } 1300 } 1301 1302 /* 1303 * xdc_cmd: front end for POLL'd and WAIT'd commands. Returns rqno. 1304 * If you've already got an IORQ, you can call submit directly (currently 1305 * there is no need to do this). NORM requests are handled separately. 
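 *
 * typical usage, as in xdcattach()/xd_init() above (shown again here
 * only for orientation):
 *
 *	rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL);
 *	if (rqno == XD_ERR_FAIL)
 *		... couldn't even get an iorq ...
 *	ctl = (struct xd_iopb_ctrl *) &xdc->iopbase[rqno];
 *	... inspect the returned iopb ...
 *	XDC_DONE(xdc, rqno, err);	fetch error code, free the iorq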
1306 */ 1307 int 1308 xdc_cmd(xdcsc, cmd, subfn, unit, block, scnt, dptr, fullmode) 1309 struct xdc_softc *xdcsc; 1310 int cmd, subfn, unit, block, scnt; 1311 char *dptr; 1312 int fullmode; 1313 1314 { 1315 struct xd_iorq *iorq; 1316 struct xd_iopb *iopb; 1317 int rqno, retry; 1318 int submode = XD_STATE(fullmode); 1319 1320 /* get iorq/iopb */ 1321 switch (submode) { 1322 case XD_SUB_POLL: 1323 while (xdcsc->nfree == 0) { 1324 if (xdc_piodriver(xdcsc, 0, 1) != XD_ERR_AOK) 1325 return (XD_ERR_FAIL); 1326 } 1327 break; 1328 case XD_SUB_WAIT: 1329 retry = 1; 1330 while (retry) { 1331 while (xdcsc->nfree == 0) { 1332 if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0)) 1333 return (XD_ERR_FAIL); 1334 } 1335 while (xdcsc->ndone > XDC_SUBWAITLIM) { 1336 if (tsleep(&xdcsc->ndone, PRIBIO, "xdsubwait", 0)) 1337 return (XD_ERR_FAIL); 1338 } 1339 if (xdcsc->nfree) 1340 retry = 0; /* got it */ 1341 } 1342 break; 1343 default: 1344 return (XD_ERR_FAIL); /* illegal */ 1345 } 1346 if (xdcsc->nfree == 0) 1347 panic("xdcmd nfree"); 1348 rqno = XDC_RQALLOC(xdcsc); 1349 iorq = &xdcsc->reqs[rqno]; 1350 iopb = iorq->iopb; 1351 1352 1353 /* init iorq/iopb */ 1354 xdc_rqinit(iorq, xdcsc, 1355 (unit == XDC_NOUNIT) ? NULL : xdcsc->sc_drives[unit], 1356 fullmode, block, scnt, dptr, NULL); 1357 1358 /* load IOPB from iorq */ 1359 xdc_rqtopb(iorq, iopb, cmd, subfn); 1360 1361 /* submit it for processing */ 1362 xdc_submit_iorq(xdcsc, rqno, fullmode); /* error code will be in iorq */ 1363 1364 return (rqno); 1365 } 1366 1367 /* 1368 * xdc_startbuf 1369 * start a buffer running, assumes nfree > 0 1370 */ 1371 int 1372 xdc_startbuf(xdcsc, xdsc, bp) 1373 struct xdc_softc *xdcsc; 1374 struct xd_softc *xdsc; 1375 struct buf *bp; 1376 1377 { 1378 int rqno, partno; 1379 struct xd_iorq *iorq; 1380 struct xd_iopb *iopb; 1381 u_long block; 1382 caddr_t dbuf; 1383 1384 if (!xdcsc->nfree) 1385 panic("xdc_startbuf free"); 1386 rqno = XDC_RQALLOC(xdcsc); 1387 iorq = &xdcsc->reqs[rqno]; 1388 iopb = iorq->iopb; 1389 1390 /* get buf */ 1391 1392 if (bp == NULL) { 1393 bp = BUFQ_GET(&xdcsc->sc_wq); 1394 if (bp == NULL) 1395 panic("xdc_startbuf bp"); 1396 xdsc = xdcsc->sc_drives[DISKUNIT(bp->b_dev)]; 1397 } 1398 partno = DISKPART(bp->b_dev); 1399 #ifdef XDC_DEBUG 1400 printf("xdc_startbuf: %s%c: %s block %d\n", xdsc->sc_dev.dv_xname, 1401 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno); 1402 printf("xdc_startbuf: b_bcount %d, b_data 0x%x\n", 1403 bp->b_bcount, bp->b_data); 1404 #endif 1405 1406 /* 1407 * load request. we have to calculate the correct block number based 1408 * on partition info. 1409 * 1410 * also, note that there are two kinds of buf structures, those with 1411 * B_PHYS set and those without B_PHYS. if B_PHYS is set, then it is 1412 * a raw I/O (to a cdevsw) and we are doing I/O directly to the users' 1413 * buffer which has already been mapped into DVMA space. (Not on sun3) 1414 * However, if B_PHYS is not set, then the buffer is a normal system 1415 * buffer which does *not* live in DVMA space. In that case we call 1416 * dvma_mapin to map it into DVMA space so we can do the DMA to it. 1417 * 1418 * in cases where we do a dvma_mapin, note that iorq points to the buffer 1419 * as mapped into DVMA space, where as the bp->b_data points to its 1420 * non-DVMA mapping. 1421 * 1422 * XXX - On the sun3, B_PHYS does NOT mean the buffer is mapped 1423 * into dvma space, only that it was remapped into the kernel. 1424 * We ALWAYS have to remap the kernel buf into DVMA space. 
1425 * (It is done inexpensively, using whole segments!) 1426 */ 1427 1428 block = bp->b_blkno + ((partno == RAW_PART) ? 0 : 1429 xdsc->sc_dk.dk_label->d_partitions[partno].p_offset); 1430 1431 dbuf = dvma_mapin(bp->b_data, bp->b_bcount, 0); 1432 if (dbuf == NULL) { /* out of DVMA space */ 1433 printf("%s: warning: out of DVMA space\n", 1434 xdcsc->sc_dev.dv_xname); 1435 XDC_FREE(xdcsc, rqno); 1436 BUFQ_PUT(&xdcsc->sc_wq, bp); 1437 return (XD_ERR_FAIL); /* XXX: need some sort of 1438 * call-back scheme here? */ 1439 } 1440 1441 /* init iorq and load iopb from it */ 1442 1443 xdc_rqinit(iorq, xdcsc, xdsc, XD_SUB_NORM | XD_MODE_VERBO, block, 1444 bp->b_bcount / XDFM_BPS, dbuf, bp); 1445 1446 xdc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XDCMD_RD : XDCMD_WR, 0); 1447 1448 /* Instrumentation. */ 1449 disk_busy(&xdsc->sc_dk); 1450 1451 /* now submit [note that xdc_submit_iorq can never fail on NORM reqs] */ 1452 1453 xdc_submit_iorq(xdcsc, rqno, XD_SUB_NORM); 1454 return (XD_ERR_AOK); 1455 } 1456 1457 1458 /* 1459 * xdc_submit_iorq: submit an iorq for processing. returns XD_ERR_AOK 1460 * if ok. if it fail returns an error code. type is XD_SUB_*. 1461 * 1462 * note: caller frees iorq in all cases except NORM 1463 * 1464 * return value: 1465 * NORM: XD_AOK (req pending), XD_FAIL (couldn't submit request) 1466 * WAIT: XD_AOK (success), <error-code> (failed) 1467 * POLL: <same as WAIT> 1468 * NOQ : <same as NORM> 1469 * 1470 * there are three sources for i/o requests: 1471 * [1] xdstrategy: normal block I/O, using "struct buf" system. 1472 * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts. 1473 * [3] open/ioctl: these are I/O requests done in the context of a process, 1474 * and the process should block until they are done. 1475 * 1476 * software state is stored in the iorq structure. each iorq has an 1477 * iopb structure. the hardware understands the iopb structure. 1478 * every command must go through an iopb. a 7053 can only handle 1479 * XDC_MAXIOPB (31) active iopbs at one time. iopbs are allocated in 1480 * DVMA space at boot up time. what happens if we run out of iopb's? 1481 * for i/o type [1], the buffers are queued at the "buff" layer and 1482 * picked up later by the interrupt routine. for case [2] the 1483 * programmed i/o driver is called with a special flag that says 1484 * return when one iopb is free. for case [3] the process can sleep 1485 * on the iorq free list until some iopbs are available. 
1486 */ 1487 1488 1489 int 1490 xdc_submit_iorq(xdcsc, iorqno, type) 1491 struct xdc_softc *xdcsc; 1492 int iorqno; 1493 int type; 1494 1495 { 1496 u_long iopbaddr; 1497 struct xd_iorq *iorq = &xdcsc->reqs[iorqno]; 1498 1499 #ifdef XDC_DEBUG 1500 printf("xdc_submit_iorq(%s, no=%d, type=%d)\n", xdcsc->sc_dev.dv_xname, 1501 iorqno, type); 1502 #endif 1503 1504 /* first check and see if controller is busy */ 1505 if (xdcsc->xdc->xdc_csr & XDC_ADDING) { 1506 #ifdef XDC_DEBUG 1507 printf("xdc_submit_iorq: XDC not ready (ADDING)\n"); 1508 #endif 1509 if (type == XD_SUB_NOQ) 1510 return (XD_ERR_FAIL); /* failed */ 1511 XDC_TWAIT(xdcsc, iorqno); /* put at end of waitq */ 1512 switch (type) { 1513 case XD_SUB_NORM: 1514 return XD_ERR_AOK; /* success */ 1515 case XD_SUB_WAIT: 1516 while (iorq->iopb->done == 0) { 1517 (void) tsleep(iorq, PRIBIO, "xdciorq", 0); 1518 } 1519 return (iorq->errno); 1520 case XD_SUB_POLL: 1521 return (xdc_piodriver(xdcsc, iorqno, 0)); 1522 default: 1523 panic("xdc_submit_iorq adding"); 1524 } 1525 } 1526 #ifdef XDC_DEBUG 1527 { 1528 u_char *rio = (u_char *) iorq->iopb; 1529 int sz = sizeof(struct xd_iopb), lcv; 1530 printf("%s: aio #%d [", 1531 xdcsc->sc_dev.dv_xname, iorq - xdcsc->reqs); 1532 for (lcv = 0; lcv < sz; lcv++) 1533 printf(" %02x", rio[lcv]); 1534 printf("]\n"); 1535 } 1536 #endif /* XDC_DEBUG */ 1537 1538 /* controller not busy, start command */ 1539 iopbaddr = dvma_kvtopa(iorq->iopb, xdcsc->bustype); 1540 XDC_GO(xdcsc->xdc, iopbaddr); /* go! */ 1541 xdcsc->nrun++; 1542 /* command now running, wrap it up */ 1543 switch (type) { 1544 case XD_SUB_NORM: 1545 case XD_SUB_NOQ: 1546 return (XD_ERR_AOK); /* success */ 1547 case XD_SUB_WAIT: 1548 while (iorq->iopb->done == 0) { 1549 (void) tsleep(iorq, PRIBIO, "xdciorq", 0); 1550 } 1551 return (iorq->errno); 1552 case XD_SUB_POLL: 1553 return (xdc_piodriver(xdcsc, iorqno, 0)); 1554 default: 1555 panic("xdc_submit_iorq wrap up"); 1556 } 1557 panic("xdc_submit_iorq"); 1558 return 0; /* not reached */ 1559 } 1560 1561 1562 /* 1563 * xdc_piodriver 1564 * 1565 * programmed i/o driver. this function takes over the computer 1566 * and drains off all i/o requests. it returns the status of the iorq 1567 * the caller is interesting in. if freeone is true, then it returns 1568 * when there is a free iorq. 1569 */ 1570 int 1571 xdc_piodriver(xdcsc, iorqno, freeone) 1572 struct xdc_softc *xdcsc; 1573 int iorqno; 1574 int freeone; 1575 1576 { 1577 int nreset = 0; 1578 int retval = 0; 1579 u_long count; 1580 struct xdc *xdc = xdcsc->xdc; 1581 #ifdef XDC_DEBUG 1582 printf("xdc_piodriver(%s, %d, freeone=%d)\n", xdcsc->sc_dev.dv_xname, 1583 iorqno, freeone); 1584 #endif 1585 1586 while (xdcsc->nwait || xdcsc->nrun) { 1587 #ifdef XDC_DEBUG 1588 printf("xdc_piodriver: wait=%d, run=%d\n", 1589 xdcsc->nwait, xdcsc->nrun); 1590 #endif 1591 XDC_WAIT(xdc, count, XDC_MAXTIME, (XDC_REMIOPB | XDC_F_ERROR)); 1592 #ifdef XDC_DEBUG 1593 printf("xdc_piodriver: done wait with count = %d\n", count); 1594 #endif 1595 /* we expect some progress soon */ 1596 if (count == 0 && nreset >= 2) { 1597 xdc_reset(xdcsc, 0, XD_RSET_ALL, XD_ERR_FAIL, 0); 1598 #ifdef XDC_DEBUG 1599 printf("xdc_piodriver: timeout\n"); 1600 #endif 1601 return (XD_ERR_FAIL); 1602 } 1603 if (count == 0) { 1604 if (xdc_reset(xdcsc, 0, 1605 (nreset++ == 0) ? 
XD_RSET_NONE : iorqno, 1606 XD_ERR_FAIL, 1607 0) == XD_ERR_FAIL) 1608 return (XD_ERR_FAIL); /* flushes all but POLL 1609 * requests, resets */ 1610 continue; 1611 } 1612 xdc_remove_iorq(xdcsc); /* could resubmit request */ 1613 if (freeone) { 1614 if (xdcsc->nrun < XDC_MAXIOPB) { 1615 #ifdef XDC_DEBUG 1616 printf("xdc_piodriver: done: one free\n"); 1617 #endif 1618 return (XD_ERR_AOK); 1619 } 1620 continue; /* don't xdc_start */ 1621 } 1622 xdc_start(xdcsc, XDC_MAXIOPB); 1623 } 1624 1625 /* get return value */ 1626 1627 retval = xdcsc->reqs[iorqno].errno; 1628 1629 #ifdef XDC_DEBUG 1630 printf("xdc_piodriver: done, retval = 0x%x (%s)\n", 1631 xdcsc->reqs[iorqno].errno, xdc_e2str(xdcsc->reqs[iorqno].errno)); 1632 #endif 1633 1634 /* now that we've drained everything, start up any bufs that have 1635 * queued */ 1636 1637 while (xdcsc->nfree > 0 && BUFQ_PEEK(&xdcsc->sc_wq) != NULL) 1638 if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK) 1639 break; 1640 1641 return (retval); 1642 } 1643 1644 /* 1645 * xdc_reset: reset one drive. NOTE: assumes xdc was just reset. 1646 * we steal iopb[0] for this, but we put it back when we are done. 1647 */ 1648 void 1649 xdc_xdreset(xdcsc, xdsc) 1650 struct xdc_softc *xdcsc; 1651 struct xd_softc *xdsc; 1652 1653 { 1654 struct xd_iopb tmpiopb; 1655 u_long addr; 1656 int del; 1657 memcpy(&tmpiopb, xdcsc->iopbase, sizeof(tmpiopb)); 1658 memset(xdcsc->iopbase, 0, sizeof(tmpiopb)); 1659 xdcsc->iopbase->comm = XDCMD_RST; 1660 xdcsc->iopbase->unit = xdsc->xd_drive; 1661 addr = (u_long) xdcsc->dvmaiopb; 1662 XDC_GO(xdcsc->xdc, addr); /* go! */ 1663 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_REMIOPB); 1664 if (del <= 0 || xdcsc->iopbase->errs) { 1665 printf("%s: off-line: %s\n", xdcsc->sc_dev.dv_xname, 1666 xdc_e2str(xdcsc->iopbase->errno)); 1667 xdcsc->xdc->xdc_csr = XDC_RESET; 1668 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET); 1669 if (del <= 0) 1670 panic("xdc_reset"); 1671 } else { 1672 xdcsc->xdc->xdc_csr = XDC_CLRRIO; /* clear RIO */ 1673 } 1674 memcpy(xdcsc->iopbase, &tmpiopb, sizeof(tmpiopb)); 1675 } 1676 1677 1678 /* 1679 * xdc_reset: reset everything: requests are marked as errors except 1680 * a polled request (which is resubmitted) 1681 */ 1682 int 1683 xdc_reset(xdcsc, quiet, blastmode, error, xdsc) 1684 struct xdc_softc *xdcsc; 1685 int quiet, blastmode, error; 1686 struct xd_softc *xdsc; 1687 1688 { 1689 int del = 0, lcv, retval = XD_ERR_AOK; 1690 int oldfree = xdcsc->nfree; 1691 struct xd_iorq *iorq; 1692 1693 /* soft reset hardware */ 1694 1695 if (!quiet) 1696 printf("%s: soft reset\n", xdcsc->sc_dev.dv_xname); 1697 xdcsc->xdc->xdc_csr = XDC_RESET; 1698 XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET); 1699 if (del <= 0) { 1700 blastmode = XD_RSET_ALL; /* dead, flush all requests */ 1701 retval = XD_ERR_FAIL; 1702 } 1703 if (xdsc) 1704 xdc_xdreset(xdcsc, xdsc); 1705 1706 /* fix queues based on "blast-mode" */ 1707 1708 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 1709 iorq = &xdcsc->reqs[lcv]; 1710 1711 if (XD_STATE(iorq->mode) != XD_SUB_POLL && 1712 XD_STATE(iorq->mode) != XD_SUB_WAIT && 1713 XD_STATE(iorq->mode) != XD_SUB_NORM) 1714 /* is it active? 
*/ 1715 continue; 1716 1717 xdcsc->nrun--; /* it isn't running any more */ 1718 if (blastmode == XD_RSET_ALL || blastmode != lcv) { 1719 /* failed */ 1720 iorq->errno = error; 1721 xdcsc->iopbase[lcv].done = xdcsc->iopbase[lcv].errs = 1; 1722 switch (XD_STATE(iorq->mode)) { 1723 case XD_SUB_NORM: 1724 iorq->buf->b_error = EIO; 1725 iorq->buf->b_flags |= B_ERROR; 1726 iorq->buf->b_resid = 1727 iorq->sectcnt * XDFM_BPS; 1728 /* Sun3: map/unmap regardless of B_PHYS */ 1729 dvma_mapout(iorq->dbufbase, 1730 iorq->buf->b_bcount); 1731 disk_unbusy(&iorq->xd->sc_dk, 1732 (iorq->buf->b_bcount - iorq->buf->b_resid), 1733 (iorq->buf->b_flags & B_READ)); 1734 biodone(iorq->buf); 1735 XDC_FREE(xdcsc, lcv); /* add to free list */ 1736 break; 1737 case XD_SUB_WAIT: 1738 wakeup(iorq); 1739 case XD_SUB_POLL: 1740 xdcsc->ndone++; 1741 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1742 break; 1743 } 1744 1745 } else { 1746 1747 /* resubmit, put at front of wait queue */ 1748 XDC_HWAIT(xdcsc, lcv); 1749 } 1750 } 1751 1752 /* 1753 * now, if stuff is waiting, start it. 1754 * since we just reset it should go 1755 */ 1756 xdc_start(xdcsc, XDC_MAXIOPB); 1757 1758 /* ok, we did it */ 1759 if (oldfree == 0 && xdcsc->nfree) 1760 wakeup(&xdcsc->nfree); 1761 1762 #ifdef XDC_DIAG 1763 del = xdcsc->nwait + xdcsc->nrun + xdcsc->nfree + xdcsc->ndone; 1764 if (del != XDC_MAXIOPB) 1765 printf("%s: diag: xdc_reset miscount (%d should be %d)!\n", 1766 xdcsc->sc_dev.dv_xname, del, XDC_MAXIOPB); 1767 else 1768 if (xdcsc->ndone > XDC_MAXIOPB - XDC_SUBWAITLIM) 1769 printf("%s: diag: lots of done jobs (%d)\n", 1770 xdcsc->sc_dev.dv_xname, xdcsc->ndone); 1771 #endif 1772 printf("RESET DONE\n"); 1773 return (retval); 1774 } 1775 1776 /* 1777 * xdc_start: start all waiting buffers 1778 */ 1779 void 1780 xdc_start(xdcsc, maxio) 1781 struct xdc_softc *xdcsc; 1782 int maxio; 1783 1784 { 1785 int rqno; 1786 while (maxio && xdcsc->nwait && 1787 (xdcsc->xdc->xdc_csr & XDC_ADDING) == 0) { 1788 XDC_GET_WAITER(xdcsc, rqno); /* note: rqno is an "out" 1789 * param */ 1790 if (xdc_submit_iorq(xdcsc, rqno, XD_SUB_NOQ) != XD_ERR_AOK) 1791 panic("xdc_start"); /* should never happen */ 1792 maxio--; 1793 } 1794 } 1795 1796 /* 1797 * xdc_remove_iorq: remove "done" IOPB's. 1798 */ 1799 int 1800 xdc_remove_iorq(xdcsc) 1801 struct xdc_softc *xdcsc; 1802 1803 { 1804 int errno, rqno, comm, errs; 1805 struct xdc *xdc = xdcsc->xdc; 1806 struct xd_iopb *iopb; 1807 struct xd_iorq *iorq; 1808 struct buf *bp; 1809 1810 if (xdc->xdc_csr & XDC_F_ERROR) { 1811 /* 1812 * FATAL ERROR: should never happen under normal use. This 1813 * error is so bad, you can't even tell which IOPB is bad, so 1814 * we dump them all. 1815 */ 1816 errno = xdc->xdc_f_err; 1817 printf("%s: fatal error 0x%02x: %s\n", xdcsc->sc_dev.dv_xname, 1818 errno, xdc_e2str(errno)); 1819 if (xdc_reset(xdcsc, 0, XD_RSET_ALL, errno, 0) != XD_ERR_AOK) { 1820 printf("%s: soft reset failed!\n", 1821 xdcsc->sc_dev.dv_xname); 1822 panic("xdc_remove_iorq: controller DEAD"); 1823 } 1824 return (XD_ERR_AOK); 1825 } 1826 1827 /* 1828 * get iopb that is done 1829 * 1830 * hmm... I used to read the address of the done IOPB off the VME 1831 * registers and calculate the rqno directly from that. that worked 1832 * until I started putting a load on the controller. when loaded, i 1833 * would get interrupts but neither the REMIOPB or F_ERROR bits would 1834 * be set, even after DELAY'ing a while! 
later on the timeout 1835 * routine would detect IOPBs that were marked "running" but their 1836 * "done" bit was set. rather than dealing directly with this 1837 * problem, it is just easier to look at all running IOPB's for the 1838 * done bit. 1839 */ 1840 if (xdc->xdc_csr & XDC_REMIOPB) { 1841 xdc->xdc_csr = XDC_CLRRIO; 1842 } 1843 1844 for (rqno = 0; rqno < XDC_MAXIOPB; rqno++) { 1845 iorq = &xdcsc->reqs[rqno]; 1846 if (iorq->mode == 0 || XD_STATE(iorq->mode) == XD_SUB_DONE) 1847 continue; /* free, or done */ 1848 iopb = &xdcsc->iopbase[rqno]; 1849 if (iopb->done == 0) 1850 continue; /* not done yet */ 1851 1852 #ifdef XDC_DEBUG 1853 { 1854 u_char *rio = (u_char *) iopb; 1855 int sz = sizeof(struct xd_iopb), lcv; 1856 printf("%s: rio #%d [", xdcsc->sc_dev.dv_xname, rqno); 1857 for (lcv = 0; lcv < sz; lcv++) 1858 printf(" %02x", rio[lcv]); 1859 printf("]\n"); 1860 } 1861 #endif /* XDC_DEBUG */ 1862 1863 xdcsc->nrun--; 1864 1865 comm = iopb->comm; 1866 errs = iopb->errs; 1867 1868 if (errs) 1869 iorq->errno = iopb->errno; 1870 else 1871 iorq->errno = 0; 1872 1873 /* handle non-fatal errors */ 1874 1875 if (errs && 1876 xdc_error(xdcsc, iorq, iopb, rqno, comm) == XD_ERR_AOK) 1877 continue; /* AOK: we resubmitted it */ 1878 1879 1880 /* this iorq is now done (hasn't been restarted or anything) */ 1881 1882 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror) 1883 xdc_perror(iorq, iopb, 0); 1884 1885 /* now, if read/write check to make sure we got all the data 1886 * we needed. (this may not be the case if we got an error in 1887 * the middle of a multisector request). */ 1888 1889 if ((iorq->mode & XD_MODE_B144) != 0 && errs == 0 && 1890 (comm == XDCMD_RD || comm == XDCMD_WR)) { 1891 /* we just successfully processed a bad144 sector 1892 * note: if we are in bad 144 mode, the pointers have 1893 * been advanced already (see above) and are pointing 1894 * at the bad144 sector. to exit bad144 mode, we 1895 * must advance the pointers 1 sector and issue a new 1896 * request if there are still sectors left to process 1897 * 1898 */ 1899 XDC_ADVANCE(iorq, 1); /* advance 1 sector */ 1900 1901 /* exit b144 mode */ 1902 iorq->mode = iorq->mode & (~XD_MODE_B144); 1903 1904 if (iorq->sectcnt) { /* more to go! 
*/ 1905 iorq->lasterror = iorq->errno = iopb->errno = 0; 1906 iopb->errs = iopb->done = 0; 1907 iorq->tries = 0; 1908 iopb->sectcnt = iorq->sectcnt; 1909 iopb->cylno = iorq->blockno / 1910 iorq->xd->sectpercyl; 1911 iopb->headno = 1912 (iorq->blockno / iorq->xd->nhead) % 1913 iorq->xd->nhead; 1914 iopb->sectno = iorq->blockno % XDFM_BPS; 1915 iopb->daddr = 1916 dvma_kvtopa(iorq->dbuf, xdcsc->bustype); 1917 XDC_HWAIT(xdcsc, rqno); 1918 xdc_start(xdcsc, 1); /* resubmit */ 1919 continue; 1920 } 1921 } 1922 /* final cleanup, totally done with this request */ 1923 1924 switch (XD_STATE(iorq->mode)) { 1925 case XD_SUB_NORM: 1926 bp = iorq->buf; 1927 if (errs) { 1928 bp->b_error = EIO; 1929 bp->b_flags |= B_ERROR; 1930 bp->b_resid = iorq->sectcnt * XDFM_BPS; 1931 } else { 1932 bp->b_resid = 0; /* done */ 1933 } 1934 /* Sun3: map/unmap regardless of B_PHYS */ 1935 dvma_mapout(iorq->dbufbase, 1936 iorq->buf->b_bcount); 1937 disk_unbusy(&iorq->xd->sc_dk, 1938 (bp->b_bcount - bp->b_resid), 1939 (bp->b_flags & B_READ)); 1940 XDC_FREE(xdcsc, rqno); 1941 biodone(bp); 1942 break; 1943 case XD_SUB_WAIT: 1944 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1945 xdcsc->ndone++; 1946 wakeup(iorq); 1947 break; 1948 case XD_SUB_POLL: 1949 iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1950 xdcsc->ndone++; 1951 break; 1952 } 1953 } 1954 1955 return (XD_ERR_AOK); 1956 } 1957 1958 /* 1959 * xdc_perror: print error. 1960 * - if still_trying is true: we got an error, retried and got a 1961 * different error. in that case lasterror is the old error, 1962 * and errno is the new one. 1963 * - if still_trying is not true, then if we ever had an error it 1964 * is in lasterror. also, if iorq->errno == 0, then we recovered 1965 * from that error (otherwise iorq->errno == iorq->lasterror). 1966 */ 1967 void 1968 xdc_perror(iorq, iopb, still_trying) 1969 struct xd_iorq *iorq; 1970 struct xd_iopb *iopb; 1971 int still_trying; 1972 1973 { 1974 1975 int error = iorq->lasterror; 1976 1977 printf("%s", (iorq->xd) ? 1978 iorq->xd->sc_dev.dv_xname : 1979 iorq->xdc->sc_dev.dv_xname); 1980 if (iorq->buf) 1981 printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev)); 1982 if (iopb->comm == XDCMD_RD || iopb->comm == XDCMD_WR) 1983 printf("%s %d/%d/%d: ", 1984 (iopb->comm == XDCMD_RD) ? "read" : "write", 1985 iopb->cylno, iopb->headno, iopb->sectno); 1986 printf("%s", xdc_e2str(error)); 1987 1988 if (still_trying) 1989 printf(" [still trying, new error=%s]", xdc_e2str(iorq->errno)); 1990 else 1991 if (iorq->errno == 0) 1992 printf(" [recovered in %d tries]", iorq->tries); 1993 1994 printf("\n"); 1995 } 1996 1997 /* 1998 * xdc_error: non-fatal error encountered... recover. 
1999 * return AOK if resubmitted, return FAIL if this iopb is done 2000 */ 2001 int 2002 xdc_error(xdcsc, iorq, iopb, rqno, comm) 2003 struct xdc_softc *xdcsc; 2004 struct xd_iorq *iorq; 2005 struct xd_iopb *iopb; 2006 int rqno, comm; 2007 2008 { 2009 int errno = iorq->errno; 2010 int erract = errno & XD_ERA_MASK; 2011 int oldmode, advance, i; 2012 2013 if (erract == XD_ERA_RSET) { /* some errors require a reset */ 2014 oldmode = iorq->mode; 2015 iorq->mode = XD_SUB_DONE | (~XD_SUB_MASK & oldmode); 2016 xdcsc->ndone++; 2017 /* make xdc_start ignore us */ 2018 xdc_reset(xdcsc, 1, XD_RSET_NONE, errno, iorq->xd); 2019 iorq->mode = oldmode; 2020 xdcsc->ndone--; 2021 } 2022 /* check for read/write to a sector in bad144 table if bad: redirect 2023 * request to bad144 area */ 2024 2025 if ((comm == XDCMD_RD || comm == XDCMD_WR) && 2026 (iorq->mode & XD_MODE_B144) == 0) { 2027 advance = iorq->sectcnt - iopb->sectcnt; 2028 XDC_ADVANCE(iorq, advance); 2029 if ((i = isbad(&iorq->xd->dkb, iorq->blockno / iorq->xd->sectpercyl, 2030 (iorq->blockno / iorq->xd->nsect) % iorq->xd->nhead, 2031 iorq->blockno % iorq->xd->nsect)) != -1) { 2032 iorq->mode |= XD_MODE_B144; /* enter bad144 mode & 2033 * redirect */ 2034 iopb->errno = iopb->done = iopb->errs = 0; 2035 iopb->sectcnt = 1; 2036 iopb->cylno = (iorq->xd->ncyl + iorq->xd->acyl) - 2; 2037 /* second to last acyl */ 2038 i = iorq->xd->sectpercyl - 1 - i; /* follow bad144 2039 * standard */ 2040 iopb->headno = i / iorq->xd->nhead; 2041 iopb->sectno = i % iorq->xd->nhead; 2042 XDC_HWAIT(xdcsc, rqno); 2043 xdc_start(xdcsc, 1); /* resubmit */ 2044 return (XD_ERR_AOK); /* recovered! */ 2045 } 2046 } 2047 2048 /* 2049 * it isn't a bad144 sector, must be real error! see if we can retry 2050 * it? 2051 */ 2052 if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror) 2053 xdc_perror(iorq, iopb, 1); /* inform of error state 2054 * change */ 2055 iorq->lasterror = errno; 2056 2057 if ((erract == XD_ERA_RSET || erract == XD_ERA_HARD) 2058 && iorq->tries < XDC_MAXTRIES) { /* retry? */ 2059 iorq->tries++; 2060 iorq->errno = iopb->errno = iopb->done = iopb->errs = 0; 2061 XDC_HWAIT(xdcsc, rqno); 2062 xdc_start(xdcsc, 1); /* restart */ 2063 return (XD_ERR_AOK); /* recovered! */ 2064 } 2065 2066 /* failed to recover from this error */ 2067 return (XD_ERR_FAIL); 2068 } 2069 2070 /* 2071 * xdc_tick: make sure xd is still alive and ticking (err, kicking). 
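 *
 * each tick we:
 * - with XDC_DIAG, cross-check the iorq accounting: nwait + nrun +
 *   nfree + ndone should add up to XDC_MAXIOPB, and the queues are
 *   dumped if it doesn't
 * - age the ttl of every active iorq; if one reaches zero the
 *   controller is assumed wedged and gets reset
 * - reschedule ourselves with callout_reset() for the next tick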
2072 */ 2073 void 2074 xdc_tick(arg) 2075 void *arg; 2076 2077 { 2078 struct xdc_softc *xdcsc = arg; 2079 int lcv, s, reset = 0; 2080 #ifdef XDC_DIAG 2081 int wait, run, free, done, whd = 0; 2082 u_char fqc[XDC_MAXIOPB], wqc[XDC_MAXIOPB], mark[XDC_MAXIOPB]; 2083 s = splbio(); 2084 wait = xdcsc->nwait; 2085 run = xdcsc->nrun; 2086 free = xdcsc->nfree; 2087 done = xdcsc->ndone; 2088 memcpy(wqc, xdcsc->waitq, sizeof(wqc)); 2089 memcpy(fqc, xdcsc->freereq, sizeof(fqc)); 2090 splx(s); 2091 if (wait + run + free + done != XDC_MAXIOPB) { 2092 printf("%s: diag: IOPB miscount (got w/f/r/d %d/%d/%d/%d, wanted %d)\n", 2093 xdcsc->sc_dev.dv_xname, wait, free, run, done, XDC_MAXIOPB); 2094 memset(mark, 0, sizeof(mark)); 2095 printf("FREE: "); 2096 for (lcv = free; lcv > 0; lcv--) { 2097 printf("%d ", fqc[lcv - 1]); 2098 mark[fqc[lcv - 1]] = 1; 2099 } 2100 printf("\nWAIT: "); 2101 lcv = wait; 2102 while (lcv > 0) { 2103 printf("%d ", wqc[whd]); 2104 mark[wqc[whd]] = 1; 2105 whd = (whd + 1) % XDC_MAXIOPB; 2106 lcv--; 2107 } 2108 printf("\n"); 2109 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 2110 if (mark[lcv] == 0) 2111 printf("MARK: running %d: mode %d done %d errs %d errno 0x%x ttl %d buf %p\n", 2112 lcv, xdcsc->reqs[lcv].mode, 2113 xdcsc->iopbase[lcv].done, 2114 xdcsc->iopbase[lcv].errs, 2115 xdcsc->iopbase[lcv].errno, 2116 xdcsc->reqs[lcv].ttl, 2117 xdcsc->reqs[lcv].buf); 2118 } 2119 } else 2120 if (done > XDC_MAXIOPB - XDC_SUBWAITLIM) 2121 printf("%s: diag: lots of done jobs (%d)\n", 2122 xdcsc->sc_dev.dv_xname, done); 2123 2124 #endif 2125 #ifdef XDC_DEBUG 2126 printf("%s: tick: csr 0x%x, w/f/r/d %d/%d/%d/%d\n", 2127 xdcsc->sc_dev.dv_xname, 2128 xdcsc->xdc->xdc_csr, xdcsc->nwait, xdcsc->nfree, xdcsc->nrun, 2129 xdcsc->ndone); 2130 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 2131 if (xdcsc->reqs[lcv].mode) 2132 printf("running %d: mode %d done %d errs %d errno 0x%x\n", 2133 lcv, 2134 xdcsc->reqs[lcv].mode, xdcsc->iopbase[lcv].done, 2135 xdcsc->iopbase[lcv].errs, xdcsc->iopbase[lcv].errno); 2136 } 2137 #endif 2138 2139 /* reduce ttl for each request if one goes to zero, reset xdc */ 2140 s = splbio(); 2141 for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 2142 if (xdcsc->reqs[lcv].mode == 0 || 2143 XD_STATE(xdcsc->reqs[lcv].mode) == XD_SUB_DONE) 2144 continue; 2145 xdcsc->reqs[lcv].ttl--; 2146 if (xdcsc->reqs[lcv].ttl == 0) 2147 reset = 1; 2148 } 2149 if (reset) { 2150 printf("%s: watchdog timeout\n", xdcsc->sc_dev.dv_xname); 2151 xdc_reset(xdcsc, 0, XD_RSET_NONE, XD_ERR_FAIL, NULL); 2152 } 2153 splx(s); 2154 2155 /* until next time */ 2156 2157 callout_reset(&xdcsc->sc_tick_ch, XDC_TICKCNT, xdc_tick, xdcsc); 2158 } 2159 2160 /* 2161 * xdc_ioctlcmd: this function provides a user level interface to the 2162 * controller via ioctl. this allows "format" programs to be written 2163 * in user code, and is also useful for some debugging. we return 2164 * an error code. called at user priority. 
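 *
 * only a subset of commands is let through: NOP, RD/WR of up to
 * XD_IOCMD_MAXS sectors, RDP of drive parameters (answered directly
 * from hw_spt without touching the hardware), and XRD/XWR of track
 * headers, defect maps, and (write only) formatting. seeks, WRP,
 * header operations and the diagnostic tests are rejected with EINVAL.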
2165 */ 2166 int 2167 xdc_ioctlcmd(xd, dev, xio) 2168 struct xd_softc *xd; 2169 dev_t dev; 2170 struct xd_iocmd *xio; 2171 2172 { 2173 int s, err, rqno; 2174 caddr_t dvmabuf = NULL; 2175 struct xdc_softc *xdcsc; 2176 2177 /* check sanity of requested command */ 2178 2179 switch (xio->cmd) { 2180 2181 case XDCMD_NOP: /* no op: everything should be zero */ 2182 if (xio->subfn || xio->dptr || xio->dlen || 2183 xio->block || xio->sectcnt) 2184 return (EINVAL); 2185 break; 2186 2187 case XDCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */ 2188 case XDCMD_WR: 2189 if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS || 2190 xio->sectcnt * XDFM_BPS != xio->dlen || xio->dptr == NULL) 2191 return (EINVAL); 2192 break; 2193 2194 case XDCMD_SK: /* seek: doesn't seem useful to export this */ 2195 return (EINVAL); 2196 2197 case XDCMD_WRP: /* write parameters */ 2198 return (EINVAL);/* not useful, except maybe drive 2199 * parameters... but drive parameters should 2200 * go via disklabel changes */ 2201 2202 case XDCMD_RDP: /* read parameters */ 2203 if (xio->subfn != XDFUN_DRV || 2204 xio->dlen || xio->block || xio->dptr) 2205 return (EINVAL); /* allow read drive params to 2206 * get hw_spt */ 2207 xio->sectcnt = xd->hw_spt; /* we already know the answer */ 2208 return (0); 2209 break; 2210 2211 case XDCMD_XRD: /* extended read/write */ 2212 case XDCMD_XWR: 2213 2214 switch (xio->subfn) { 2215 2216 case XDFUN_THD:/* track headers */ 2217 if (xio->sectcnt != xd->hw_spt || 2218 (xio->block % xd->nsect) != 0 || 2219 xio->dlen != XD_IOCMD_HSZ * xd->hw_spt || 2220 xio->dptr == NULL) 2221 return (EINVAL); 2222 xio->sectcnt = 0; 2223 break; 2224 2225 case XDFUN_FMT:/* NOTE: also XDFUN_VFY */ 2226 if (xio->cmd == XDCMD_XRD) 2227 return (EINVAL); /* no XDFUN_VFY */ 2228 if (xio->sectcnt || xio->dlen || 2229 (xio->block % xd->nsect) != 0 || xio->dptr) 2230 return (EINVAL); 2231 break; 2232 2233 case XDFUN_HDR:/* header, header verify, data, data ECC */ 2234 return (EINVAL); /* not yet */ 2235 2236 case XDFUN_DM: /* defect map */ 2237 case XDFUN_DMX:/* defect map (alternate location) */ 2238 if (xio->sectcnt || xio->dlen != XD_IOCMD_DMSZ || 2239 (xio->block % xd->nsect) != 0 || xio->dptr == NULL) 2240 return (EINVAL); 2241 break; 2242 2243 default: 2244 return (EINVAL); 2245 } 2246 break; 2247 2248 case XDCMD_TST: /* diagnostics */ 2249 return (EINVAL); 2250 2251 default: 2252 return (EINVAL);/* ??? */ 2253 } 2254 2255 /* create DVMA buffer for request if needed */ 2256 2257 if (xio->dlen) { 2258 dvmabuf = dvma_malloc(xio->dlen); 2259 if (xio->cmd == XDCMD_WR || xio->cmd == XDCMD_XWR) { 2260 err = copyin(xio->dptr, dvmabuf, xio->dlen); 2261 if (err) { 2262 dvma_free(dvmabuf, xio->dlen); 2263 return (err); 2264 } 2265 } 2266 } 2267 /* do it! 
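     * (submitted at splbio in XD_SUB_WAIT mode, so the request does not
     * come back until the controller has finished the iopb -- see the
     * XD_SUB_WAIT wakeup in xdc_remove_iorq above. errno/tries are saved
     * for the caller before XDC_DONE() retires the iorq, then any read
     * data is copied back out)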
     */
2268 
2269     err = 0;
2270     xdcsc = xd->parent;
2271     s = splbio();
2272     rqno = xdc_cmd(xdcsc, xio->cmd, xio->subfn, xd->xd_drive, xio->block,
2273         xio->sectcnt, dvmabuf, XD_SUB_WAIT);
2274     if (rqno == XD_ERR_FAIL) {
2275         err = EIO;
2276         goto done;
2277     }
2278     xio->errno = xdcsc->reqs[rqno].errno;
2279     xio->tries = xdcsc->reqs[rqno].tries;
2280     XDC_DONE(xdcsc, rqno, err);
2281 
2282     if (xio->cmd == XDCMD_RD || xio->cmd == XDCMD_XRD)
2283         err = copyout(dvmabuf, xio->dptr, xio->dlen);
2284 
2285 done:
2286     splx(s);
2287     if (dvmabuf)
2288         dvma_free(dvmabuf, xio->dlen);
2289     return (err);
2290 }
2291 
2292 /*
2293  * xdc_e2str: convert error code number into an error string
2294  */
2295 char *
2296 xdc_e2str(no)
2297     int no;
2298 {
2299     switch (no) {
2300     case XD_ERR_FAIL:
2301         return ("Software fatal error");
2302     case XD_ERR_AOK:
2303         return ("Successful completion");
2304     case XD_ERR_ICYL:
2305         return ("Illegal cylinder address");
2306     case XD_ERR_IHD:
2307         return ("Illegal head address");
2308     case XD_ERR_ISEC:
2309         return ("Illegal sector address");
2310     case XD_ERR_CZER:
2311         return ("Count zero");
2312     case XD_ERR_UIMP:
2313         return ("Unimplemented command");
2314     case XD_ERR_IF1:
2315         return ("Illegal field length 1");
2316     case XD_ERR_IF2:
2317         return ("Illegal field length 2");
2318     case XD_ERR_IF3:
2319         return ("Illegal field length 3");
2320     case XD_ERR_IF4:
2321         return ("Illegal field length 4");
2322     case XD_ERR_IF5:
2323         return ("Illegal field length 5");
2324     case XD_ERR_IF6:
2325         return ("Illegal field length 6");
2326     case XD_ERR_IF7:
2327         return ("Illegal field length 7");
2328     case XD_ERR_ISG:
2329         return ("Illegal scatter/gather length");
2330     case XD_ERR_ISPT:
2331         return ("Not enough sectors per track");
2332     case XD_ERR_ALGN:
2333         return ("Next IOPB address alignment error");
2334     case XD_ERR_SGAL:
2335         return ("Scatter/gather address alignment error");
2336     case XD_ERR_SGEC:
2337         return ("Scatter/gather with auto-ECC");
2338     case XD_ERR_SECC:
2339         return ("Soft ECC corrected");
2340     case XD_ERR_SIGN:
2341         return ("ECC ignored");
2342     case XD_ERR_ASEK:
2343         return ("Auto-seek retry recovered");
2344     case XD_ERR_RTRY:
2345         return ("Soft retry recovered");
2346     case XD_ERR_HECC:
2347         return ("Hard data ECC");
2348     case XD_ERR_NHDR:
2349         return ("Header not found");
2350     case XD_ERR_NRDY:
2351         return ("Drive not ready");
2352     case XD_ERR_TOUT:
2353         return ("Operation timeout");
2354     case XD_ERR_VTIM:
2355         return ("VMEDMA timeout");
2356     case XD_ERR_DSEQ:
2357         return ("Disk sequencer error");
2358     case XD_ERR_HDEC:
2359         return ("Header ECC error");
2360     case XD_ERR_RVFY:
2361         return ("Read verify");
2362     case XD_ERR_VFER:
2363         return ("Fatal VMEDMA error");
2364     case XD_ERR_VBUS:
2365         return ("VMEbus error");
2366     case XD_ERR_DFLT:
2367         return ("Drive faulted");
2368     case XD_ERR_HECY:
2369         return ("Header error/cylinder");
2370     case XD_ERR_HEHD:
2371         return ("Header error/head");
2372     case XD_ERR_NOCY:
2373         return ("Drive not on-cylinder");
2374     case XD_ERR_SEEK:
2375         return ("Seek error");
2376     case XD_ERR_ILSS:
2377         return ("Illegal sector size");
2378     case XD_ERR_SEC:
2379         return ("Soft ECC");
2380     case XD_ERR_WPER:
2381         return ("Write-protect error");
2382     case XD_ERR_IRAM:
2383         return ("IRAM self test failure");
2384     case XD_ERR_MT3:
2385         return ("Maintenance test 3 failure (DSKCEL RAM)");
2386     case XD_ERR_MT4:
2387         return ("Maintenance test 4 failure (header shift reg)");
2388     case XD_ERR_MT5:
2389         return ("Maintenance test 5 failure (VMEDMA regs)");
2390     case XD_ERR_MT6:
2391 return ("Maintenance test 6 failure (REGCEL chip)"); 2392 case XD_ERR_MT7: 2393 return ("Maintenance test 7 failure (buffer parity)"); 2394 case XD_ERR_MT8: 2395 return ("Maintenance test 8 failure (disk FIFO)"); 2396 case XD_ERR_IOCK: 2397 return ("IOPB checksum miscompare"); 2398 case XD_ERR_IODM: 2399 return ("IOPB DMA fatal"); 2400 case XD_ERR_IOAL: 2401 return ("IOPB address alignment error"); 2402 case XD_ERR_FIRM: 2403 return ("Firmware error"); 2404 case XD_ERR_MMOD: 2405 return ("Illegal maintenance mode test number"); 2406 case XD_ERR_ACFL: 2407 return ("ACFAIL asserted"); 2408 default: 2409 return ("Unknown error"); 2410 } 2411 } 2412
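
/*
 * usage sketch (not kernel code, and not compiled here): roughly how a
 * user-level format or diagnostic program could drive xdc_ioctlcmd()
 * above through the raw xd device. struct xd_iocmd and the XDCMD/XDFUN
 * constants come from xio.h; the ioctl request code and the device name
 * used below (XDIOCCMD, /dev/rxd0c) are placeholders -- check xio.h and
 * the local /dev entries for the real ones.
 *
 *	struct xd_iocmd xio;
 *	char buf[XDFM_BPS];
 *	int fd = open("/dev/rxd0c", O_RDWR);
 *
 *	if (fd < 0)
 *		err(1, "open");
 *	memset(&xio, 0, sizeof(xio));
 *	xio.cmd = XDCMD_RD;			-- read one sector
 *	xio.block = 0;
 *	xio.sectcnt = 1;
 *	xio.dlen = xio.sectcnt * XDFM_BPS;	-- must equal sectcnt * XDFM_BPS
 *	xio.dptr = buf;
 *	if (ioctl(fd, XDIOCCMD, &xio) < 0)
 *		err(1, "XDIOCCMD");
 *	printf("controller errno %d, %d tries\n", xio.errno, xio.tries);
 */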