/*-
 * Copyright (c) 1997, 1998, 1999
 * Nan Yang Computer Services Limited.  All rights reserved.
 *
 * Parts copyright (c) 1997, 1998 Cybernet Corporation, NetMAX project.
 *
 * Written by Greg Lehey
 *
 * This software is distributed under the so-called ``Berkeley
 * License'':
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Nan Yang Computer
 *      Services Limited.
 * 4. Neither the name of the Company nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * This software is provided ``as is'', and any express or implied
 * warranties, including, but not limited to, the implied warranties of
 * merchantability and fitness for a particular purpose are disclaimed.
 * In no event shall the company or contributors be liable for any
 * direct, indirect, incidental, special, exemplary, or consequential
 * damages (including, but not limited to, procurement of substitute
 * goods or services; loss of use, data, or profits; or business
 * interruption) however caused and on any theory of liability, whether
 * in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this software, even if
 * advised of the possibility of such damage.
 *
 * $Id: vinumrequest.c,v 1.30 2001/01/09 04:20:55 grog Exp grog $
 * $FreeBSD: src/sys/dev/vinum/vinumrequest.c,v 1.44.2.5 2002/08/28 04:30:56 grog Exp $
 */

#include "vinumhdr.h"
#include "request.h"
#include <sys/resourcevar.h>

enum requeststatus bre(struct request *rq,
    int plexno,
    vinum_off_t *diskstart,
    vinum_off_t diskend);
enum requeststatus bre5(struct request *rq,
    int plexno,
    vinum_off_t *diskstart,
    vinum_off_t diskend);
enum requeststatus build_read_request(struct request *rq, int volplexno);
enum requeststatus build_write_request(struct request *rq);
enum requeststatus build_rq_buffer(struct rqelement *rqe, struct plex *plex);
int find_alternate_sd(struct request *rq);
int check_range_covered(struct request *);
void complete_rqe(struct bio *bio);
void complete_raid5_write(struct rqelement *);
int abortrequest(struct request *rq, int error);
void sdio_done(struct bio *bio);
struct bio *vinum_bounds_check(struct bio *bio, struct volume *vol);
caddr_t allocdatabuf(struct rqelement *rqe);
void freedatabuf(struct rqelement *rqe);

#ifdef VINUMDEBUG
struct rqinfo rqinfo[RQINFO_SIZE];
struct rqinfo *rqip = rqinfo;

void
logrq(enum rqinfo_type type, union rqinfou info, struct bio *ubio)
{
    cdev_t dev;

    crit_enter();

    microtime(&rqip->timestamp); /* when did this happen? */
    rqip->type = type;
    rqip->bio = ubio; /* user buffer */

    switch (type) {
    case loginfo_user_bp:
    case loginfo_user_bpl:
    case loginfo_sdio: /* subdisk I/O */
    case loginfo_sdiol: /* subdisk I/O launch */
    case loginfo_sdiodone: /* subdisk I/O complete */
	bcopy(info.bio, &rqip->info.bio, sizeof(struct bio));
	dev = info.bio->bio_driver_info;
	rqip->devmajor = major(dev);
	rqip->devminor = minor(dev);
	break;

    case loginfo_iodone:
    case loginfo_rqe:
    case loginfo_raid5_data:
    case loginfo_raid5_parity:
	bcopy(info.rqe, &rqip->info.rqe, sizeof(struct rqelement));
	dev = info.rqe->b.b_bio1.bio_driver_info;
	rqip->devmajor = major(dev);
	rqip->devminor = minor(dev);
	break;

    case loginfo_lockwait:
    case loginfo_lock:
    case loginfo_unlock:
	bcopy(info.lockinfo, &rqip->info.lockinfo, sizeof(struct rangelock));

	break;

    case loginfo_unused:
	break;
    }
    rqip++;
    if (rqip >= &rqinfo[RQINFO_SIZE]) /* wrap around */
	rqip = rqinfo;
    crit_exit();
}

#endif

int
vinumstrategy(struct dev_strategy_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct bio *bio = ap->a_bio;
    struct buf *bp = bio->bio_buf;
    struct bio *nbio = bio;
    struct volume *vol = NULL;
    int volno;

    switch (DEVTYPE(dev)) {
    case VINUM_SD_TYPE:
    case VINUM_RAWSD_TYPE:
	bio->bio_driver_info = dev;
	sdio(bio);
	break;
    case VINUM_DRIVE_TYPE:
    default:
	/*
	 * In fact, vinum doesn't handle drives: they're
	 * handled directly by the disk drivers
	 */
	bp->b_error = EIO; /* I/O error */
	bp->b_flags |= B_ERROR;
	biodone(bio);
	break;

    case VINUM_VOLUME_TYPE: /* volume I/O */
	volno = Volno(dev);
	vol = &VOL[volno];
	if (vol->state != volume_up) { /* can't access this volume */
	    bp->b_error = EIO; /* I/O error */
	    bp->b_flags |= B_ERROR;
	    biodone(bio);
	    break;
	}
	nbio = vinum_bounds_check(bio, vol);
	if (nbio == NULL) {
	    biodone(bio);
	    break;
	}
	/* FALLTHROUGH */
    case VINUM_PLEX_TYPE:
    case VINUM_RAWPLEX_TYPE:
	/*
	 * Plex I/O is pretty much the same as volume I/O
	 * for a single plex.  Indicate this by passing a NULL
	 * pointer (set above) for the volume
	 */
	bp->b_resid = bp->b_bcount; /* transfer everything */
	vinumstart(dev, nbio, 0);
	break;
    }
    return(0);
}

/*
 * Start a transfer.  Return -1 on error,
 * 0 if OK, 1 if we need to retry.
 * Parameter reviveok is set when doing
 * transfers for revives: it allows transfers to
 * be started immediately when a revive is in
 * progress.  During revive, normal transfers
 * are queued if they share address space with
 * a currently active revive operation.
 */
int
vinumstart(cdev_t dev, struct bio *bio, int reviveok)
{
    struct buf *bp = bio->bio_buf;
    int plexno;
    int maxplex; /* maximum number of plexes to handle */
    struct volume *vol;
    struct request *rq; /* build up our request here */
    enum requeststatus status;

    bio->bio_driver_info = dev;

#if VINUMDEBUG
    if (debug & DEBUG_LASTREQS)
	logrq(loginfo_user_bp, (union rqinfou) bio, bio);
#endif

    if ((bp->b_bcount % DEV_BSIZE) != 0) { /* bad length */
	bp->b_error = EINVAL; /* invalid size */
	bp->b_flags |= B_ERROR;
	biodone(bio);
	return -1;
    }
    rq = (struct request *) Malloc(sizeof(struct request)); /* allocate a request struct */
    if (rq == NULL) { /* can't do it */
	bp->b_error = ENOMEM; /* can't get memory */
	bp->b_flags |= B_ERROR;
	biodone(bio);
	return -1;
    }
    bzero(rq, sizeof(struct request));

    /*
     * Note the volume ID.  This can be NULL, which
     * the request building functions use as an
     * indication for single plex I/O
     */
    rq->bio = bio; /* and the user buffer struct */

    if (DEVTYPE(dev) == VINUM_VOLUME_TYPE) { /* it's a volume, */
	rq->volplex.volno = Volno(dev); /* get the volume number */
	vol = &VOL[rq->volplex.volno]; /* and point to it */
	vol->active++; /* one more active request */
	maxplex = vol->plexes; /* consider all its plexes */
    } else {
	vol = NULL; /* no volume */
	rq->volplex.plexno = Plexno(dev); /* point to the plex */
	rq->isplex = 1; /* note that it's a plex */
	maxplex = 1; /* just the one plex */
    }

    if (bp->b_cmd == BUF_CMD_READ) {
	/*
	 * This is a read request.  Decide
	 * which plex to read from.
	 *
	 * There's a potential race condition here,
	 * since we're not locked, and we could end
	 * up multiply incrementing the round-robin
	 * counter.  This doesn't have any serious
	 * effects, however.
	 */
	if (vol != NULL) {
	    plexno = vol->preferred_plex; /* get the plex to use */
	    if (plexno < 0) { /* round robin */
		plexno = vol->last_plex_read;
		vol->last_plex_read++;
		if (vol->last_plex_read >= vol->plexes) /* got to the end? */
		    vol->last_plex_read = 0; /* wrap around */
	    }
	    status = build_read_request(rq, plexno); /* build a request */
	} else {
	    vinum_off_t diskaddr = (vinum_off_t)(bio->bio_offset >> DEV_BSHIFT);
	    /* start offset of transfer */
	    status = bre(rq, /* build a request list */
		rq->volplex.plexno,
		&diskaddr,
		diskaddr + (bp->b_bcount / DEV_BSIZE));
	}

	if (status > REQUEST_RECOVERED) { /* can't satisfy it */
	    if (status == REQUEST_DOWN) { /* not enough subdisks */
		bp->b_error = EIO; /* I/O error */
		bp->b_flags |= B_ERROR;
	    }
	    biodone(bio);
	    freerq(rq);
	    return -1;
	}
	return launch_requests(rq, reviveok); /* now start the requests if we can */
    } else
	/*
	 * This is a write operation.  We write to all plexes.  If this is
	 * a RAID-4 or RAID-5 plex, we must also update the parity stripe.
	 */
    {
	if (vol != NULL)
	    status = build_write_request(rq); /* Not all the subdisks are up */
	else { /* plex I/O */
	    vinum_off_t diskstart;
	    vinum_off_t diskend;

	    diskstart = (vinum_off_t)(bio->bio_offset >> DEV_BSHIFT); /* start offset of transfer */
	    diskend = diskstart + bp->b_bcount / DEV_BSIZE;
	    status = bre(rq, Plexno(dev),
		&diskstart, diskend); /* build requests for the plex */
	}
	if (status > REQUEST_RECOVERED) { /* can't satisfy it */
	    if (status == REQUEST_DOWN) { /* not enough subdisks */
		bp->b_error = EIO; /* I/O error */
		bp->b_flags |= B_ERROR;
	    }
	    biodone(bio);
	    freerq(rq);
	    return -1;
	}
	return launch_requests(rq, reviveok); /* now start the requests if we can */
    }
}

/*
 * Call the low-level strategy routines to
 * perform the requests in a struct request
 */
int
launch_requests(struct request *rq, int reviveok)
{
    struct rqgroup *rqg;
    int rqno; /* loop index */
    struct rqelement *rqe; /* current element */
    struct drive *drive;
    int rcount; /* request count */

    /*
     * First find out whether we're reviving, and the
     * request contains a conflict.  If so, we hang
     * the request off plex->waitlist of the first
     * plex we find which is reviving
     */

    if ((rq->flags & XFR_REVIVECONFLICT) /* possible revive conflict */
    &&(!reviveok)) { /* and we don't want to do it now, */
	struct sd *sd;
	struct request *waitlist; /* point to the waitlist */

	sd = &SD[rq->sdno];
	if (sd->waitlist != NULL) { /* something there already, */
	    waitlist = sd->waitlist;
	    while (waitlist->next != NULL) /* find the end */
		waitlist = waitlist->next;
	    waitlist->next = rq; /* hook our request there */
	} else
	    sd->waitlist = rq; /* hook our request at the front */

#if VINUMDEBUG
	if (debug & DEBUG_REVIVECONFLICT) {
	    log(LOG_DEBUG,
		"Revive conflict sd %d: %p\n%s dev %d.%d, offset 0x%llx, length %d\n",
		rq->sdno,
		rq,
		(rq->bio->bio_buf->b_cmd == BUF_CMD_READ) ? "Read" : "Write",
		major(((cdev_t)rq->bio->bio_driver_info)),
		minor(((cdev_t)rq->bio->bio_driver_info)),
		rq->bio->bio_offset,
		rq->bio->bio_buf->b_bcount);
	}
#endif
	return 0; /* and get out of here */
    }
    rq->active = 0; /* nothing yet */
#if VINUMDEBUG
    if (debug & DEBUG_ADDRESSES)
	log(LOG_DEBUG,
	    "Request: %p\n%s dev %d.%d, offset 0x%llx, length %d\n",
	    rq,
	    (rq->bio->bio_buf->b_cmd == BUF_CMD_READ) ? "Read" : "Write",
	    major(((cdev_t)rq->bio->bio_driver_info)),
	    minor(((cdev_t)rq->bio->bio_driver_info)),
	    rq->bio->bio_offset,
	    rq->bio->bio_buf->b_bcount);
    vinum_conf.lastrq = rq;
    vinum_conf.lastbio = rq->bio;
    if (debug & DEBUG_LASTREQS)
	logrq(loginfo_user_bpl, (union rqinfou) rq->bio, rq->bio);
#endif

    /*
     * This loop happens without any participation
     * of the bottom half, so it requires no
     * protection.
     */
    for (rqg = rq->rqg; rqg != NULL; rqg = rqg->next) { /* through the whole request chain */
	rqg->active = rqg->count; /* they're all active */
	for (rqno = 0; rqno < rqg->count; rqno++) {
	    rqe = &rqg->rqe[rqno];
	    if (rqe->flags & XFR_BAD_SUBDISK) /* this subdisk is bad, */
		rqg->active--; /* one less active request */
	}
	if (rqg->active) /* we have at least one active request, */
	    rq->active++; /* one more active request group */
    }

    /*
     * Now fire off the requests.  In this loop the
     * bottom half could be completing requests
     * before we finish, so we need critical section protection.
     */
    crit_enter();
    for (rqg = rq->rqg; rqg != NULL;) { /* through the whole request chain */
	if (rqg->lockbase >= 0) /* this rqg needs a lock first */
	    rqg->lock = lockrange(rqg->lockbase, rqg->rq->bio->bio_buf, &PLEX[rqg->plexno]);
	rcount = rqg->count;
	for (rqno = 0; rqno < rcount;) {
	    cdev_t dev;

	    rqe = &rqg->rqe[rqno];

	    /*
	     * Point to next rqg before the bottom end
	     * changes the structures.
	     */
	    if (++rqno >= rcount)
		rqg = rqg->next;
	    if ((rqe->flags & XFR_BAD_SUBDISK) == 0) { /* this subdisk is good, */
		drive = &DRIVE[rqe->driveno]; /* look at drive */
		drive->active++;
		if (drive->active >= drive->maxactive)
		    drive->maxactive = drive->active;
		vinum_conf.active++;
		if (vinum_conf.active >= vinum_conf.maxactive)
		    vinum_conf.maxactive = vinum_conf.active;

		dev = rqe->b.b_bio1.bio_driver_info;
#ifdef VINUMDEBUG
		if (debug & DEBUG_ADDRESSES)
		    log(LOG_DEBUG,
			" %s dev %d.%d, sd %d, offset 0x%llx, devoffset 0x%llx, length %d\n",
			(rqe->b.b_cmd == BUF_CMD_READ) ? "Read" : "Write",
			major(dev),
			minor(dev),
			rqe->sdno,
			rqe->b.b_bio1.bio_offset - ((off_t)SD[rqe->sdno].driveoffset << DEV_BSHIFT),
			rqe->b.b_bio1.bio_offset,
			rqe->b.b_bcount);
		if (debug & DEBUG_LASTREQS)
		    logrq(loginfo_rqe, (union rqinfou) rqe, rq->bio);
#endif
		/* fire off the request */
		/* XXX this had better not be a low level drive */
		dev_dstrategy(dev, &rqe->b.b_bio1);
	    }
	}
    }
    crit_exit();
    return 0;
}

/*
 * define the low-level requests needed to perform a
 * high-level I/O operation for a specific plex 'plexno'.
 *
 * Return REQUEST_OK if all subdisks involved in the request are up,
 * REQUEST_DOWN if some subdisks are not up, and REQUEST_EOF if the
 * request is at least partially outside the bounds of the subdisks.
 *
 * Modify the pointer *diskstart to point to the end address.  On
 * read, return on the first bad subdisk, so that the caller
 * (build_read_request) can try alternatives.
 *
 * On entry to this routine, the rqg structures are not assigned.  The
 * assignment is performed by expandrq().  Strictly speaking, the
 * elements rqe->sdno of all entries should be set to -1, since 0
 * (from bzero) is a valid subdisk number.  We avoid this problem by
 * initializing the ones we use, and not looking at the others (index
 * >= rqg->requests).
 */
enum requeststatus
bre(struct request *rq,
    int plexno,
    vinum_off_t *diskaddr,
    vinum_off_t diskend)
{
    int sdno;
    struct sd *sd;
    struct rqgroup *rqg;
    struct bio *bio;
    struct buf *bp; /* user's bp */
    struct plex *plex;
    enum requeststatus status; /* return value */
    vinum_off_t plexoffset; /* offset of transfer in plex */
    vinum_off_t stripebase; /* base address of stripe (1st subdisk) */
    vinum_off_t stripeoffset; /* offset in stripe */
    vinum_off_t blockoffset; /* offset in stripe on subdisk */
    struct rqelement *rqe; /* point to this request information */
    vinum_off_t diskstart = *diskaddr; /* remember where this transfer starts */
    enum requeststatus s; /* temp return value */

    bio = rq->bio; /* buffer pointer */
    bp = bio->bio_buf;
    status = REQUEST_OK; /* return value: OK until proven otherwise */
    plex = &PLEX[plexno]; /* point to the plex */

    switch (plex->organization) {
    case plex_concat:
	sd = NULL; /* (keep compiler quiet) */
	for (sdno = 0; sdno < plex->subdisks; sdno++) {
	    sd = &SD[plex->sdnos[sdno]];
	    if (*diskaddr < sd->plexoffset) /* we must have a hole, */
		status = REQUEST_DEGRADED; /* note the fact */
	    if (*diskaddr < (sd->plexoffset + sd->sectors)) { /* the request starts in this subdisk */
		rqg = allocrqg(rq, 1); /* space for the request */
		if (rqg == NULL) { /* malloc failed */
		    bp->b_error = ENOMEM;
		    bp->b_flags |= B_ERROR;
		    return REQUEST_ENOMEM;
		}
		rqg->plexno = plexno;

		rqe = &rqg->rqe[0]; /* point to the element */
		rqe->rqg = rqg; /* group */
		rqe->sdno = sd->sdno; /* put in the subdisk number */
		plexoffset = *diskaddr; /* start offset in plex */
		rqe->sdoffset = plexoffset - sd->plexoffset; /* start offset in subdisk */
		rqe->useroffset = plexoffset - diskstart; /* start offset in user buffer */
		rqe->dataoffset = 0;
		rqe->datalen = u64min(diskend - *diskaddr,
		    sd->sectors - rqe->sdoffset);
		rqe->groupoffset = 0; /* no groups for concatenated plexes */
		rqe->grouplen = 0;
		rqe->buflen = rqe->datalen; /* buffer length is data buffer length */
		rqe->flags = 0;
		rqe->driveno = sd->driveno;
		if (sd->state != sd_up) { /* *now* we find the sd is down */
		    s = checksdstate(sd, rq, *diskaddr, diskend); /* do we need to change state? */
		    if (s == REQUEST_DOWN) { /* down? */
			rqe->flags = XFR_BAD_SUBDISK; /* yup */
			if (rq->bio->bio_buf->b_cmd == BUF_CMD_READ) /* read request, */
			    return REQUEST_DEGRADED; /* give up here */
			/*
			 * If we're writing, don't give up
			 * because of a bad subdisk.  Go
			 * through to the bitter end, but note
			 * which ones we can't access.
			 */
			status = REQUEST_DEGRADED; /* can't do it all */
		    }
		}
		*diskaddr += rqe->datalen; /* bump the address */
		if (build_rq_buffer(rqe, plex)) { /* build the buffer */
		    deallocrqg(rqg);
		    bp->b_error = ENOMEM;
		    bp->b_flags |= B_ERROR;
		    return REQUEST_ENOMEM; /* can't do it */
		}
	    }
	    if (*diskaddr == diskend) /* we're finished, */
		break; /* get out of here */
	}
	/*
	 * We've got to the end of the plex.  Have we got to the end of
	 * the transfer?  It would seem that having an offset beyond the
	 * end of the subdisk is an error, but in fact it can happen if
	 * the volume has another plex of different size.  There's a valid
	 * question as to why you would want to do this, but currently
	 * it's allowed.
	 *
	 * In a previous version, I returned REQUEST_DOWN here.  I think
	 * REQUEST_EOF is more appropriate now.
	 */
	if (diskend > sd->sectors + sd->plexoffset) /* pointing beyond EOF? */
	    status = REQUEST_EOF;
	break;

    case plex_striped:
	{
	    while (*diskaddr < diskend) { /* until we get it all sorted out */
		if (*diskaddr >= plex->length) /* beyond the end of the plex */
		    return REQUEST_EOF; /* can't continue */

		/* The offset of the start address from the start of the stripe. */
		stripeoffset = *diskaddr % (plex->stripesize * plex->subdisks);

		/* The plex-relative address of the start of the stripe. */
		stripebase = *diskaddr - stripeoffset;

		/* The number of the subdisk in which the start is located. */
		sdno = stripeoffset / plex->stripesize;

		/* The offset from the beginning of the stripe on this subdisk. */
		blockoffset = stripeoffset % plex->stripesize;

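		/*
		 * Worked example (hypothetical numbers): with a stripesize
		 * of 128 sectors and 3 subdisks, a plex address
		 * *diskaddr = 1000 gives stripeoffset = 1000 % 384 = 232,
		 * stripebase = 768, sdno = 232 / 128 = 1 and
		 * blockoffset = 232 % 128 = 104; the subdisk offset
		 * computed below is then 768 / 3 + 104 = 360.
		 */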
		sd = &SD[plex->sdnos[sdno]]; /* the subdisk in question */
		rqg = allocrqg(rq, 1); /* space for the request */
		if (rqg == NULL) { /* malloc failed */
		    bp->b_error = ENOMEM;
		    bp->b_flags |= B_ERROR;
		    return REQUEST_ENOMEM;
		}
		rqg->plexno = plexno;

		rqe = &rqg->rqe[0]; /* point to the element */
		rqe->rqg = rqg;
		rqe->sdoffset = stripebase / plex->subdisks + blockoffset; /* start offset in this subdisk */
		rqe->useroffset = *diskaddr - diskstart; /* The offset of the start in the user buffer */
		rqe->dataoffset = 0;
		rqe->datalen = u64min(diskend - *diskaddr,
		    plex->stripesize - blockoffset);
		rqe->groupoffset = 0; /* no groups for striped plexes */
		rqe->grouplen = 0;
		rqe->buflen = rqe->datalen; /* buffer length is data buffer length */
		rqe->flags = 0;
		rqe->sdno = sd->sdno; /* put in the subdisk number */
		rqe->driveno = sd->driveno;

		if (sd->state != sd_up) { /* *now* we find the sd is down */
		    s = checksdstate(sd, rq, *diskaddr, diskend); /* do we need to change state? */
		    if (s == REQUEST_DOWN) { /* down? */
			rqe->flags = XFR_BAD_SUBDISK; /* yup */
			if (rq->bio->bio_buf->b_cmd == BUF_CMD_READ) /* read request, */
			    return REQUEST_DEGRADED; /* give up here */
			/*
			 * If we're writing, don't give up
			 * because of a bad subdisk.  Go through
			 * to the bitter end, but note which
			 * ones we can't access.
			 */
			status = REQUEST_DEGRADED; /* can't do it all */
		    }
		}
		/*
		 * It would seem that having an offset
		 * beyond the end of the subdisk is an
		 * error, but in fact it can happen if the
		 * volume has another plex of different
		 * size.  There's a valid question as to why
		 * you would want to do this, but currently
		 * it's allowed.
		 */
		if (rqe->sdoffset + rqe->datalen > sd->sectors) { /* ends beyond the end of the subdisk? */
		    rqe->datalen = sd->sectors - rqe->sdoffset; /* truncate */
#if VINUMDEBUG
		    if (debug & DEBUG_EOFINFO) { /* tell on the request */
			log(LOG_DEBUG,
			    "vinum: EOF on plex %s, sd %s offset %jx (user offset %jx)\n",
			    plex->name,
			    sd->name,
			    (uintmax_t)sd->sectors,
			    (uintmax_t)bp->b_bio1.bio_offset);
			log(LOG_DEBUG,
			    "vinum: stripebase 0x%llx, stripeoffset 0x%llx, "
			    "blockoffset 0x%llx\n",
			    (long long)stripebase,
			    (long long)stripeoffset,
			    (long long)blockoffset);
		    }
#endif
		}
		if (build_rq_buffer(rqe, plex)) { /* build the buffer */
		    deallocrqg(rqg);
		    bp->b_error = ENOMEM;
		    bp->b_flags |= B_ERROR;
		    return REQUEST_ENOMEM; /* can't do it */
		}
		*diskaddr += rqe->datalen; /* look at the remainder */
		if ((*diskaddr < diskend) /* didn't finish the request on this stripe */
		&&(*diskaddr < plex->length)) { /* and there's more to come */
		    plex->multiblock++; /* count another one */
		    if (sdno == plex->subdisks - 1) /* last subdisk, */
			plex->multistripe++; /* another stripe as well */
		}
	    }
	}
	break;

	/*
	 * RAID-4 and RAID-5 are complicated enough to have their own
	 * function.
	 */
    case plex_raid4:
    case plex_raid5:
	status = bre5(rq, plexno, diskaddr, diskend);
	break;

    default:
	log(LOG_ERR, "vinum: invalid plex type %d in bre\n", plex->organization);
	status = REQUEST_DOWN; /* can't access it */
    }

    return status;
}

/*
 * Build up a request structure for reading volumes.
 * This function is not needed for plex reads, since there's
 * no recovery if a plex read can't be satisfied.
 */
enum requeststatus
build_read_request(struct request *rq, /* request */
    int plexindex)
{ /* index in the volume's plex table */
    struct bio *bio;
    struct buf *bp;
    vinum_off_t startaddr; /* offset of previous part of transfer */
    vinum_off_t diskaddr; /* offset of current part of transfer */
    vinum_off_t diskend; /* and end offset of transfer */
    int plexno; /* plex index in vinum_conf */
    struct rqgroup *rqg; /* point to the request we're working on */
    struct volume *vol; /* volume in question */
    int recovered = 0; /* set if we recover a read */
    enum requeststatus status = REQUEST_OK;
    int plexmask; /* bit mask of plexes, for recovery */

    bio = rq->bio; /* buffer pointer */
    bp = bio->bio_buf;
    diskaddr = bio->bio_offset >> DEV_BSHIFT; /* start offset of transfer */
    diskend = diskaddr + (bp->b_bcount / DEV_BSIZE); /* and end offset of transfer */
    rqg = &rq->rqg[plexindex]; /* plex request */
    vol = &VOL[rq->volplex.volno]; /* point to volume */

    while (diskaddr < diskend) { /* build up request components */
	startaddr = diskaddr;
	status = bre(rq, vol->plex[plexindex], &diskaddr, diskend); /* build up a request */
	switch (status) {
	case REQUEST_OK:
	    continue;

	case REQUEST_RECOVERED:
	    /*
	     * XXX FIXME if we have more than one plex, and we can
	     * satisfy the request from another, don't use the
	     * recovered request, since it's more expensive.
	     */
	    recovered = 1;
	    break;

	case REQUEST_ENOMEM:
	    return status;
	    /*
	     * If we get here, our request is not complete.  Try
	     * to fill in the missing parts from another plex.
	     * This can happen multiple times in this function,
	     * and we reinitialize the plex mask each time, since
	     * we could have a hole in our plexes.
	     */
	case REQUEST_EOF:
	case REQUEST_DOWN: /* can't access the plex */
	case REQUEST_DEGRADED: /* can't access the plex */
	    plexmask = ((1 << vol->plexes) - 1) /* all plexes in the volume */
		&~(1 << plexindex); /* except for the one we were looking at */
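	    /*
	     * Hypothetical example: in a volume with 3 plexes and
	     * plexindex 1, this is 0b111 & ~0b010 = 0b101, so the loop
	     * below retries plexes 0 and 2 only.
	     */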
	    for (plexno = 0; plexno < vol->plexes; plexno++) {
		if (plexmask == 0) /* no plexes left to try */
		    return REQUEST_DOWN; /* failed */
		diskaddr = startaddr; /* start at the beginning again */
		if (plexmask & (1 << plexno)) { /* we haven't tried this plex yet */
		    bre(rq, vol->plex[plexno], &diskaddr, diskend); /* try a request */
		    if (diskaddr > startaddr) { /* we satisfied another part */
			recovered = 1; /* we recovered from the problem */
			status = REQUEST_OK; /* don't complain about it */
			break;
		    }
		}
	    }
	    if (diskaddr == startaddr) /* didn't get any further, */
		return status;
	}
	if (recovered)
	    vol->recovered_reads += recovered; /* adjust our recovery count */
    }
    return status;
}

/*
 * Build up a request structure for writes.
 * Return 0 if all subdisks involved in the request are up, 1 if some
 * subdisks are not up, and -1 if the request is at least partially
 * outside the bounds of the subdisks.
 */
enum requeststatus
build_write_request(struct request *rq)
{ /* request */
    struct bio *bio;
    struct buf *bp;
    vinum_off_t diskstart; /* offset of current part of transfer */
    vinum_off_t diskend; /* and end offset of transfer */
    int plexno; /* plex index in vinum_conf */
    struct volume *vol; /* volume in question */
    enum requeststatus status;

    bio = rq->bio; /* buffer pointer */
    bp = bio->bio_buf;
    vol = &VOL[rq->volplex.volno]; /* point to volume */
    diskend = (vinum_off_t)(bio->bio_offset >> DEV_BSHIFT) + (bp->b_bcount / DEV_BSIZE); /* end offset of transfer */
    status = REQUEST_DOWN; /* assume the worst */
    for (plexno = 0; plexno < vol->plexes; plexno++) {
	diskstart = (vinum_off_t)(bio->bio_offset >> DEV_BSHIFT); /* start offset of transfer */
	/*
	 * Build requests for the plex.
	 * We take the best possible result here (min,
	 * not max): we're happy if we can write at all
	 */
	status = u64min(status,
	    bre(rq, vol->plex[plexno], &diskstart, diskend));
    }
    return status;
}

/* Fill in the struct buf part of a request element. */
enum requeststatus
build_rq_buffer(struct rqelement *rqe, struct plex *plex)
{
    struct sd *sd; /* point to subdisk */
    struct volume *vol;
    struct buf *bp;
    struct buf *ubp; /* user (high level) buffer header */
    struct bio *ubio;

    vol = &VOL[rqe->rqg->rq->volplex.volno];
    sd = &SD[rqe->sdno]; /* point to subdisk */
    bp = &rqe->b;
    ubio = rqe->rqg->rq->bio; /* pointer to user buffer header */
    ubp = ubio->bio_buf;

    /* Initialize the buf struct */
    /* copy these flags from user bp */
    bp->b_flags = ubp->b_flags & (B_ORDERED | B_NOCACHE);
    bp->b_cmd = ubp->b_cmd;
#ifdef VINUMDEBUG
    if (rqe->flags & XFR_BUFLOCKED) /* paranoia */
	panic("build_rq_buffer: rqe already locked"); /* XXX remove this when we're sure */
#endif
    initbufbio(bp);
    BUF_LOCK(bp, LK_EXCLUSIVE); /* and lock it */
    BUF_KERNPROC(bp);
    rqe->flags |= XFR_BUFLOCKED;
    bp->b_bio1.bio_done = complete_rqe;
    /*
     * You'd think that we wouldn't need to even
     * build the request buffer for a dead subdisk,
     * but in some cases we need information like
     * the user buffer address.  Err on the side of
     * generosity and supply what we can.  That
     * obviously doesn't include drive information
     * when the drive is dead.
     */
    if ((rqe->flags & XFR_BAD_SUBDISK) == 0) /* subdisk is accessible, */
	bp->b_bio1.bio_driver_info = DRIVE[rqe->driveno].dev; /* drive device */
    bp->b_bio1.bio_offset = (off_t)(rqe->sdoffset + sd->driveoffset) << DEV_BSHIFT; /* start address */
    bp->b_bcount = rqe->buflen << DEV_BSHIFT; /* number of bytes to transfer */
    bp->b_resid = bp->b_bcount; /* and it's still all waiting */

    if (rqe->flags & XFR_MALLOCED) { /* this operation requires a malloced buffer */
	bp->b_data = Malloc(bp->b_bcount); /* get a buffer to put it in */
	if (bp->b_data == NULL) { /* failed */
	    abortrequest(rqe->rqg->rq, ENOMEM);
	    return REQUEST_ENOMEM; /* no memory */
	}
    } else
	/*
	 * Point directly to user buffer data.  This means
	 * that we don't need to do anything when we have
	 * finished the transfer
	 */
	bp->b_data = ubp->b_data + rqe->useroffset * DEV_BSIZE;
    /*
     * On a recovery read, we perform an XOR of
     * all blocks to the user buffer.  To make
     * this work, we first clean out the buffer
     */
    if ((rqe->flags & (XFR_RECOVERY_READ | XFR_BAD_SUBDISK))
	== (XFR_RECOVERY_READ | XFR_BAD_SUBDISK)) { /* bad subdisk of a recovery read */
	int length = rqe->grouplen << DEV_BSHIFT; /* and count involved */
	char *data = (char *) &rqe->b.b_data[rqe->groupoffset << DEV_BSHIFT]; /* destination */

	bzero(data, length); /* clean it out */
    }
    return 0;
}

/*
 * Abort a request: free resources and complete the
 * user request with the specified error
 */
int
abortrequest(struct request *rq, int error)
{
    struct buf *bp = rq->bio->bio_buf; /* user buffer */

    bp->b_error = error;
    freerq(rq); /* free everything we're doing */
    bp->b_flags |= B_ERROR;
    return error; /* and give up */
}

/*
 * Check that our transfer will cover the
 * complete address space of the user request.
 *
 * Return 1 if it can, otherwise 0
 */
int
check_range_covered(struct request *rq)
{
    return 1;
}

/* Perform I/O on a subdisk */
void
sdio(struct bio *bio)
{
    cdev_t dev;
    struct sd *sd;
    struct sdbuf *sbp;
    vinum_off_t endoffset;
    struct drive *drive;
    struct buf *bp = bio->bio_buf;

    dev = bio->bio_driver_info;

#if VINUMDEBUG
    if (debug & DEBUG_LASTREQS)
	logrq(loginfo_sdio, (union rqinfou) bio, bio);
#endif
    sd = &SD[Sdno(dev)]; /* point to the subdisk */
    drive = &DRIVE[sd->driveno];

    if (drive->state != drive_up) {
	if (sd->state >= sd_crashed) {
	    if (bp->b_cmd != BUF_CMD_READ) /* writing, */
		set_sd_state(sd->sdno, sd_stale, setstate_force);
	    else
		set_sd_state(sd->sdno, sd_crashed, setstate_force);
	}
	bp->b_error = EIO;
	bp->b_flags |= B_ERROR;
	biodone(bio);
	return;
    }
    /*
     * We allow access to any kind of subdisk as long as we can expect
     * to get the I/O performed.
     */
    if (sd->state < sd_empty) { /* nothing to talk to, */
	bp->b_error = EIO;
	bp->b_flags |= B_ERROR;
	biodone(bio);
	return;
    }
    /* Get a buffer */
    sbp = (struct sdbuf *) Malloc(sizeof(struct sdbuf));
    if (sbp == NULL) {
	bp->b_error = ENOMEM;
	bp->b_flags |= B_ERROR;
	biodone(bio);
	return;
    }
    bzero(sbp, sizeof(struct sdbuf)); /* start with nothing */
    sbp->b.b_cmd = bp->b_cmd;
    sbp->b.b_bcount = bp->b_bcount; /* number of bytes to transfer */
    sbp->b.b_resid = bp->b_resid; /* and amount waiting */
    sbp->b.b_data = bp->b_data; /* data buffer */
    initbufbio(&sbp->b);
    BUF_LOCK(&sbp->b, LK_EXCLUSIVE); /* and lock it */
    BUF_KERNPROC(&sbp->b);
    sbp->b.b_bio1.bio_offset = bio->bio_offset + ((off_t)sd->driveoffset << DEV_BSHIFT);
    sbp->b.b_bio1.bio_done = sdio_done; /* come here on completion */
    sbp->b.b_bio1.bio_flags |= BIO_SYNC;
    sbp->bio = bio; /* note the address of the original header */
    sbp->sdno = sd->sdno; /* note for statistics */
    sbp->driveno = sd->driveno;
    endoffset = (vinum_off_t)(bio->bio_offset >> DEV_BSHIFT) + sbp->b.b_bcount / DEV_BSIZE; /* final sector offset */
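    /*
     * Hypothetical example: an 8-sector transfer starting 2 sectors
     * before the end of the subdisk gives endoffset = sd->sectors + 6;
     * the check below trims b_bcount by 6 * DEV_BSIZE so that only the
     * 2 resident sectors are transferred.
     */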
    if (endoffset > sd->sectors) { /* beyond the end */
	sbp->b.b_bcount -= (endoffset - sd->sectors) * DEV_BSIZE; /* trim */
	if (sbp->b.b_bcount <= 0) { /* nothing to transfer */
	    bp->b_resid = bp->b_bcount; /* nothing transferred */
	    biodone(bio);
	    BUF_UNLOCK(&sbp->b);
	    uninitbufbio(&sbp->b);
	    Free(sbp);
	    return;
	}
    }
#if VINUMDEBUG
    if (debug & DEBUG_ADDRESSES)
	log(LOG_DEBUG,
	    " %s dev %s, sd %d, offset 0x%llx, devoffset 0x%llx, length %d\n",
	    (sbp->b.b_cmd == BUF_CMD_READ) ? "Read" : "Write",
	    drive->devicename,
	    sbp->sdno,
	    sbp->b.b_bio1.bio_offset - ((off_t)SD[sbp->sdno].driveoffset << DEV_BSHIFT),
	    sbp->b.b_bio1.bio_offset,
	    sbp->b.b_bcount);
#endif
    crit_enter();
#if VINUMDEBUG
    if (debug & DEBUG_LASTREQS)
	logrq(loginfo_sdiol, (union rqinfou) &sbp->b.b_bio1, &sbp->b.b_bio1);
#endif
    vn_strategy(drive->vp, &sbp->b.b_bio1);
    crit_exit();
}

/*
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition.  Adjust transfer
 * if needed, and signal errors or early completion.
 *
 * Volumes are simpler than disk slices: they only contain
 * one component (though we call them a, b and c to make
 * system utilities happy), and they always take up the
 * complete space of the "partition".
 *
 * I'm still not happy with this: why should the label be
 * protected?  If it weren't so damned difficult to write
 * one in the first place (because it's protected), it wouldn't
 * be a problem.
 */
struct bio *
vinum_bounds_check(struct bio *bio, struct volume *vol)
{
    struct buf *bp = bio->bio_buf;
    struct bio *nbio;
    vinum_off_t maxsize = vol->size; /* size of the partition (sectors) */
    int size = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; /* size of this request (sectors) */
    vinum_off_t blkno = (vinum_off_t)(bio->bio_offset >> DEV_BSHIFT);

    if (size == 0) /* no transfer specified, */
	return 0; /* treat as EOF */
    /* beyond partition? */
    if (bio->bio_offset < 0 /* negative start */
	|| blkno + size > maxsize) { /* or goes beyond the end of the partition */
	/* if exactly at end of disk, return an EOF */
	if (blkno == maxsize) {
	    bp->b_resid = bp->b_bcount;
	    return (NULL);
	}
	/* or truncate if part of it fits */
	size = maxsize - blkno;
	if (size <= 0) { /* nothing to transfer */
	    bp->b_error = EINVAL;
	    bp->b_flags |= B_ERROR;
	    return (NULL);
	}
	bp->b_bcount = size << DEV_BSHIFT;
    }
    nbio = push_bio(bio);
    nbio->bio_offset = bio->bio_offset;
    return (nbio);
}

/*
 * Allocate a request group and hook
 * it into the list for rq
 */
struct rqgroup *
allocrqg(struct request *rq, int elements)
{
    struct rqgroup *rqg; /* the one we're going to allocate */
    int size = sizeof(struct rqgroup) + elements * sizeof(struct rqelement);

    rqg = (struct rqgroup *) Malloc(size);
    if (rqg != NULL) { /* malloc OK, */
	if (rq->rqg) /* we already have requests */
	    rq->lrqg->next = rqg; /* hang it off the end */
	else /* first request */
	    rq->rqg = rqg; /* at the start */
	rq->lrqg = rqg; /* this one is the last in the list */

	bzero(rqg, size); /* no old junk */
	rqg->rq = rq; /* point back to the parent request */
	rqg->count = elements; /* number of requests in the group */
	rqg->lockbase = -1; /* no lock required yet */
    }
    return rqg;
}

/*
 * Deallocate a request group out of a chain.  We do
 * this by linear search: the chain is short, this
 * almost never happens, and currently it can only
 * happen to the first member of the chain.
 */
void
deallocrqg(struct rqgroup *rqg)
{
    struct rqgroup *rqgc = rqg->rq->rqg; /* point to the request chain */

    if (rqg->lock) /* got a lock? */
	unlockrange(rqg->plexno, rqg->lock); /* yes, free it */
    if (rqgc == rqg) /* we're first in line */
	rqg->rq->rqg = rqg->next; /* unhook ourselves */
    else {
	while ((rqgc->next != NULL) /* find the group */
	&&(rqgc->next != rqg))
	    rqgc = rqgc->next;
	if (rqgc->next == NULL)
	    log(LOG_ERR,
		"vinum deallocrqg: rqg %p not found in request %p\n",
		rqg,
		rqg->rq);
	else
	    rqgc->next = rqg->next; /* make the chain jump over us */
    }
    Free(rqg);
}