/*	$NetBSD: rf_driver.c,v 1.43 2001/10/04 17:31:01 oster Exp $	*/
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Greg Oster
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland, Khalil Amiri, Claudson Bornstein, William V. Courtright II,
 *         Robby Findler, Daniel Stodolsky, Rachad Youssef, Jim Zelenka
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/******************************************************************************
 *
 * rf_driver.c -- main setup, teardown, and access routines for the RAID driver
 *
 * all routines are prefixed with rf_ (raidframe), to avoid conflicts.
 *
 ******************************************************************************/


#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>


#include "rf_archs.h"
#include "rf_threadstuff.h"

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_aselect.h"
#include "rf_diskqueue.h"
#include "rf_parityscan.h"
#include "rf_alloclist.h"
#include "rf_dagutils.h"
#include "rf_utils.h"
#include "rf_etimer.h"
#include "rf_acctrace.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_states.h"
#include "rf_freelist.h"
#include "rf_decluster.h"
#include "rf_map.h"
#include "rf_revent.h"
#include "rf_callback.h"
#include "rf_engine.h"
#include "rf_memchunk.h"
#include "rf_mcpair.h"
#include "rf_nwayxor.h"
#include "rf_debugprint.h"
#include "rf_copyback.h"
#include "rf_driver.h"
#include "rf_options.h"
#include "rf_shutdown.h"
#include "rf_kintf.h"

#include <sys/buf.h>

/* rad == RF_RaidAccessDesc_t */
static RF_FreeList_t *rf_rad_freelist;
#define RF_MAX_FREE_RAD 128
#define RF_RAD_INC 16
#define RF_RAD_INITIAL 32

/* debug variables */
char	rf_panicbuf[2048];	/* a buffer to hold an error msg when we panic */

/* main configuration routines */
static int raidframe_booted = 0;

static void rf_ConfigureDebug(RF_Config_t * cfgPtr);
static void set_debug_option(char *name, long val);
static void rf_UnconfigureArray(void);
static int init_rad(RF_RaidAccessDesc_t *);
static void clean_rad(RF_RaidAccessDesc_t *);
static void rf_ShutdownRDFreeList(void *);
static int rf_ConfigureRDFreeList(RF_ShutdownList_t **);

RF_DECLARE_MUTEX(rf_printf_mutex)	/* debug only: avoids interleaved
					 * printfs by different stripes */

#define SIGNAL_QUIESCENT_COND(_raid_)  wakeup(&((_raid_)->accesses_suspended))
#define WAIT_FOR_QUIESCENCE(_raid_) \
	ltsleep(&((_raid_)->accesses_suspended), PRIBIO, \
	    "raidframe quiesce", 0, &((_raid_)->access_suspend_mutex))

#define IO_BUF_ERR(bp, err) { \
	bp->b_flags |= B_ERROR; \
	bp->b_resid = bp->b_bcount; \
	bp->b_error = err; \
	biodone(bp); \
}

static int configureCount = 0;		/* number of active configurations */
static int isconfigged = 0;		/* is basic raidframe (non per-array)
					 * stuff configged */
RF_DECLARE_STATIC_MUTEX(configureMutex)	/* used to lock the configuration
					 * stuff */
static RF_ShutdownList_t *globalShutdown;	/* non array-specific
						 * stuff */

static int rf_ConfigureRDFreeList(RF_ShutdownList_t ** listp);

/* called at system boot time */
int
rf_BootRaidframe()
{
	int rc;

	if (raidframe_booted)
		return (EBUSY);
	raidframe_booted = 1;

	rc = rf_mutex_init(&configureMutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		RF_PANIC();
	}
	configureCount = 0;
	isconfigged = 0;
	globalShutdown = NULL;
	return (0);
}
/*
 * This function is really just for debugging user-level stuff: it
 * frees up all memory and other RAIDframe resources which might otherwise
 * be kept around.  This is used with systems like "sentinel" to detect
 * memory leaks.
 */
int
rf_UnbootRaidframe()
{
	int rc;

	RF_LOCK_MUTEX(configureMutex);
	if (configureCount) {
		RF_UNLOCK_MUTEX(configureMutex);
		return (EBUSY);
	}
	raidframe_booted = 0;
	RF_UNLOCK_MUTEX(configureMutex);
	rc = rf_mutex_destroy(&configureMutex);
	if (rc) {
		RF_ERRORMSG3("Unable to destroy mutex file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		RF_PANIC();
	}
	return (0);
}
/*
 * Called whenever an array is shut down.
 */
static void
rf_UnconfigureArray()
{
	int rc;

	RF_LOCK_MUTEX(configureMutex);
	if (--configureCount == 0) {	/* if no active configurations, shut
					 * everything down */
		isconfigged = 0;

		rc = rf_ShutdownList(&globalShutdown);
		if (rc) {
			RF_ERRORMSG1("RAIDFRAME: unable to do global shutdown, rc=%d\n", rc);
		}

		/*
		 * We must wait until now, because the AllocList module
		 * uses the DebugMem module.
		 */
		if (rf_memDebug)
			rf_print_unfreed();
	}
	RF_UNLOCK_MUTEX(configureMutex);
}
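
/*
 * Cleanup actions throughout the driver are registered on a shutdown
 * list (either the global one above or a per-array one) and are later
 * run in one batch by rf_ShutdownList().  A minimal sketch of the
 * pattern, using a hypothetical cleanup routine my_module_cleanup()
 * and argument my_module_arg:
 */
#if 0
	/* my_module_cleanup/my_module_arg are placeholders */
	rc = rf_ShutdownCreate(&globalShutdown,
	    (void (*) (void *)) my_module_cleanup, my_module_arg);
	if (rc) {
		/* registration failed: release the resource by hand */
		my_module_cleanup(my_module_arg);
	}
	/* ... later, rf_ShutdownList(&globalShutdown) runs every
	 * registered entry, as rf_UnconfigureArray() does above. */
#endif
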
/*
 * Called to shut down an array.
 */
int
rf_Shutdown(raidPtr)
	RF_Raid_t *raidPtr;
{

	if (!raidPtr->valid) {
		RF_ERRORMSG("Attempt to shut down unconfigured RAIDframe driver.  Aborting shutdown\n");
		return (EINVAL);
	}
	/*
	 * Wait for outstanding IOs to land.
	 * As described in rf_raid.h, we use the rad_freelist lock
	 * to protect the per-array info about outstanding descs
	 * since we need to do freelist locking anyway, and this
	 * cuts down on the amount of serialization we've got going
	 * on.
	 */
	RF_FREELIST_DO_LOCK(rf_rad_freelist);
	if (raidPtr->waitShutdown) {
		RF_FREELIST_DO_UNLOCK(rf_rad_freelist);
		return (EBUSY);
	}
	raidPtr->waitShutdown = 1;
	while (raidPtr->nAccOutstanding) {
		RF_WAIT_COND(raidPtr->outstandingCond, RF_FREELIST_MUTEX_OF(rf_rad_freelist));
	}
	RF_FREELIST_DO_UNLOCK(rf_rad_freelist);

	/* Wait for any parity re-writes to stop... */
	while (raidPtr->parity_rewrite_in_progress) {
		printf("Waiting for parity re-write to exit...\n");
		tsleep(&raidPtr->parity_rewrite_in_progress, PRIBIO,
		    "rfprwshutdown", 0);
	}

	raidPtr->valid = 0;

	rf_update_component_labels(raidPtr, RF_FINAL_COMPONENT_UPDATE);

	rf_UnconfigureVnodes(raidPtr);

	rf_ShutdownList(&raidPtr->shutdownList);

	rf_UnconfigureArray();

	return (0);
}


#define DO_INIT_CONFIGURE(f) { \
	rc = f (&globalShutdown); \
	if (rc) { \
		RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
		rf_ShutdownList(&globalShutdown); \
		configureCount--; \
		RF_UNLOCK_MUTEX(configureMutex); \
		return(rc); \
	} \
}

#define DO_RAID_FAIL() { \
	rf_UnconfigureVnodes(raidPtr); \
	rf_ShutdownList(&raidPtr->shutdownList); \
	rf_UnconfigureArray(); \
}

#define DO_RAID_INIT_CONFIGURE(f) { \
	rc = f (&raidPtr->shutdownList, raidPtr, cfgPtr); \
	if (rc) { \
		RF_ERRORMSG2("RAIDFRAME: failed %s with %d\n", RF_STRING(f), rc); \
		DO_RAID_FAIL(); \
		return(rc); \
	} \
}

#define DO_RAID_MUTEX(_m_) { \
	rc = rf_create_managed_mutex(&raidPtr->shutdownList, (_m_)); \
	if (rc) { \
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n", \
		    __FILE__, __LINE__, rc); \
		DO_RAID_FAIL(); \
		return(rc); \
	} \
}

#define DO_RAID_COND(_c_) { \
	rc = rf_create_managed_cond(&raidPtr->shutdownList, (_c_)); \
	if (rc) { \
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n", \
		    __FILE__, __LINE__, rc); \
		DO_RAID_FAIL(); \
		return(rc); \
	} \
}

int
rf_Configure(raidPtr, cfgPtr, ac)
	RF_Raid_t *raidPtr;
	RF_Config_t *cfgPtr;
	RF_AutoConfig_t *ac;
{
	RF_RowCol_t row, col;
	int i, rc;

	/* XXX This check can probably be removed now, since
	 * RAIDFRAME_CONFIGURE now checks to make sure that the
	 * RAID set is not already valid. */
	if (raidPtr->valid) {
		RF_ERRORMSG("RAIDframe configuration not shut down.  Aborting configure.\n");
		return (EINVAL);
	}
	RF_LOCK_MUTEX(configureMutex);
	configureCount++;
	if (isconfigged == 0) {
		rc = rf_create_managed_mutex(&globalShutdown, &rf_printf_mutex);
		if (rc) {
			RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
			    __FILE__, __LINE__, rc);
			rf_ShutdownList(&globalShutdown);
			configureCount--;
			RF_UNLOCK_MUTEX(configureMutex);
			return (rc);
		}
		/* initialize globals */
		printf("RAIDFRAME: protectedSectors is %ld\n",
		    rf_protectedSectors);

		rf_clear_debug_print_buffer();

		DO_INIT_CONFIGURE(rf_ConfigureAllocList);

		/*
		 * Yes, this does make debugging general to the whole
		 * system instead of being array specific.  Bummer, drag.
		 */
		rf_ConfigureDebug(cfgPtr);
		DO_INIT_CONFIGURE(rf_ConfigureDebugMem);
		DO_INIT_CONFIGURE(rf_ConfigureAccessTrace);
		DO_INIT_CONFIGURE(rf_ConfigureMapModule);
		DO_INIT_CONFIGURE(rf_ConfigureReconEvent);
		DO_INIT_CONFIGURE(rf_ConfigureCallback);
		DO_INIT_CONFIGURE(rf_ConfigureMemChunk);
		DO_INIT_CONFIGURE(rf_ConfigureRDFreeList);
		DO_INIT_CONFIGURE(rf_ConfigureNWayXor);
		DO_INIT_CONFIGURE(rf_ConfigureStripeLockFreeList);
		DO_INIT_CONFIGURE(rf_ConfigureMCPair);
		DO_INIT_CONFIGURE(rf_ConfigureDAGs);
		DO_INIT_CONFIGURE(rf_ConfigureDAGFuncs);
		DO_INIT_CONFIGURE(rf_ConfigureDebugPrint);
		DO_INIT_CONFIGURE(rf_ConfigureReconstruction);
		DO_INIT_CONFIGURE(rf_ConfigureCopyback);
		DO_INIT_CONFIGURE(rf_ConfigureDiskQueueSystem);
		isconfigged = 1;
	}
	RF_UNLOCK_MUTEX(configureMutex);

	DO_RAID_MUTEX(&raidPtr->mutex);
	/* set up the cleanup list.  Do this after ConfigureDebug so that
	 * the value of memDebug will be set */

	rf_MakeAllocList(raidPtr->cleanupList);
	if (raidPtr->cleanupList == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	rc = rf_ShutdownCreate(&raidPtr->shutdownList,
	    (void (*) (void *)) rf_FreeAllocList,
	    raidPtr->cleanupList);
	if (rc) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		DO_RAID_FAIL();
		return (rc);
	}
	raidPtr->numRow = cfgPtr->numRow;
	raidPtr->numCol = cfgPtr->numCol;
	raidPtr->numSpare = cfgPtr->numSpare;

	/* XXX we don't even pretend to support more than one row in the
	 * kernel... */
	if (raidPtr->numRow != 1) {
		RF_ERRORMSG("Only one row supported in kernel.\n");
		DO_RAID_FAIL();
		return (EINVAL);
	}
	RF_CallocAndAdd(raidPtr->status, raidPtr->numRow, sizeof(RF_RowStatus_t),
	    (RF_RowStatus_t *), raidPtr->cleanupList);
	if (raidPtr->status == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	RF_CallocAndAdd(raidPtr->reconControl, raidPtr->numRow,
	    sizeof(RF_ReconCtrl_t *), (RF_ReconCtrl_t **), raidPtr->cleanupList);
	if (raidPtr->reconControl == NULL) {
		DO_RAID_FAIL();
		return (ENOMEM);
	}
	for (i = 0; i < raidPtr->numRow; i++) {
		raidPtr->status[i] = rf_rs_optimal;
		raidPtr->reconControl[i] = NULL;
	}

	DO_RAID_INIT_CONFIGURE(rf_ConfigureEngine);
	DO_RAID_INIT_CONFIGURE(rf_ConfigureStripeLocks);

	DO_RAID_COND(&raidPtr->outstandingCond);

	raidPtr->nAccOutstanding = 0;
	raidPtr->waitShutdown = 0;

	DO_RAID_MUTEX(&raidPtr->access_suspend_mutex);
	DO_RAID_COND(&raidPtr->quiescent_cond);

	DO_RAID_COND(&raidPtr->waitForReconCond);

	DO_RAID_MUTEX(&raidPtr->recon_done_proc_mutex);

	if (ac != NULL) {
		/* We have an AutoConfig structure.  Don't do the
		 * normal disk configuration... call the auto config
		 * stuff */
		rf_AutoConfigureDisks(raidPtr, cfgPtr, ac);
	} else {
		DO_RAID_INIT_CONFIGURE(rf_ConfigureDisks);
		DO_RAID_INIT_CONFIGURE(rf_ConfigureSpareDisks);
	}
	/* do this after ConfigureDisks & ConfigureSpareDisks to be sure
	 * dev no. is set */
	DO_RAID_INIT_CONFIGURE(rf_ConfigureDiskQueues);

	DO_RAID_INIT_CONFIGURE(rf_ConfigureLayout);

	DO_RAID_INIT_CONFIGURE(rf_ConfigurePSStatus);

	for (row = 0; row < raidPtr->numRow; row++) {
		for (col = 0; col < raidPtr->numCol; col++) {
			/*
			 * XXX better distribution
			 */
			raidPtr->hist_diskreq[row][col] = 0;
		}
	}

	raidPtr->numNewFailures = 0;
	raidPtr->copyback_in_progress = 0;
	raidPtr->parity_rewrite_in_progress = 0;
	raidPtr->recon_in_progress = 0;
	raidPtr->maxOutstanding = cfgPtr->maxOutstandingDiskReqs;

	/* autoconfigure and root_partition will actually get filled in
	 * after the config is done */
	raidPtr->autoconfigure = 0;
	raidPtr->root_partition = 0;
	raidPtr->last_unit = raidPtr->raidid;
	raidPtr->config_order = 0;

	if (rf_keepAccTotals) {
		raidPtr->keep_acc_totals = 1;
	}
	rf_StartUserStats(raidPtr);

	raidPtr->valid = 1;
	return (0);
}
static int
init_rad(desc)
	RF_RaidAccessDesc_t *desc;
{
	int rc;

	rc = rf_mutex_init(&desc->mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		return (rc);
	}
	rc = rf_cond_init(&desc->cond);
	if (rc) {
		RF_ERRORMSG3("Unable to init cond file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		rf_mutex_destroy(&desc->mutex);
		return (rc);
	}
	return (0);
}

static void
clean_rad(desc)
	RF_RaidAccessDesc_t *desc;
{
	rf_mutex_destroy(&desc->mutex);
	rf_cond_destroy(&desc->cond);
}

static void
rf_ShutdownRDFreeList(ignored)
	void *ignored;
{
	RF_FREELIST_DESTROY_CLEAN(rf_rad_freelist, next, (RF_RaidAccessDesc_t *), clean_rad);
}

static int
rf_ConfigureRDFreeList(listp)
	RF_ShutdownList_t **listp;
{
	int rc;

	RF_FREELIST_CREATE(rf_rad_freelist, RF_MAX_FREE_RAD,
	    RF_RAD_INC, sizeof(RF_RaidAccessDesc_t));
	if (rf_rad_freelist == NULL) {
		return (ENOMEM);
	}
	rc = rf_ShutdownCreate(listp, rf_ShutdownRDFreeList, NULL);
	if (rc) {
		RF_ERRORMSG3("Unable to add to shutdown list file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		rf_ShutdownRDFreeList(NULL);
		return (rc);
	}
	RF_FREELIST_PRIME_INIT(rf_rad_freelist, RF_RAD_INITIAL, next,
	    (RF_RaidAccessDesc_t *), init_rad);
	return (0);
}

RF_RaidAccessDesc_t *
rf_AllocRaidAccDesc(
    RF_Raid_t * raidPtr,
    RF_IoType_t type,
    RF_RaidAddr_t raidAddress,
    RF_SectorCount_t numBlocks,
    caddr_t bufPtr,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AccessState_t * states)
{
	RF_RaidAccessDesc_t *desc;

	RF_FREELIST_GET_INIT_NOUNLOCK(rf_rad_freelist, desc, next,
	    (RF_RaidAccessDesc_t *), init_rad);
	if (raidPtr->waitShutdown) {
		/*
		 * Actually, we're shutting the array down.  Free the desc
		 * and return NULL.
		 */
		RF_FREELIST_DO_UNLOCK(rf_rad_freelist);
		RF_FREELIST_FREE_CLEAN(rf_rad_freelist, desc, next, clean_rad);
		return (NULL);
	}
	raidPtr->nAccOutstanding++;
	RF_FREELIST_DO_UNLOCK(rf_rad_freelist);

	desc->raidPtr = (void *) raidPtr;
	desc->type = type;
	desc->raidAddress = raidAddress;
	desc->numBlocks = numBlocks;
	desc->bufPtr = bufPtr;
	desc->bp = bp;
	desc->paramDAG = NULL;
	desc->paramASM = NULL;
	desc->flags = flags;
	desc->states = states;
	desc->state = 0;

	desc->status = 0;
	memset((char *) &desc->tracerec, 0, sizeof(RF_AccTraceEntry_t));
	desc->callbackFunc = NULL;
	desc->callbackArg = NULL;
	desc->next = NULL;
	desc->head = desc;
	desc->numPending = 0;
	desc->cleanupList = NULL;
	rf_MakeAllocList(desc->cleanupList);
	return (desc);
}

void
rf_FreeRaidAccDesc(RF_RaidAccessDesc_t * desc)
{
	RF_Raid_t *raidPtr = desc->raidPtr;

	RF_ASSERT(desc);

	rf_FreeAllocList(desc->cleanupList);
	RF_FREELIST_FREE_CLEAN_NOUNLOCK(rf_rad_freelist, desc, next, clean_rad);
	raidPtr->nAccOutstanding--;
	if (raidPtr->waitShutdown) {
		RF_SIGNAL_COND(raidPtr->outstandingCond);
	}
	RF_FREELIST_DO_UNLOCK(rf_rad_freelist);
}
/*********************************************************************
 * Main routine for performing an access.
 * Accesses are retried until a DAG can not be selected.  This occurs
 * when either the DAG library is incomplete or there are too many
 * failures in a parity group.
 ********************************************************************/
int
rf_DoAccess(
    RF_Raid_t * raidPtr,
    RF_IoType_t type,
    int async_flag,
    RF_RaidAddr_t raidAddress,
    RF_SectorCount_t numBlocks,
    caddr_t bufPtr,
    void *bp_in,
    RF_RaidAccessFlags_t flags)
/*
 * type should be read or write
 * async_flag should be RF_TRUE or RF_FALSE
 * bp_in is a buf pointer.  void * to facilitate ignoring it outside the kernel
 */
{
	RF_RaidAccessDesc_t *desc;
	caddr_t lbufPtr = bufPtr;
	struct buf *bp = (struct buf *) bp_in;

	raidAddress += rf_raidSectorOffset;

	if (!raidPtr->valid) {
		RF_ERRORMSG("RAIDframe driver not successfully configured.  Rejecting access.\n");
		IO_BUF_ERR(bp, EINVAL);
		return (EINVAL);
	}

	if (rf_accessDebug) {

		printf("logBytes is: %d %d %d\n", raidPtr->raidid,
		    raidPtr->logBytesPerSector,
		    (int) rf_RaidAddressToByte(raidPtr, numBlocks));
		printf("raid%d: %s raidAddr %d (stripeid %d-%d) numBlocks %d (%d bytes) buf 0x%lx\n",
		    raidPtr->raidid,
		    (type == RF_IO_TYPE_READ) ? "READ" : "WRITE",
		    (int) raidAddress,
		    (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress),
		    (int) rf_RaidAddressToStripeID(&raidPtr->Layout, raidAddress + numBlocks - 1),
		    (int) numBlocks,
		    (int) rf_RaidAddressToByte(raidPtr, numBlocks),
		    (long) bufPtr);
	}
	if (raidAddress + numBlocks > raidPtr->totalSectors) {

		printf("DoAccess: raid addr %lu too large to access %lu sectors.  Max legal addr is %lu\n",
		    (u_long) raidAddress, (u_long) numBlocks, (u_long) raidPtr->totalSectors);

		IO_BUF_ERR(bp, ENOSPC);
		return (ENOSPC);
	}
	desc = rf_AllocRaidAccDesc(raidPtr, type, raidAddress,
	    numBlocks, lbufPtr, bp, flags, raidPtr->Layout.map->states);

	if (desc == NULL) {
		return (ENOMEM);
	}
	RF_ETIMER_START(desc->tracerec.tot_timer);

	desc->async_flag = async_flag;

	rf_ContinueRaidAccess(desc);

	return (0);
}
/* force the array into reconfigured mode without doing reconstruction */
int
rf_SetReconfiguredMode(raidPtr, row, col)
	RF_Raid_t *raidPtr;
	int row;
	int col;
{
	if (!(raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE)) {
		printf("Can't set reconfigured mode in dedicated-spare array\n");
		RF_PANIC();
	}
	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->numFailures++;
	raidPtr->Disks[row][col].status = rf_ds_dist_spared;
	raidPtr->status[row] = rf_rs_reconfigured;
	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
	/* install spare table only if declustering + distributed sparing
	 * architecture. */
	if (raidPtr->Layout.map->flags & RF_BD_DECLUSTERED)
		rf_InstallSpareTable(raidPtr, row, col);
	RF_UNLOCK_MUTEX(raidPtr->mutex);
	return (0);
}

int
rf_FailDisk(
    RF_Raid_t * raidPtr,
    int frow,
    int fcol,
    int initRecon)
{
	printf("raid%d: Failing disk r%d c%d\n", raidPtr->raidid, frow, fcol);
	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->numFailures++;
	raidPtr->Disks[frow][fcol].status = rf_ds_failed;
	raidPtr->status[frow] = rf_rs_degraded;
	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
	RF_UNLOCK_MUTEX(raidPtr->mutex);
	if (initRecon)
		rf_ReconstructFailedDisk(raidPtr, frow, fcol);
	return (0);
}
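
/*
 * Quiescence support.  rf_SuspendNewRequestsAndWait() bumps
 * accesses_suspended and, if accesses are still in flight, sleeps
 * until rf_SignalQuiescenceLock() releases it; rf_ResumeNewRequests()
 * drops the suspension and fires any callbacks queued on
 * quiesce_wait_list.  The usual pattern is a bracket around work that
 * needs a quiet array (sketch only):
 */
#if 0
	rf_SuspendNewRequestsAndWait(raidPtr);
	/* ... the array is quiescent here ... */
	rf_ResumeNewRequests(raidPtr);
#endif
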
/* releases a thread that is waiting for the array to become quiesced.
 * access_suspend_mutex should be locked upon calling this
 */
void
rf_SignalQuiescenceLock(raidPtr, reconDesc)
	RF_Raid_t *raidPtr;
	RF_RaidReconDesc_t *reconDesc;
{
	if (rf_quiesceDebug) {
		printf("raid%d: Signalling quiescence lock\n",
		    raidPtr->raidid);
	}
	raidPtr->access_suspend_release = 1;

	if (raidPtr->waiting_for_quiescence) {
		SIGNAL_QUIESCENT_COND(raidPtr);
	}
}
/* suspends all new requests to the array.  No effect on accesses that
 * are in flight. */
int
rf_SuspendNewRequestsAndWait(raidPtr)
	RF_Raid_t *raidPtr;
{
	if (rf_quiesceDebug)
		printf("Suspending new reqs\n");

	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
	raidPtr->accesses_suspended++;
	raidPtr->waiting_for_quiescence = (raidPtr->accs_in_flight == 0) ? 0 : 1;

	if (raidPtr->waiting_for_quiescence) {
		raidPtr->access_suspend_release = 0;
		while (!raidPtr->access_suspend_release) {
			printf("Suspending: Waiting for Quiescence\n");
			WAIT_FOR_QUIESCENCE(raidPtr);
			raidPtr->waiting_for_quiescence = 0;
		}
	}
	printf("Quiescence reached..\n");

	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);
	return (raidPtr->waiting_for_quiescence);
}
/* wake up everyone waiting for quiescence to be released */
void
rf_ResumeNewRequests(raidPtr)
	RF_Raid_t *raidPtr;
{
	RF_CallbackDesc_t *t, *cb;

	if (rf_quiesceDebug)
		printf("Resuming new reqs\n");

	RF_LOCK_MUTEX(raidPtr->access_suspend_mutex);
	raidPtr->accesses_suspended--;
	if (raidPtr->accesses_suspended == 0)
		cb = raidPtr->quiesce_wait_list;
	else
		cb = NULL;
	raidPtr->quiesce_wait_list = NULL;
	RF_UNLOCK_MUTEX(raidPtr->access_suspend_mutex);

	while (cb) {
		t = cb;
		cb = cb->next;
		(t->callbackFunc) (t->callbackArg);
		rf_FreeCallbackDesc(t);
	}
}
/*****************************************************************************
 *
 * debug routines
 *
 *****************************************************************************/

static void
set_debug_option(name, val)
	char *name;
	long val;
{
	RF_DebugName_t *p;

	for (p = rf_debugNames; p->name; p++) {
		if (!strcmp(p->name, name)) {
			*(p->ptr) = val;
			printf("[Set debug variable %s to %ld]\n", name, val);
			return;
		}
	}
	RF_ERRORMSG1("Unknown debug string \"%s\"\n", name);
}


/* would like to use sscanf here, but apparently not available in kernel */
/*ARGSUSED*/
static void
rf_ConfigureDebug(cfgPtr)
	RF_Config_t *cfgPtr;
{
	char *val_p, *name_p, *white_p;
	long val;
	int i;

	rf_ResetDebugOptions();
	for (i = 0; i < RF_MAXDBGV && cfgPtr->debugVars[i][0]; i++) {
		name_p = rf_find_non_white(&cfgPtr->debugVars[i][0]);
		white_p = rf_find_white(name_p);	/* skip to start of 2nd
							 * word */
		val_p = rf_find_non_white(white_p);
		if (*val_p == '0' && *(val_p + 1) == 'x')
			val = rf_htoi(val_p + 2);
		else
			val = rf_atoi(val_p);
		*white_p = '\0';
		set_debug_option(name_p, val);
	}
}
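
/*
 * Each debugVars entry is parsed above as a whitespace-separated
 * "name value" pair, where the value may be decimal or 0x-prefixed
 * hex; for example "accessDebug 1" or "quiesceDebug 0x1" (assuming
 * those names appear in the rf_debugNames table).
 */
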
/* performance monitoring stuff */

#define TIMEVAL_TO_US(t) (((long) t.tv_sec) * 1000000L + (long) t.tv_usec)

#if !defined(_KERNEL) && !defined(SIMULATE)

/*
 * Throughput stats currently only used in user-level RAIDframe
 */

static int
rf_InitThroughputStats(
    RF_ShutdownList_t ** listp,
    RF_Raid_t * raidPtr,
    RF_Config_t * cfgPtr)
{
	int rc;

	/* these used by user-level raidframe only */
	rc = rf_create_managed_mutex(listp, &raidPtr->throughputstats.mutex);
	if (rc) {
		RF_ERRORMSG3("Unable to init mutex file %s line %d rc=%d\n",
		    __FILE__, __LINE__, rc);
		return (rc);
	}
	raidPtr->throughputstats.sum_io_us = 0;
	raidPtr->throughputstats.num_ios = 0;
	raidPtr->throughputstats.num_out_ios = 0;
	return (0);
}

void
rf_StartThroughputStats(RF_Raid_t * raidPtr)
{
	RF_LOCK_MUTEX(raidPtr->throughputstats.mutex);
	raidPtr->throughputstats.num_ios++;
	raidPtr->throughputstats.num_out_ios++;
	if (raidPtr->throughputstats.num_out_ios == 1)
		RF_GETTIME(raidPtr->throughputstats.start);
	RF_UNLOCK_MUTEX(raidPtr->throughputstats.mutex);
}

static void
rf_StopThroughputStats(RF_Raid_t * raidPtr)
{
	struct timeval diff;

	RF_LOCK_MUTEX(raidPtr->throughputstats.mutex);
	raidPtr->throughputstats.num_out_ios--;
	if (raidPtr->throughputstats.num_out_ios == 0) {
		RF_GETTIME(raidPtr->throughputstats.stop);
		RF_TIMEVAL_DIFF(&raidPtr->throughputstats.start,
		    &raidPtr->throughputstats.stop, &diff);
		raidPtr->throughputstats.sum_io_us += TIMEVAL_TO_US(diff);
	}
	RF_UNLOCK_MUTEX(raidPtr->throughputstats.mutex);
}

static void
rf_PrintThroughputStats(RF_Raid_t * raidPtr)
{
	RF_ASSERT(raidPtr->throughputstats.num_out_ios == 0);
	if (raidPtr->throughputstats.sum_io_us != 0) {
		printf("[Throughput: %8.2f IOs/second]\n",
		    raidPtr->throughputstats.num_ios
		    / (raidPtr->throughputstats.sum_io_us / 1000000.0));
	}
}
#endif				/* !_KERNEL && !SIMULATE */

void
rf_StartUserStats(RF_Raid_t * raidPtr)
{
	RF_GETTIME(raidPtr->userstats.start);
	raidPtr->userstats.sum_io_us = 0;
	raidPtr->userstats.num_ios = 0;
	raidPtr->userstats.num_sect_moved = 0;
}

void
rf_StopUserStats(RF_Raid_t * raidPtr)
{
	RF_GETTIME(raidPtr->userstats.stop);
}

void
rf_UpdateUserStats(raidPtr, rt, numsect)
	RF_Raid_t *raidPtr;
	int rt;			/* resp time in us */
	int numsect;		/* number of sectors for this access */
{
	raidPtr->userstats.sum_io_us += rt;
	raidPtr->userstats.num_ios++;
	raidPtr->userstats.num_sect_moved += numsect;
}

void
rf_PrintUserStats(RF_Raid_t * raidPtr)
{
	long elapsed_us, mbs, mbs_frac;
	struct timeval diff;

	RF_TIMEVAL_DIFF(&raidPtr->userstats.start, &raidPtr->userstats.stop, &diff);
	elapsed_us = TIMEVAL_TO_US(diff);

	/* 2000 sectors per megabyte, 1000000 microseconds per second */
	if (elapsed_us)
		mbs = (raidPtr->userstats.num_sect_moved / 2000) /
		    (elapsed_us / 1000000);
	else
		mbs = 0;

	/* this computes only the first digit of the fractional mb/s moved */
	if (elapsed_us) {
		mbs_frac = ((raidPtr->userstats.num_sect_moved / 200) /
		    (elapsed_us / 1000000)) - (mbs * 10);
	} else {
		mbs_frac = 0;
	}

	printf("Number of I/Os: %ld\n", raidPtr->userstats.num_ios);
	printf("Elapsed time (us): %ld\n", elapsed_us);
	printf("User I/Os per second: %ld\n",
	    RF_DB0_CHECK(raidPtr->userstats.num_ios, (elapsed_us / 1000000)));
	printf("Average user response time: %ld us\n",
	    RF_DB0_CHECK(raidPtr->userstats.sum_io_us, raidPtr->userstats.num_ios));
	printf("Total sectors moved: %ld\n", raidPtr->userstats.num_sect_moved);
	printf("Average access size (sect): %ld\n",
	    RF_DB0_CHECK(raidPtr->userstats.num_sect_moved, raidPtr->userstats.num_ios));
	printf("Achieved data rate: %ld.%ld MB/sec\n", mbs, mbs_frac);
}


void
rf_print_panic_message(line, file)
	int line;
	char *file;
{
	sprintf(rf_panicbuf, "raidframe error at line %d file %s",
	    line, file);
}

void
rf_print_assert_panic_message(line, file, condition)
	int line;
	char *file;
	char *condition;
{
	sprintf(rf_panicbuf,
	    "raidframe error at line %d file %s (failed asserting %s)\n",
	    line, file, condition);
}
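
/*
 * How the user-stats hooks above are meant to fit together (sketch
 * only; rf_StartUserStats() is already invoked from rf_Configure(),
 * and response_time_us/nsectors below are placeholder values):
 */
#if 0
	rf_StartUserStats(raidPtr);
	/* ... for each completed access ... */
	rf_UpdateUserStats(raidPtr, response_time_us, nsectors);
	/* ... when reporting ... */
	rf_StopUserStats(raidPtr);
	rf_PrintUserStats(raidPtr);
#endif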