1 /* $NetBSD: rf_raid1.c,v 1.3 1999/02/05 00:06:15 oster Exp $ */ 2 /* 3 * Copyright (c) 1995 Carnegie-Mellon University. 4 * All rights reserved. 5 * 6 * Author: William V. Courtright II 7 * 8 * Permission to use, copy, modify and distribute this software and 9 * its documentation is hereby granted, provided that both the copyright 10 * notice and this permission notice appear in all copies of the 11 * software, derivative works or modified versions, and any portions 12 * thereof, and that both notices appear in supporting documentation. 13 * 14 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 15 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 16 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 17 * 18 * Carnegie Mellon requests users of this software to return to 19 * 20 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 21 * School of Computer Science 22 * Carnegie Mellon University 23 * Pittsburgh PA 15213-3890 24 * 25 * any improvements or extensions that they make and grant Carnegie the 26 * rights to redistribute these changes. 27 */ 28 29 /***************************************************************************** 30 * 31 * rf_raid1.c -- implements RAID Level 1 32 * 33 *****************************************************************************/ 34 35 #include "rf_raid.h" 36 #include "rf_raid1.h" 37 #include "rf_dag.h" 38 #include "rf_dagffrd.h" 39 #include "rf_dagffwr.h" 40 #include "rf_dagdegrd.h" 41 #include "rf_dagutils.h" 42 #include "rf_dagfuncs.h" 43 #include "rf_threadid.h" 44 #include "rf_diskqueue.h" 45 #include "rf_general.h" 46 #include "rf_utils.h" 47 #include "rf_parityscan.h" 48 #include "rf_mcpair.h" 49 #include "rf_layout.h" 50 #include "rf_map.h" 51 #include "rf_engine.h" 52 #include "rf_reconbuffer.h" 53 #include "rf_sys.h" 54 55 typedef struct RF_Raid1ConfigInfo_s { 56 RF_RowCol_t **stripeIdentifier; 57 } RF_Raid1ConfigInfo_t; 58 /* start of day code specific to RAID level 1 */ 59 int 60 rf_ConfigureRAID1( 61 RF_ShutdownList_t ** listp, 62 RF_Raid_t * raidPtr, 63 RF_Config_t * cfgPtr) 64 { 65 RF_RaidLayout_t *layoutPtr = &raidPtr->Layout; 66 RF_Raid1ConfigInfo_t *info; 67 RF_RowCol_t i; 68 69 /* create a RAID level 1 configuration structure */ 70 RF_MallocAndAdd(info, sizeof(RF_Raid1ConfigInfo_t), (RF_Raid1ConfigInfo_t *), raidPtr->cleanupList); 71 if (info == NULL) 72 return (ENOMEM); 73 layoutPtr->layoutSpecificInfo = (void *) info; 74 75 /* ... and fill it in. */ 76 info->stripeIdentifier = rf_make_2d_array(raidPtr->numCol / 2, 2, raidPtr->cleanupList); 77 if (info->stripeIdentifier == NULL) 78 return (ENOMEM); 79 for (i = 0; i < (raidPtr->numCol / 2); i++) { 80 info->stripeIdentifier[i][0] = (2 * i); 81 info->stripeIdentifier[i][1] = (2 * i) + 1; 82 } 83 84 RF_ASSERT(raidPtr->numRow == 1); 85 86 /* this implementation of RAID level 1 uses one row of numCol disks 87 * and allows multiple (numCol / 2) stripes per row. A stripe 88 * consists of a single data unit and a single parity (mirror) unit. 89 * stripe id = raidAddr / stripeUnitSize */ 90 raidPtr->totalSectors = layoutPtr->stripeUnitsPerDisk * (raidPtr->numCol / 2) * layoutPtr->sectorsPerStripeUnit; 91 layoutPtr->numStripe = layoutPtr->stripeUnitsPerDisk * (raidPtr->numCol / 2); 92 layoutPtr->dataSectorsPerStripe = layoutPtr->sectorsPerStripeUnit; 93 layoutPtr->bytesPerStripeUnit = layoutPtr->sectorsPerStripeUnit << raidPtr->logBytesPerSector; 94 layoutPtr->numDataCol = 1; 95 layoutPtr->numParityCol = 1; 96 return (0); 97 } 98 99 100 /* returns the physical disk location of the primary copy in the mirror pair */ 101 void 102 rf_MapSectorRAID1( 103 RF_Raid_t * raidPtr, 104 RF_RaidAddr_t raidSector, 105 RF_RowCol_t * row, 106 RF_RowCol_t * col, 107 RF_SectorNum_t * diskSector, 108 int remap) 109 { 110 RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit; 111 RF_RowCol_t mirrorPair = SUID % (raidPtr->numCol / 2); 112 113 *row = 0; 114 *col = 2 * mirrorPair; 115 *diskSector = ((SUID / (raidPtr->numCol / 2)) * raidPtr->Layout.sectorsPerStripeUnit) + (raidSector % raidPtr->Layout.sectorsPerStripeUnit); 116 } 117 118 119 /* Map Parity 120 * 121 * returns the physical disk location of the secondary copy in the mirror 122 * pair 123 */ 124 void 125 rf_MapParityRAID1( 126 RF_Raid_t * raidPtr, 127 RF_RaidAddr_t raidSector, 128 RF_RowCol_t * row, 129 RF_RowCol_t * col, 130 RF_SectorNum_t * diskSector, 131 int remap) 132 { 133 RF_StripeNum_t SUID = raidSector / raidPtr->Layout.sectorsPerStripeUnit; 134 RF_RowCol_t mirrorPair = SUID % (raidPtr->numCol / 2); 135 136 *row = 0; 137 *col = (2 * mirrorPair) + 1; 138 139 *diskSector = ((SUID / (raidPtr->numCol / 2)) * raidPtr->Layout.sectorsPerStripeUnit) + (raidSector % raidPtr->Layout.sectorsPerStripeUnit); 140 } 141 142 143 /* IdentifyStripeRAID1 144 * 145 * returns a list of disks for a given redundancy group 146 */ 147 void 148 rf_IdentifyStripeRAID1( 149 RF_Raid_t * raidPtr, 150 RF_RaidAddr_t addr, 151 RF_RowCol_t ** diskids, 152 RF_RowCol_t * outRow) 153 { 154 RF_StripeNum_t stripeID = rf_RaidAddressToStripeID(&raidPtr->Layout, addr); 155 RF_Raid1ConfigInfo_t *info = raidPtr->Layout.layoutSpecificInfo; 156 RF_ASSERT(stripeID >= 0); 157 RF_ASSERT(addr >= 0); 158 *outRow = 0; 159 *diskids = info->stripeIdentifier[stripeID % (raidPtr->numCol / 2)]; 160 RF_ASSERT(*diskids); 161 } 162 163 164 /* MapSIDToPSIDRAID1 165 * 166 * maps a logical stripe to a stripe in the redundant array 167 */ 168 void 169 rf_MapSIDToPSIDRAID1( 170 RF_RaidLayout_t * layoutPtr, 171 RF_StripeNum_t stripeID, 172 RF_StripeNum_t * psID, 173 RF_ReconUnitNum_t * which_ru) 174 { 175 *which_ru = 0; 176 *psID = stripeID; 177 } 178 179 180 181 /****************************************************************************** 182 * select a graph to perform a single-stripe access 183 * 184 * Parameters: raidPtr - description of the physical array 185 * type - type of operation (read or write) requested 186 * asmap - logical & physical addresses for this access 187 * createFunc - name of function to use to create the graph 188 *****************************************************************************/ 189 190 void 191 rf_RAID1DagSelect( 192 RF_Raid_t * raidPtr, 193 RF_IoType_t type, 194 RF_AccessStripeMap_t * asmap, 195 RF_VoidFuncPtr * createFunc) 196 { 197 RF_RowCol_t frow, fcol, or, oc; 198 RF_PhysDiskAddr_t *failedPDA; 199 int prior_recon, tid; 200 RF_RowStatus_t rstat; 201 RF_SectorNum_t oo; 202 203 204 RF_ASSERT(RF_IO_IS_R_OR_W(type)); 205 206 if (asmap->numDataFailed + asmap->numParityFailed > 1) { 207 RF_ERRORMSG("Multiple disks failed in a single group! Aborting I/O operation.\n"); 208 *createFunc = NULL; 209 return; 210 } 211 if (asmap->numDataFailed + asmap->numParityFailed) { 212 /* 213 * We've got a fault. Re-map to spare space, iff applicable. 214 * Shouldn't the arch-independent code do this for us? 215 * Anyway, it turns out if we don't do this here, then when 216 * we're reconstructing, writes go only to the surviving 217 * original disk, and aren't reflected on the reconstructed 218 * spare. Oops. --jimz 219 */ 220 failedPDA = asmap->failedPDAs[0]; 221 frow = failedPDA->row; 222 fcol = failedPDA->col; 223 rstat = raidPtr->status[frow]; 224 prior_recon = (rstat == rf_rs_reconfigured) || ( 225 (rstat == rf_rs_reconstructing) ? 226 rf_CheckRUReconstructed(raidPtr->reconControl[frow]->reconMap, failedPDA->startSector) : 0 227 ); 228 if (prior_recon) { 229 or = frow; 230 oc = fcol; 231 oo = failedPDA->startSector; 232 /* 233 * If we did distributed sparing, we'd monkey with that here. 234 * But we don't, so we'll 235 */ 236 failedPDA->row = raidPtr->Disks[frow][fcol].spareRow; 237 failedPDA->col = raidPtr->Disks[frow][fcol].spareCol; 238 /* 239 * Redirect other components, iff necessary. This looks 240 * pretty suspicious to me, but it's what the raid5 241 * DAG select does. 242 */ 243 if (asmap->parityInfo->next) { 244 if (failedPDA == asmap->parityInfo) { 245 failedPDA->next->row = failedPDA->row; 246 failedPDA->next->col = failedPDA->col; 247 } else { 248 if (failedPDA == asmap->parityInfo->next) { 249 asmap->parityInfo->row = failedPDA->row; 250 asmap->parityInfo->col = failedPDA->col; 251 } 252 } 253 } 254 if (rf_dagDebug || rf_mapDebug) { 255 rf_get_threadid(tid); 256 printf("[%d] Redirected type '%c' r %d c %d o %ld -> r %d c %d o %ld\n", 257 tid, type, or, oc, (long) oo, failedPDA->row, failedPDA->col, 258 (long) failedPDA->startSector); 259 } 260 asmap->numDataFailed = asmap->numParityFailed = 0; 261 } 262 } 263 if (type == RF_IO_TYPE_READ) { 264 if (asmap->numDataFailed == 0) 265 *createFunc = (RF_VoidFuncPtr) rf_CreateMirrorIdleReadDAG; 266 else 267 *createFunc = (RF_VoidFuncPtr) rf_CreateRaidOneDegradedReadDAG; 268 } else { 269 *createFunc = (RF_VoidFuncPtr) rf_CreateRaidOneWriteDAG; 270 } 271 } 272 273 int 274 rf_VerifyParityRAID1( 275 RF_Raid_t * raidPtr, 276 RF_RaidAddr_t raidAddr, 277 RF_PhysDiskAddr_t * parityPDA, 278 int correct_it, 279 RF_RaidAccessFlags_t flags) 280 { 281 int nbytes, bcount, stripeWidth, ret, i, j, tid = 0, nbad, *bbufs; 282 RF_DagNode_t *blockNode, *unblockNode, *wrBlock; 283 RF_DagHeader_t *rd_dag_h, *wr_dag_h; 284 RF_AccessStripeMapHeader_t *asm_h; 285 RF_AllocListElem_t *allocList; 286 RF_AccTraceEntry_t tracerec; 287 RF_ReconUnitNum_t which_ru; 288 RF_RaidLayout_t *layoutPtr; 289 RF_AccessStripeMap_t *aasm; 290 RF_SectorCount_t nsector; 291 RF_RaidAddr_t startAddr; 292 char *buf, *buf1, *buf2; 293 RF_PhysDiskAddr_t *pda; 294 RF_StripeNum_t psID; 295 RF_MCPair_t *mcpair; 296 297 if (rf_verifyParityDebug) { 298 rf_get_threadid(tid); 299 } 300 layoutPtr = &raidPtr->Layout; 301 startAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr, raidAddr); 302 nsector = parityPDA->numSector; 303 nbytes = rf_RaidAddressToByte(raidPtr, nsector); 304 psID = rf_RaidAddressToParityStripeID(layoutPtr, raidAddr, &which_ru); 305 306 asm_h = NULL; 307 rd_dag_h = wr_dag_h = NULL; 308 mcpair = NULL; 309 310 ret = RF_PARITY_COULD_NOT_VERIFY; 311 312 rf_MakeAllocList(allocList); 313 if (allocList == NULL) 314 return (RF_PARITY_COULD_NOT_VERIFY); 315 mcpair = rf_AllocMCPair(); 316 if (mcpair == NULL) 317 goto done; 318 RF_ASSERT(layoutPtr->numDataCol == layoutPtr->numParityCol); 319 stripeWidth = layoutPtr->numDataCol + layoutPtr->numParityCol; 320 bcount = nbytes * (layoutPtr->numDataCol + layoutPtr->numParityCol); 321 RF_MallocAndAdd(buf, bcount, (char *), allocList); 322 if (buf == NULL) 323 goto done; 324 if (rf_verifyParityDebug) { 325 printf("[%d] RAID1 parity verify: buf=%lx bcount=%d (%lx - %lx)\n", 326 tid, (long) buf, bcount, (long) buf, (long) buf + bcount); 327 } 328 /* 329 * Generate a DAG which will read the entire stripe- then we can 330 * just compare data chunks versus "parity" chunks. 331 */ 332 333 rd_dag_h = rf_MakeSimpleDAG(raidPtr, stripeWidth, nbytes, buf, 334 rf_DiskReadFunc, rf_DiskReadUndoFunc, "Rod", allocList, flags, 335 RF_IO_NORMAL_PRIORITY); 336 if (rd_dag_h == NULL) 337 goto done; 338 blockNode = rd_dag_h->succedents[0]; 339 unblockNode = blockNode->succedents[0]->succedents[0]; 340 341 /* 342 * Map the access to physical disk addresses (PDAs)- this will 343 * get us both a list of data addresses, and "parity" addresses 344 * (which are really mirror copies). 345 */ 346 asm_h = rf_MapAccess(raidPtr, startAddr, layoutPtr->dataSectorsPerStripe, 347 buf, RF_DONT_REMAP); 348 aasm = asm_h->stripeMap; 349 350 buf1 = buf; 351 /* 352 * Loop through the data blocks, setting up read nodes for each. 353 */ 354 for (pda = aasm->physInfo, i = 0; i < layoutPtr->numDataCol; i++, pda = pda->next) { 355 RF_ASSERT(pda); 356 357 rf_RangeRestrictPDA(raidPtr, parityPDA, pda, 0, 1); 358 359 RF_ASSERT(pda->numSector != 0); 360 if (rf_TryToRedirectPDA(raidPtr, pda, 0)) { 361 /* cannot verify parity with dead disk */ 362 goto done; 363 } 364 pda->bufPtr = buf1; 365 blockNode->succedents[i]->params[0].p = pda; 366 blockNode->succedents[i]->params[1].p = buf1; 367 blockNode->succedents[i]->params[2].v = psID; 368 blockNode->succedents[i]->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru); 369 buf1 += nbytes; 370 } 371 RF_ASSERT(pda == NULL); 372 /* 373 * keep i, buf1 running 374 * 375 * Loop through parity blocks, setting up read nodes for each. 376 */ 377 for (pda = aasm->parityInfo; i < layoutPtr->numDataCol + layoutPtr->numParityCol; i++, pda = pda->next) { 378 RF_ASSERT(pda); 379 rf_RangeRestrictPDA(raidPtr, parityPDA, pda, 0, 1); 380 RF_ASSERT(pda->numSector != 0); 381 if (rf_TryToRedirectPDA(raidPtr, pda, 0)) { 382 /* cannot verify parity with dead disk */ 383 goto done; 384 } 385 pda->bufPtr = buf1; 386 blockNode->succedents[i]->params[0].p = pda; 387 blockNode->succedents[i]->params[1].p = buf1; 388 blockNode->succedents[i]->params[2].v = psID; 389 blockNode->succedents[i]->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru); 390 buf1 += nbytes; 391 } 392 RF_ASSERT(pda == NULL); 393 394 bzero((char *) &tracerec, sizeof(tracerec)); 395 rd_dag_h->tracerec = &tracerec; 396 397 if (rf_verifyParityDebug > 1) { 398 printf("[%d] RAID1 parity verify read dag:\n", tid); 399 rf_PrintDAGList(rd_dag_h); 400 } 401 RF_LOCK_MUTEX(mcpair->mutex); 402 mcpair->flag = 0; 403 rf_DispatchDAG(rd_dag_h, (void (*) (void *)) rf_MCPairWakeupFunc, 404 (void *) mcpair); 405 while (mcpair->flag == 0) { 406 RF_WAIT_MCPAIR(mcpair); 407 } 408 RF_UNLOCK_MUTEX(mcpair->mutex); 409 410 if (rd_dag_h->status != rf_enable) { 411 RF_ERRORMSG("Unable to verify raid1 parity: can't read stripe\n"); 412 ret = RF_PARITY_COULD_NOT_VERIFY; 413 goto done; 414 } 415 /* 416 * buf1 is the beginning of the data blocks chunk 417 * buf2 is the beginning of the parity blocks chunk 418 */ 419 buf1 = buf; 420 buf2 = buf + (nbytes * layoutPtr->numDataCol); 421 ret = RF_PARITY_OKAY; 422 /* 423 * bbufs is "bad bufs"- an array whose entries are the data 424 * column numbers where we had miscompares. (That is, column 0 425 * and column 1 of the array are mirror copies, and are considered 426 * "data column 0" for this purpose). 427 */ 428 RF_MallocAndAdd(bbufs, layoutPtr->numParityCol * sizeof(int), (int *), 429 allocList); 430 nbad = 0; 431 /* 432 * Check data vs "parity" (mirror copy). 433 */ 434 for (i = 0; i < layoutPtr->numDataCol; i++) { 435 if (rf_verifyParityDebug) { 436 printf("[%d] RAID1 parity verify %d bytes: i=%d buf1=%lx buf2=%lx buf=%lx\n", 437 tid, nbytes, i, (long) buf1, (long) buf2, (long) buf); 438 } 439 ret = bcmp(buf1, buf2, nbytes); 440 if (ret) { 441 if (rf_verifyParityDebug > 1) { 442 for (j = 0; j < nbytes; j++) { 443 if (buf1[j] != buf2[j]) 444 break; 445 } 446 printf("psid=%ld j=%d\n", (long) psID, j); 447 printf("buf1 %02x %02x %02x %02x %02x\n", buf1[0] & 0xff, 448 buf1[1] & 0xff, buf1[2] & 0xff, buf1[3] & 0xff, buf1[4] & 0xff); 449 printf("buf2 %02x %02x %02x %02x %02x\n", buf2[0] & 0xff, 450 buf2[1] & 0xff, buf2[2] & 0xff, buf2[3] & 0xff, buf2[4] & 0xff); 451 } 452 if (rf_verifyParityDebug) { 453 printf("[%d] RAID1: found bad parity, i=%d\n", tid, i); 454 } 455 /* 456 * Parity is bad. Keep track of which columns were bad. 457 */ 458 if (bbufs) 459 bbufs[nbad] = i; 460 nbad++; 461 ret = RF_PARITY_BAD; 462 } 463 buf1 += nbytes; 464 buf2 += nbytes; 465 } 466 467 if ((ret != RF_PARITY_OKAY) && correct_it) { 468 ret = RF_PARITY_COULD_NOT_CORRECT; 469 if (rf_verifyParityDebug) { 470 printf("[%d] RAID1 parity verify: parity not correct\n", tid); 471 } 472 if (bbufs == NULL) 473 goto done; 474 /* 475 * Make a DAG with one write node for each bad unit. We'll simply 476 * write the contents of the data unit onto the parity unit for 477 * correction. (It's possible that the mirror copy was the correct 478 * copy, and that we're spooging good data by writing bad over it, 479 * but there's no way we can know that. 480 */ 481 wr_dag_h = rf_MakeSimpleDAG(raidPtr, nbad, nbytes, buf, 482 rf_DiskWriteFunc, rf_DiskWriteUndoFunc, "Wnp", allocList, flags, 483 RF_IO_NORMAL_PRIORITY); 484 if (wr_dag_h == NULL) 485 goto done; 486 wrBlock = wr_dag_h->succedents[0]; 487 /* 488 * Fill in a write node for each bad compare. 489 */ 490 for (i = 0; i < nbad; i++) { 491 j = i + layoutPtr->numDataCol; 492 pda = blockNode->succedents[j]->params[0].p; 493 pda->bufPtr = blockNode->succedents[i]->params[1].p; 494 wrBlock->succedents[i]->params[0].p = pda; 495 wrBlock->succedents[i]->params[1].p = pda->bufPtr; 496 wrBlock->succedents[i]->params[2].v = psID; 497 wrBlock->succedents[0]->params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru); 498 } 499 bzero((char *) &tracerec, sizeof(tracerec)); 500 wr_dag_h->tracerec = &tracerec; 501 if (rf_verifyParityDebug > 1) { 502 printf("Parity verify write dag:\n"); 503 rf_PrintDAGList(wr_dag_h); 504 } 505 RF_LOCK_MUTEX(mcpair->mutex); 506 mcpair->flag = 0; 507 /* fire off the write DAG */ 508 rf_DispatchDAG(wr_dag_h, (void (*) (void *)) rf_MCPairWakeupFunc, 509 (void *) mcpair); 510 while (!mcpair->flag) { 511 RF_WAIT_COND(mcpair->cond, mcpair->mutex); 512 } 513 RF_UNLOCK_MUTEX(mcpair->mutex); 514 if (wr_dag_h->status != rf_enable) { 515 RF_ERRORMSG("Unable to correct RAID1 parity in VerifyParity\n"); 516 goto done; 517 } 518 ret = RF_PARITY_CORRECTED; 519 } 520 done: 521 /* 522 * All done. We might've gotten here without doing part of the function, 523 * so cleanup what we have to and return our running status. 524 */ 525 if (asm_h) 526 rf_FreeAccessStripeMap(asm_h); 527 if (rd_dag_h) 528 rf_FreeDAG(rd_dag_h); 529 if (wr_dag_h) 530 rf_FreeDAG(wr_dag_h); 531 if (mcpair) 532 rf_FreeMCPair(mcpair); 533 rf_FreeAllocList(allocList); 534 if (rf_verifyParityDebug) { 535 printf("[%d] RAID1 parity verify, returning %d\n", tid, ret); 536 } 537 return (ret); 538 } 539 540 int 541 rf_SubmitReconBufferRAID1(rbuf, keep_it, use_committed) 542 RF_ReconBuffer_t *rbuf; /* the recon buffer to submit */ 543 int keep_it; /* whether we can keep this buffer or we have 544 * to return it */ 545 int use_committed; /* whether to use a committed or an available 546 * recon buffer */ 547 { 548 RF_ReconParityStripeStatus_t *pssPtr; 549 RF_ReconCtrl_t *reconCtrlPtr; 550 RF_RaidLayout_t *layoutPtr; 551 int tid = 0, retcode, created; 552 RF_CallbackDesc_t *cb, *p; 553 RF_ReconBuffer_t *t; 554 RF_Raid_t *raidPtr; 555 caddr_t ta; 556 557 retcode = 0; 558 created = 0; 559 560 raidPtr = rbuf->raidPtr; 561 layoutPtr = &raidPtr->Layout; 562 reconCtrlPtr = raidPtr->reconControl[rbuf->row]; 563 564 RF_ASSERT(rbuf); 565 RF_ASSERT(rbuf->col != reconCtrlPtr->fcol); 566 567 if (rf_reconbufferDebug) { 568 rf_get_threadid(tid); 569 printf("[%d] RAID1 reconbuffer submission r%d c%d psid %ld ru%d (failed offset %ld)\n", 570 tid, rbuf->row, rbuf->col, (long) rbuf->parityStripeID, rbuf->which_ru, 571 (long) rbuf->failedDiskSectorOffset); 572 } 573 if (rf_reconDebug) { 574 printf("RAID1 reconbuffer submit psid %ld buf %lx\n", 575 (long) rbuf->parityStripeID, (long) rbuf->buffer); 576 printf("RAID1 psid %ld %02x %02x %02x %02x %02x\n", 577 (long) rbuf->parityStripeID, 578 rbuf->buffer[0], rbuf->buffer[1], rbuf->buffer[2], rbuf->buffer[3], 579 rbuf->buffer[4]); 580 } 581 RF_LOCK_PSS_MUTEX(raidPtr, rbuf->row, rbuf->parityStripeID); 582 583 RF_LOCK_MUTEX(reconCtrlPtr->rb_mutex); 584 585 pssPtr = rf_LookupRUStatus(raidPtr, reconCtrlPtr->pssTable, 586 rbuf->parityStripeID, rbuf->which_ru, RF_PSS_NONE, &created); 587 RF_ASSERT(pssPtr); /* if it didn't exist, we wouldn't have gotten 588 * an rbuf for it */ 589 590 /* 591 * Since this is simple mirroring, the first submission for a stripe is also 592 * treated as the last. 593 */ 594 595 t = NULL; 596 if (keep_it) { 597 if (rf_reconbufferDebug) { 598 printf("[%d] RAID1 rbuf submission: keeping rbuf\n", tid); 599 } 600 t = rbuf; 601 } else { 602 if (use_committed) { 603 if (rf_reconbufferDebug) { 604 printf("[%d] RAID1 rbuf submission: using committed rbuf\n", tid); 605 } 606 t = reconCtrlPtr->committedRbufs; 607 RF_ASSERT(t); 608 reconCtrlPtr->committedRbufs = t->next; 609 t->next = NULL; 610 } else 611 if (reconCtrlPtr->floatingRbufs) { 612 if (rf_reconbufferDebug) { 613 printf("[%d] RAID1 rbuf submission: using floating rbuf\n", tid); 614 } 615 t = reconCtrlPtr->floatingRbufs; 616 reconCtrlPtr->floatingRbufs = t->next; 617 t->next = NULL; 618 } 619 } 620 if (t == NULL) { 621 if (rf_reconbufferDebug) { 622 printf("[%d] RAID1 rbuf submission: waiting for rbuf\n", tid); 623 } 624 RF_ASSERT((keep_it == 0) && (use_committed == 0)); 625 raidPtr->procsInBufWait++; 626 if ((raidPtr->procsInBufWait == (raidPtr->numCol - 1)) 627 && (raidPtr->numFullReconBuffers == 0)) { 628 /* ruh-ro */ 629 RF_ERRORMSG("Buffer wait deadlock\n"); 630 rf_PrintPSStatusTable(raidPtr, rbuf->row); 631 RF_PANIC(); 632 } 633 pssPtr->flags |= RF_PSS_BUFFERWAIT; 634 cb = rf_AllocCallbackDesc(); 635 cb->row = rbuf->row; 636 cb->col = rbuf->col; 637 cb->callbackArg.v = rbuf->parityStripeID; 638 cb->callbackArg2.v = rbuf->which_ru; 639 cb->next = NULL; 640 if (reconCtrlPtr->bufferWaitList == NULL) { 641 /* we are the wait list- lucky us */ 642 reconCtrlPtr->bufferWaitList = cb; 643 } else { 644 /* append to wait list */ 645 for (p = reconCtrlPtr->bufferWaitList; p->next; p = p->next); 646 p->next = cb; 647 } 648 retcode = 1; 649 goto out; 650 } 651 if (t != rbuf) { 652 t->row = rbuf->row; 653 t->col = reconCtrlPtr->fcol; 654 t->parityStripeID = rbuf->parityStripeID; 655 t->which_ru = rbuf->which_ru; 656 t->failedDiskSectorOffset = rbuf->failedDiskSectorOffset; 657 t->spRow = rbuf->spRow; 658 t->spCol = rbuf->spCol; 659 t->spOffset = rbuf->spOffset; 660 /* Swap buffers. DANCE! */ 661 ta = t->buffer; 662 t->buffer = rbuf->buffer; 663 rbuf->buffer = ta; 664 } 665 /* 666 * Use the rbuf we've been given as the target. 667 */ 668 RF_ASSERT(pssPtr->rbuf == NULL); 669 pssPtr->rbuf = t; 670 671 t->count = 1; 672 /* 673 * Below, we use 1 for numDataCol (which is equal to the count in the 674 * previous line), so we'll always be done. 675 */ 676 rf_CheckForFullRbuf(raidPtr, reconCtrlPtr, pssPtr, 1); 677 678 out: 679 RF_UNLOCK_PSS_MUTEX(raidPtr, rbuf->row, rbuf->parityStripeID); 680 RF_UNLOCK_MUTEX(reconCtrlPtr->rb_mutex); 681 if (rf_reconbufferDebug) { 682 printf("[%d] RAID1 rbuf submission: returning %d\n", tid, retcode); 683 } 684 return (retcode); 685 } 686