/*	$NetBSD: rf_dagutils.c,v 1.43 2004/03/23 21:53:36 oster Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Mark Holland, William V. Courtright II, Jim Zelenka
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/******************************************************************************
 *
 * rf_dagutils.c -- utility routines for manipulating dags
 *
 *****************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_dagutils.c,v 1.43 2004/03/23 21:53:36 oster Exp $");

#include <dev/raidframe/raidframevar.h>

#include "rf_archs.h"
#include "rf_threadstuff.h"
#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_dagutils.h"
#include "rf_dagfuncs.h"
#include "rf_general.h"
#include "rf_map.h"
#include "rf_shutdown.h"

#define SNUM_DIFF(_a_,_b_) (((_a_)>(_b_))?((_a_)-(_b_)):((_b_)-(_a_)))

const RF_RedFuncs_t rf_xorFuncs = {
	rf_RegularXorFunc, "Reg Xr",
	rf_SimpleXorFunc, "Simple Xr"};

const RF_RedFuncs_t rf_xorRecoveryFuncs = {
	rf_RecoveryXorFunc, "Recovery Xr",
	rf_RecoveryXorFunc, "Recovery Xr"};

#if RF_DEBUG_VALIDATE_DAG
static void rf_RecurPrintDAG(RF_DagNode_t *, int, int);
static void rf_PrintDAG(RF_DagHeader_t *);
static int rf_ValidateBranch(RF_DagNode_t *, int *, int *,
			     RF_DagNode_t **, int);
static void rf_ValidateBranchVisitedBits(RF_DagNode_t *, int, int);
static void rf_ValidateVisitedBits(RF_DagHeader_t *);
#endif /* RF_DEBUG_VALIDATE_DAG */

/* The maximum number of nodes in a DAG is bounded by

   (2 * raidPtr->Layout->numDataCol) + (1 * layoutPtr->numParityCol) +
	(1 * 2 * layoutPtr->numParityCol) + 3

   which is: 2*RF_MAXCOL+1*2+1*2*2+3

   For RF_MAXCOL of 40, this works out to 89.  We use this value to provide
   an estimate on the maximum size needed for RF_DAGPCACHE_SIZE.  For
   RF_MAXCOL of 40, this structure would be 534 bytes.  Too much to have
   on-hand in a RF_DagNode_t, but should be ok to have a few kicking around.
*/
#define RF_DAGPCACHE_SIZE ((2*RF_MAXCOL+1*2+1*2*2+3) *(RF_MAX(sizeof(RF_DagParam_t), sizeof(RF_DagNode_t *))))
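
/*
 * Worked example of the bound above (illustrative only; the exact byte
 * count depends on sizeof(RF_DagParam_t) on the target architecture):
 *
 *	RF_MAXCOL = 40
 *	max nodes = 2*40 + 1*2 + 1*2*2 + 3 = 80 + 2 + 4 + 3 = 89
 *	RF_DAGPCACHE_SIZE = 89 * RF_MAX(sizeof(RF_DagParam_t),
 *	    sizeof(RF_DagNode_t *))
 *
 * i.e. one cache entry is big enough to hold either the parameter array
 * or the pointer arrays of the largest DAG node we expect to build.
 */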

/******************************************************************************
 *
 * InitNode - initialize a dag node
 *
 * the size of the propList array is always the same as that of the
 * successors array.
 *
 *****************************************************************************/
void
rf_InitNode(RF_DagNode_t *node, RF_NodeStatus_t initstatus, int commit,
	    int (*doFunc) (RF_DagNode_t *node),
	    int (*undoFunc) (RF_DagNode_t *node),
	    int (*wakeFunc) (RF_DagNode_t *node, int status),
	    int nSucc, int nAnte, int nParam, int nResult,
	    RF_DagHeader_t *hdr, char *name, RF_AllocListElem_t *alist)
{
	void **ptrs;
	int nptrs;

	if (nAnte > RF_MAX_ANTECEDENTS)
		RF_PANIC();
	node->status = initstatus;
	node->commitNode = commit;
	node->doFunc = doFunc;
	node->undoFunc = undoFunc;
	node->wakeFunc = wakeFunc;
	node->numParams = nParam;
	node->numResults = nResult;
	node->numAntecedents = nAnte;
	node->numAntDone = 0;
	node->next = NULL;
	/* node->list_next = NULL */ /* Don't touch this here!
					It may already be
					in use by the caller! */
	node->numSuccedents = nSucc;
	node->name = name;
	node->dagHdr = hdr;
	node->big_dag_ptrs = NULL;
	node->big_dag_params = NULL;
	node->visited = 0;

	/* allocate all the pointers with one call to malloc.  The block
	 * holds, in order, the succedents, antecedents, results and
	 * propList arrays; propList is the same size as succedents, which
	 * is why nSucc is counted twice. */
	nptrs = nSucc + nAnte + nResult + nSucc;

	if (nptrs <= RF_DAG_PTRCACHESIZE) {
		/*
		 * The dag_ptrs field of the node is basically some scribble
		 * space to be used here.  We could get rid of it, and always
		 * allocate the range of pointers, but that's expensive.  So,
		 * we pick a "common case" size for the pointer cache.
		 * Hopefully, we'll find that:
		 * (1) Generally, nptrs doesn't exceed RF_DAG_PTRCACHESIZE by
		 *     just a little bit (the least efficient case)
		 * (2) Generally, nptrs isn't a lot less than RF_DAG_PTRCACHESIZE
		 *     (wasted memory)
		 */
		ptrs = (void **) node->dag_ptrs;
	} else if (nptrs <= (RF_DAGPCACHE_SIZE / sizeof(RF_DagNode_t *))) {
		node->big_dag_ptrs = rf_AllocDAGPCache();
		ptrs = (void **) node->big_dag_ptrs;
	} else {
		RF_MallocAndAdd(ptrs, nptrs * sizeof(void *),
				(void **), alist);
	}
	node->succedents = (nSucc) ? (RF_DagNode_t **) ptrs : NULL;
	node->antecedents = (nAnte) ? (RF_DagNode_t **) (ptrs + nSucc) : NULL;
	node->results = (nResult) ? (void **) (ptrs + nSucc + nAnte) : NULL;
	node->propList = (nSucc) ? (RF_PropHeader_t **) (ptrs + nSucc + nAnte + nResult) : NULL;

	if (nParam) {
		if (nParam <= RF_DAG_PARAMCACHESIZE) {
			node->params = (RF_DagParam_t *) node->dag_params;
		} else if (nParam <= (RF_DAGPCACHE_SIZE / sizeof(RF_DagParam_t))) {
			node->big_dag_params = rf_AllocDAGPCache();
			node->params = node->big_dag_params;
		} else {
			RF_MallocAndAdd(node->params,
					nParam * sizeof(RF_DagParam_t),
					(RF_DagParam_t *), alist);
		}
	} else {
		node->params = NULL;
	}
}
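
/*
 * Example of typical usage (a sketch, not taken verbatim from any one
 * DAG-creation routine -- the function pointers and counts below follow
 * the pattern used by the read DAGs, but the exact values depend on the
 * graph being built):
 *
 *	rf_InitNode(node, rf_wait, RF_FALSE, rf_DiskReadFunc,
 *	    rf_DiskReadUndoFunc, rf_GenericWakeupFunc,
 *	    1, 1, 4, 0, dag_h, "Rod", allocList);
 *
 * i.e. a non-commit read node in the wait state, with one succedent, one
 * antecedent, four parameters and no results.
 */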

/******************************************************************************
 *
 * allocation and deallocation routines
 *
 *****************************************************************************/

void
rf_FreeDAG(RF_DagHeader_t *dag_h)
{
	RF_AccessStripeMapHeader_t *asmap, *t_asmap;
	RF_PhysDiskAddr_t *pda;
	RF_DagNode_t *tmpnode;
	RF_VoidPointerListElem_t *tmpiobuf;
	RF_DagHeader_t *nextDag;

	while (dag_h) {
		nextDag = dag_h->next;
		rf_FreeAllocList(dag_h->allocList);
		for (asmap = dag_h->asmList; asmap;) {
			t_asmap = asmap;
			asmap = asmap->next;
			rf_FreeAccessStripeMap(t_asmap);
		}
		while (dag_h->pda_cleanup_list) {
			pda = dag_h->pda_cleanup_list;
			dag_h->pda_cleanup_list = dag_h->pda_cleanup_list->next;
			rf_FreePhysDiskAddr(pda);
		}
		while (dag_h->iobufs) {
			tmpiobuf = dag_h->iobufs;
			dag_h->iobufs = dag_h->iobufs->next;
			if (tmpiobuf->p)
				rf_FreeIOBuffer(dag_h->raidPtr, tmpiobuf->p);
			rf_FreeVPListElem(tmpiobuf);
		}
		while (dag_h->nodes) {
			tmpnode = dag_h->nodes;
			dag_h->nodes = dag_h->nodes->list_next;
			rf_FreeDAGNode(tmpnode);
		}
		rf_FreeDAGHeader(dag_h);
		dag_h = nextDag;
	}
}

#define RF_MAX_FREE_DAGH 128
#define RF_MIN_FREE_DAGH  32

#define RF_MAX_FREE_DAGNODE 512 /* XXX Tune this... */
#define RF_MIN_FREE_DAGNODE 128 /* XXX Tune this... */

#define RF_MAX_FREE_DAGLIST 128
#define RF_MIN_FREE_DAGLIST  32

#define RF_MAX_FREE_DAGPCACHE 128
#define RF_MIN_FREE_DAGPCACHE   8

#define RF_MAX_FREE_FUNCLIST 128
#define RF_MIN_FREE_FUNCLIST  32

#define RF_MAX_FREE_BUFFERS 128
#define RF_MIN_FREE_BUFFERS  32

static void rf_ShutdownDAGs(void *);
static void
rf_ShutdownDAGs(void *ignored)
{
	pool_destroy(&rf_pools.dagh);
	pool_destroy(&rf_pools.dagnode);
	pool_destroy(&rf_pools.daglist);
	pool_destroy(&rf_pools.dagpcache);
	pool_destroy(&rf_pools.funclist);
}

int
rf_ConfigureDAGs(RF_ShutdownList_t **listp)
{

	rf_pool_init(&rf_pools.dagnode, sizeof(RF_DagNode_t),
		     "rf_dagnode_pl", RF_MIN_FREE_DAGNODE, RF_MAX_FREE_DAGNODE);
	rf_pool_init(&rf_pools.dagh, sizeof(RF_DagHeader_t),
		     "rf_dagh_pl", RF_MIN_FREE_DAGH, RF_MAX_FREE_DAGH);
	rf_pool_init(&rf_pools.daglist, sizeof(RF_DagList_t),
		     "rf_daglist_pl", RF_MIN_FREE_DAGLIST, RF_MAX_FREE_DAGLIST);
	rf_pool_init(&rf_pools.dagpcache, RF_DAGPCACHE_SIZE,
		     "rf_dagpcache_pl", RF_MIN_FREE_DAGPCACHE, RF_MAX_FREE_DAGPCACHE);
	rf_pool_init(&rf_pools.funclist, sizeof(RF_FuncList_t),
		     "rf_funclist_pl", RF_MIN_FREE_FUNCLIST, RF_MAX_FREE_FUNCLIST);
	rf_ShutdownCreate(listp, rf_ShutdownDAGs, NULL);

	return (0);
}

RF_DagHeader_t *
rf_AllocDAGHeader()
{
	RF_DagHeader_t *dh;

	dh = pool_get(&rf_pools.dagh, PR_WAITOK);
	memset((char *) dh, 0, sizeof(RF_DagHeader_t));
	return (dh);
}

void
rf_FreeDAGHeader(RF_DagHeader_t * dh)
{
	pool_put(&rf_pools.dagh, dh);
}

RF_DagNode_t *
rf_AllocDAGNode()
{
	RF_DagNode_t *node;

	node = pool_get(&rf_pools.dagnode, PR_WAITOK);
	memset(node, 0, sizeof(RF_DagNode_t));
	return (node);
}

void
rf_FreeDAGNode(RF_DagNode_t *node)
{
	if (node->big_dag_ptrs) {
		rf_FreeDAGPCache(node->big_dag_ptrs);
	}
	if (node->big_dag_params) {
		rf_FreeDAGPCache(node->big_dag_params);
	}
	pool_put(&rf_pools.dagnode, node);
}

RF_DagList_t *
rf_AllocDAGList()
{
	RF_DagList_t *dagList;

	dagList = pool_get(&rf_pools.daglist, PR_WAITOK);
	memset(dagList, 0, sizeof(RF_DagList_t));

	return (dagList);
}

void
rf_FreeDAGList(RF_DagList_t *dagList)
{
	pool_put(&rf_pools.daglist, dagList);
}

void *
rf_AllocDAGPCache()
{
	void *p;
	p = pool_get(&rf_pools.dagpcache, PR_WAITOK);
	memset(p, 0, RF_DAGPCACHE_SIZE);

	return (p);
}

void
rf_FreeDAGPCache(void *p)
{
	pool_put(&rf_pools.dagpcache, p);
}

RF_FuncList_t *
rf_AllocFuncList()
{
	RF_FuncList_t *funcList;

	funcList = pool_get(&rf_pools.funclist, PR_WAITOK);
	memset(funcList, 0, sizeof(RF_FuncList_t));

	return (funcList);
}

void
rf_FreeFuncList(RF_FuncList_t *funcList)
{
	pool_put(&rf_pools.funclist, funcList);
}
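
/*
 * Sketch of the usual alloc/free pairing (illustrative only; the real
 * DAG-construction code lives in the rf_dag*.c files):
 *
 *	dag_h = rf_AllocDAGHeader();
 *	node = rf_AllocDAGNode();
 *	node->list_next = dag_h->nodes;		-- thread node onto header
 *	dag_h->nodes = node;
 *	...
 *	rf_FreeDAG(dag_h);			-- returns the header, its
 *						   nodes, iobufs and asm's
 *						   to the pools above
 */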

/* allocates a buffer big enough to hold the data described by the
   caller (i.e. the data of the associated PDA).  Glue this buffer
   into our dag_h cleanup structure. */

void *
rf_AllocBuffer(RF_Raid_t *raidPtr, int size, RF_AllocListElem_t *allocList)
{
	void *p;

	RF_MallocAndAdd(p, size, (char *), allocList);
	return ((void *) p);
}


void *
rf_AllocBuffer2(RF_Raid_t *raidPtr, RF_DagHeader_t *dag_h, int size)
{
	RF_VoidPointerListElem_t *vple;
	void *p;

	p = rf_AllocIOBuffer(raidPtr, size);
	vple = rf_AllocVPListElem();
	vple->p = p;
	vple->next = dag_h->iobufs;
	dag_h->iobufs = vple;

	return (p);
}

void *
rf_AllocIOBuffer(RF_Raid_t *raidPtr, int size)
{
	void *p;

	RF_ASSERT(size <= (raidPtr->Layout.sectorsPerStripeUnit <<
			   raidPtr->logBytesPerSector));

	p = malloc( raidPtr->Layout.sectorsPerStripeUnit <<
		    raidPtr->logBytesPerSector,
		    M_RAIDFRAME, M_NOWAIT);
	if (!p) {
		RF_LOCK_MUTEX(raidPtr->mutex);
		if (raidPtr->iobuf_count > 0) {
			p = raidPtr->iobuf;
			raidPtr->iobuf = raidPtr->iobuf->next;
			raidPtr->iobuf_count--;
		} else {
#ifdef DIAGNOSTIC
			printf("raid%d: Help!  Out of emergency buffers!\n", raidPtr->raidid);
#endif
		}
		RF_UNLOCK_MUTEX(raidPtr->mutex);
		if (!p) {
			/* We didn't get a buffer... not much we can do
			   other than wait, and hope that someone frees
			   up memory for us.. */
			p = malloc( raidPtr->Layout.sectorsPerStripeUnit <<
				    raidPtr->logBytesPerSector,
				    M_RAIDFRAME, M_WAITOK);
		}
	}
	return (p);
}

void
rf_FreeIOBuffer(RF_Raid_t *raidPtr, void *p)
{
	RF_LOCK_MUTEX(raidPtr->mutex);
	if (raidPtr->iobuf_count < raidPtr->numEmergencyBuffers) {
		((RF_IOBufHeader_t *)p)->next = raidPtr->iobuf;
		raidPtr->iobuf = p;
		raidPtr->iobuf_count++;
	} else {
		free(p, M_RAIDFRAME);
	}
	RF_UNLOCK_MUTEX(raidPtr->mutex);
}
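
/*
 * Descriptive note on the allocation strategy above: rf_AllocIOBuffer
 * tries three sources in order --
 *
 *	1. malloc(..., M_NOWAIT): cheap, but fails under memory pressure;
 *	2. the per-array emergency pool (raidPtr->iobuf), refilled by
 *	   rf_FreeIOBuffer, so in-flight I/O can complete and release
 *	   memory even when the system is starved;
 *	3. malloc(..., M_WAITOK): sleeps until memory becomes available.
 *
 * Buffers are always a full stripe unit in size (the RF_ASSERT enforces
 * this), which is what allows them to recirculate through the pool.
 */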
"UNLK" : "nop")); 485 return; 486 } 487 if ((df == rf_SimpleXorFunc) || (df == rf_RegularXorFunc) 488 || (df == rf_RecoveryXorFunc)) { 489 printf("result buf 0x%lx\n", (long) node->results[0]); 490 for (i = 0; i < node->numParams - 1; i += 2) { 491 pda = (RF_PhysDiskAddr_t *) node->params[i].p; 492 bufPtr = (RF_PhysDiskAddr_t *) node->params[i + 1].p; 493 printf(" buf 0x%lx c%d offs %ld nsect %d\n", 494 (long) bufPtr, pda->col, 495 (long) pda->startSector, (int) pda->numSector); 496 } 497 return; 498 } 499 #if RF_INCLUDE_PARITYLOGGING > 0 500 if (df == rf_ParityLogOverwriteFunc || df == rf_ParityLogUpdateFunc) { 501 for (i = 0; i < node->numParams - 1; i += 2) { 502 pda = (RF_PhysDiskAddr_t *) node->params[i].p; 503 bufPtr = (RF_PhysDiskAddr_t *) node->params[i + 1].p; 504 printf(" c%d offs %ld nsect %d buf 0x%lx\n", 505 pda->col, (long) pda->startSector, 506 (int) pda->numSector, (long) bufPtr); 507 } 508 return; 509 } 510 #endif /* RF_INCLUDE_PARITYLOGGING > 0 */ 511 512 if ((df == rf_TerminateFunc) || (df == rf_NullNodeFunc)) { 513 printf("\n"); 514 return; 515 } 516 printf("?\n"); 517 } 518 #ifdef DEBUG 519 static void 520 rf_RecurPrintDAG(RF_DagNode_t *node, int depth, int unvisited) 521 { 522 char *anttype; 523 int i; 524 525 node->visited = (unvisited) ? 0 : 1; 526 printf("(%d) %d C%d %s: %s,s%d %d/%d,a%d/%d,p%d,r%d S{", depth, 527 node->nodeNum, node->commitNode, node->name, rf_NodeStatusString(node), 528 node->numSuccedents, node->numSuccFired, node->numSuccDone, 529 node->numAntecedents, node->numAntDone, node->numParams, node->numResults); 530 for (i = 0; i < node->numSuccedents; i++) { 531 printf("%d%s", node->succedents[i]->nodeNum, 532 ((i == node->numSuccedents - 1) ? "\0" : " ")); 533 } 534 printf("} A{"); 535 for (i = 0; i < node->numAntecedents; i++) { 536 switch (node->antType[i]) { 537 case rf_trueData: 538 anttype = "T"; 539 break; 540 case rf_antiData: 541 anttype = "A"; 542 break; 543 case rf_outputData: 544 anttype = "O"; 545 break; 546 case rf_control: 547 anttype = "C"; 548 break; 549 default: 550 anttype = "?"; 551 break; 552 } 553 printf("%d(%s)%s", node->antecedents[i]->nodeNum, anttype, (i == node->numAntecedents - 1) ? "\0" : " "); 554 } 555 printf("}; "); 556 rf_PrintNodeInfoString(node); 557 for (i = 0; i < node->numSuccedents; i++) { 558 if (node->succedents[i]->visited == unvisited) 559 rf_RecurPrintDAG(node->succedents[i], depth + 1, unvisited); 560 } 561 } 562 563 static void 564 rf_PrintDAG(RF_DagHeader_t *dag_h) 565 { 566 int unvisited, i; 567 char *status; 568 569 /* set dag status */ 570 switch (dag_h->status) { 571 case rf_enable: 572 status = "enable"; 573 break; 574 case rf_rollForward: 575 status = "rollForward"; 576 break; 577 case rf_rollBackward: 578 status = "rollBackward"; 579 break; 580 default: 581 status = "illegal!"; 582 break; 583 } 584 /* find out if visited bits are currently set or clear */ 585 unvisited = dag_h->succedents[0]->visited; 586 587 printf("DAG type: %s\n", dag_h->creator); 588 printf("format is (depth) num commit type: status,nSucc nSuccFired/nSuccDone,nAnte/nAnteDone,nParam,nResult S{x} A{x(type)}; info\n"); 589 printf("(0) %d Hdr: %s, s%d, (commit %d/%d) S{", dag_h->nodeNum, 590 status, dag_h->numSuccedents, dag_h->numCommitNodes, dag_h->numCommits); 591 for (i = 0; i < dag_h->numSuccedents; i++) { 592 printf("%d%s", dag_h->succedents[i]->nodeNum, 593 ((i == dag_h->numSuccedents - 1) ? 
"\0" : " ")); 594 } 595 printf("};\n"); 596 for (i = 0; i < dag_h->numSuccedents; i++) { 597 if (dag_h->succedents[i]->visited == unvisited) 598 rf_RecurPrintDAG(dag_h->succedents[i], 1, unvisited); 599 } 600 } 601 #endif 602 /* assigns node numbers */ 603 int 604 rf_AssignNodeNums(RF_DagHeader_t * dag_h) 605 { 606 int unvisited, i, nnum; 607 RF_DagNode_t *node; 608 609 nnum = 0; 610 unvisited = dag_h->succedents[0]->visited; 611 612 dag_h->nodeNum = nnum++; 613 for (i = 0; i < dag_h->numSuccedents; i++) { 614 node = dag_h->succedents[i]; 615 if (node->visited == unvisited) { 616 nnum = rf_RecurAssignNodeNums(dag_h->succedents[i], nnum, unvisited); 617 } 618 } 619 return (nnum); 620 } 621 622 int 623 rf_RecurAssignNodeNums(RF_DagNode_t *node, int num, int unvisited) 624 { 625 int i; 626 627 node->visited = (unvisited) ? 0 : 1; 628 629 node->nodeNum = num++; 630 for (i = 0; i < node->numSuccedents; i++) { 631 if (node->succedents[i]->visited == unvisited) { 632 num = rf_RecurAssignNodeNums(node->succedents[i], num, unvisited); 633 } 634 } 635 return (num); 636 } 637 /* set the header pointers in each node to "newptr" */ 638 void 639 rf_ResetDAGHeaderPointers(RF_DagHeader_t *dag_h, RF_DagHeader_t *newptr) 640 { 641 int i; 642 for (i = 0; i < dag_h->numSuccedents; i++) 643 if (dag_h->succedents[i]->dagHdr != newptr) 644 rf_RecurResetDAGHeaderPointers(dag_h->succedents[i], newptr); 645 } 646 647 void 648 rf_RecurResetDAGHeaderPointers(RF_DagNode_t *node, RF_DagHeader_t *newptr) 649 { 650 int i; 651 node->dagHdr = newptr; 652 for (i = 0; i < node->numSuccedents; i++) 653 if (node->succedents[i]->dagHdr != newptr) 654 rf_RecurResetDAGHeaderPointers(node->succedents[i], newptr); 655 } 656 657 658 void 659 rf_PrintDAGList(RF_DagHeader_t * dag_h) 660 { 661 int i = 0; 662 663 for (; dag_h; dag_h = dag_h->next) { 664 rf_AssignNodeNums(dag_h); 665 printf("\n\nDAG %d IN LIST:\n", i++); 666 rf_PrintDAG(dag_h); 667 } 668 } 669 670 static int 671 rf_ValidateBranch(RF_DagNode_t *node, int *scount, int *acount, 672 RF_DagNode_t **nodes, int unvisited) 673 { 674 int i, retcode = 0; 675 676 /* construct an array of node pointers indexed by node num */ 677 node->visited = (unvisited) ? 

static int
rf_ValidateBranch(RF_DagNode_t *node, int *scount, int *acount,
		  RF_DagNode_t **nodes, int unvisited)
{
	int i, retcode = 0;

	/* construct an array of node pointers indexed by node num */
	node->visited = (unvisited) ? 0 : 1;
	nodes[node->nodeNum] = node;

	if (node->next != NULL) {
		printf("INVALID DAG: next pointer in node is not NULL\n");
		retcode = 1;
	}
	if (node->status != rf_wait) {
		printf("INVALID DAG: Node status is not wait\n");
		retcode = 1;
	}
	if (node->numAntDone != 0) {
		printf("INVALID DAG: numAntDone is not zero\n");
		retcode = 1;
	}
	if (node->doFunc == rf_TerminateFunc) {
		if (node->numSuccedents != 0) {
			printf("INVALID DAG: Terminator node has succedents\n");
			retcode = 1;
		}
	} else {
		if (node->numSuccedents == 0) {
			printf("INVALID DAG: Non-terminator node has no succedents\n");
			retcode = 1;
		}
	}
	for (i = 0; i < node->numSuccedents; i++) {
		if (!node->succedents[i]) {
			printf("INVALID DAG: succedent %d of node %s is NULL\n", i, node->name);
			retcode = 1;
		}
		scount[node->succedents[i]->nodeNum]++;
	}
	for (i = 0; i < node->numAntecedents; i++) {
		if (!node->antecedents[i]) {
			printf("INVALID DAG: antecedent %d of node %s is NULL\n", i, node->name);
			retcode = 1;
		}
		acount[node->antecedents[i]->nodeNum]++;
	}
	for (i = 0; i < node->numSuccedents; i++) {
		if (node->succedents[i]->visited == unvisited) {
			if (rf_ValidateBranch(node->succedents[i], scount,
				acount, nodes, unvisited)) {
				retcode = 1;
			}
		}
	}
	return (retcode);
}

static void
rf_ValidateBranchVisitedBits(RF_DagNode_t *node, int unvisited, int rl)
{
	int i;

	RF_ASSERT(node->visited == unvisited);
	for (i = 0; i < node->numSuccedents; i++) {
		if (node->succedents[i] == NULL) {
			printf("node=%lx node->succedents[%d] is NULL\n", (long) node, i);
			RF_ASSERT(0);
		}
		rf_ValidateBranchVisitedBits(node->succedents[i], unvisited, rl + 1);
	}
}

/* NOTE:  never call this on a big dag, because it is exponential
 * in execution time
 */
static void
rf_ValidateVisitedBits(RF_DagHeader_t *dag)
{
	int i, unvisited;

	unvisited = dag->succedents[0]->visited;

	for (i = 0; i < dag->numSuccedents; i++) {
		if (dag->succedents[i] == NULL) {
			printf("dag=%lx dag->succedents[%d] is NULL\n", (long) dag, i);
			RF_ASSERT(0);
		}
		rf_ValidateBranchVisitedBits(dag->succedents[i], unvisited, 0);
	}
}
/* validate a DAG.  _at entry_ verify that:
 *   -- numNodesCompleted is zero
 *   -- node queue is null
 *   -- dag status is rf_enable
 *   -- next pointer is null on every node
 *   -- all nodes have status wait
 *   -- numAntDone is zero in all nodes
 *   -- terminator node has zero successors
 *   -- no other node besides terminator has zero successors
 *   -- no successor or antecedent pointer in a node is NULL
 *   -- number of times that each node appears as a successor of another node
 *      is equal to the antecedent count on that node
 *   -- number of times that each node appears as an antecedent of another node
 *      is equal to the succedent count on that node
 *   -- what else?
 */
int
rf_ValidateDAG(RF_DagHeader_t *dag_h)
{
	int i, nodecount;
	int *scount, *acount;	/* per-node successor and antecedent counts */
	RF_DagNode_t **nodes;	/* array of ptrs to nodes in dag */
	int retcode = 0;
	int unvisited;
	int commitNodeCount = 0;

	if (rf_validateVisitedDebug)
		rf_ValidateVisitedBits(dag_h);

	if (dag_h->numNodesCompleted != 0) {
		printf("INVALID DAG: num nodes completed is %d, should be 0\n", dag_h->numNodesCompleted);
		retcode = 1;
		goto validate_dag_bad;
	}
	if (dag_h->status != rf_enable) {
		printf("INVALID DAG: not enabled\n");
		retcode = 1;
		goto validate_dag_bad;
	}
	if (dag_h->numCommits != 0) {
		printf("INVALID DAG: numCommits != 0 (%d)\n", dag_h->numCommits);
		retcode = 1;
		goto validate_dag_bad;
	}
	if (dag_h->numSuccedents != 1) {
		/* currently, all dags must have only one succedent */
		printf("INVALID DAG: numSuccedents != 1 (%d)\n", dag_h->numSuccedents);
		retcode = 1;
		goto validate_dag_bad;
	}
	nodecount = rf_AssignNodeNums(dag_h);

	unvisited = dag_h->succedents[0]->visited;

	RF_Malloc(scount, nodecount * sizeof(int), (int *));
	RF_Malloc(acount, nodecount * sizeof(int), (int *));
	RF_Malloc(nodes, nodecount * sizeof(RF_DagNode_t *),
		  (RF_DagNode_t **));
	for (i = 0; i < dag_h->numSuccedents; i++) {
		if ((dag_h->succedents[i]->visited == unvisited)
		    && rf_ValidateBranch(dag_h->succedents[i], scount,
			acount, nodes, unvisited)) {
			retcode = 1;
		}
	}
	/* start at 1 to skip the header node */
	for (i = 1; i < nodecount; i++) {
		if (nodes[i]->commitNode)
			commitNodeCount++;
		if (nodes[i]->doFunc == NULL) {
			printf("INVALID DAG: node %s has an undefined doFunc\n", nodes[i]->name);
			retcode = 1;
			goto validate_dag_out;
		}
		if (nodes[i]->undoFunc == NULL) {
			printf("INVALID DAG: node %s has an undefined undoFunc\n", nodes[i]->name);
			retcode = 1;
			goto validate_dag_out;
		}
		if (nodes[i]->numAntecedents != scount[nodes[i]->nodeNum]) {
			printf("INVALID DAG: node %s has %d antecedents but appears as a succedent %d times\n",
			    nodes[i]->name, nodes[i]->numAntecedents, scount[nodes[i]->nodeNum]);
			retcode = 1;
			goto validate_dag_out;
		}
		if (nodes[i]->numSuccedents != acount[nodes[i]->nodeNum]) {
			printf("INVALID DAG: node %s has %d succedents but appears as an antecedent %d times\n",
			    nodes[i]->name, nodes[i]->numSuccedents, acount[nodes[i]->nodeNum]);
			retcode = 1;
			goto validate_dag_out;
		}
	}

	if (dag_h->numCommitNodes != commitNodeCount) {
		printf("INVALID DAG: incorrect commit node count.  hdr->numCommitNodes (%d) found (%d) commit nodes in graph\n",
		    dag_h->numCommitNodes, commitNodeCount);
		retcode = 1;
		goto validate_dag_out;
	}
validate_dag_out:
	RF_Free(scount, nodecount * sizeof(int));
	RF_Free(acount, nodecount * sizeof(int));
	RF_Free(nodes, nodecount * sizeof(RF_DagNode_t *));
	if (retcode)
		rf_PrintDAGList(dag_h);

	if (rf_validateVisitedDebug)
		rf_ValidateVisitedBits(dag_h);

	return (retcode);

validate_dag_bad:
	rf_PrintDAGList(dag_h);
	return (retcode);
}

#endif /* RF_DEBUG_VALIDATE_DAG */

/******************************************************************************
 *
 * misc construction routines
 *
 *****************************************************************************/

void
rf_redirect_asm(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap)
{
	int ds = (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) ? 1 : 0;
	int fcol = raidPtr->reconControl->fcol;
	int scol = raidPtr->reconControl->spareCol;
	RF_PhysDiskAddr_t *pda;

	RF_ASSERT(raidPtr->status == rf_rs_reconstructing);
	for (pda = asmap->physInfo; pda; pda = pda->next) {
		if (pda->col == fcol) {
#if RF_DEBUG_DAG
			if (rf_dagDebug) {
				if (!rf_CheckRUReconstructed(raidPtr->reconControl->reconMap,
					pda->startSector)) {
					RF_PANIC();
				}
			}
#endif
			/* printf("Remapped data for large write\n"); */
			if (ds) {
				raidPtr->Layout.map->MapSector(raidPtr, pda->raidAddress,
				    &pda->col, &pda->startSector, RF_REMAP);
			} else {
				pda->col = scol;
			}
		}
	}
	for (pda = asmap->parityInfo; pda; pda = pda->next) {
		if (pda->col == fcol) {
#if RF_DEBUG_DAG
			if (rf_dagDebug) {
				if (!rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, pda->startSector)) {
					RF_PANIC();
				}
			}
#endif
		}
		if (ds) {
			(raidPtr->Layout.map->MapParity) (raidPtr, pda->raidAddress, &pda->col, &pda->startSector, RF_REMAP);
		} else {
			pda->col = scol;
		}
	}
}
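
/*
 * Descriptive note on the two redirection modes above: with a
 * distributed spare (RF_DISTRIBUTE_SPARE set in the layout flags) the
 * failed unit's new location is computed by the layout's MapSector /
 * MapParity routine with RF_REMAP, so it may land on any surviving
 * column; with a dedicated spare, every redirected access simply moves
 * to the spare column (scol) at the same sector offset.
 */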

/* this routine allocates read buffers and generates stripe maps for the
 * regions of the array from the start of the stripe to the start of the
 * access, and from the end of the access to the end of the stripe.  It also
 * computes and returns the number of DAG nodes needed to read all this data.
 * Note that this routine does the wrong thing if the access is fully
 * contained within one stripe unit, so we RF_ASSERT against this case at the
 * start.
 *
 * layoutPtr - in: layout information
 * asmap     - in: access stripe map
 * dag_h     - in: header of the dag to create
 * new_asm_h - in: ptr to array of 2 headers, to be filled in
 * nRodNodes - out: num nodes to be generated to read unaccessed data
 * sosBuffer, eosBuffer - out: pointers to newly allocated buffers
 */
void
rf_MapUnaccessedPortionOfStripe(RF_Raid_t *raidPtr,
				RF_RaidLayout_t *layoutPtr,
				RF_AccessStripeMap_t *asmap,
				RF_DagHeader_t *dag_h,
				RF_AccessStripeMapHeader_t **new_asm_h,
				int *nRodNodes,
				char **sosBuffer, char **eosBuffer,
				RF_AllocListElem_t *allocList)
{
	RF_RaidAddr_t sosRaidAddress, eosRaidAddress;
	RF_SectorNum_t sosNumSector, eosNumSector;

	RF_ASSERT(asmap->numStripeUnitsAccessed > (layoutPtr->numDataCol / 2));
	/* generate an access map for the region of the array from start of
	 * stripe to start of access */
	new_asm_h[0] = new_asm_h[1] = NULL;
	*nRodNodes = 0;
	if (!rf_RaidAddressStripeAligned(layoutPtr, asmap->raidAddress)) {
		sosRaidAddress = rf_RaidAddressOfPrevStripeBoundary(layoutPtr, asmap->raidAddress);
		sosNumSector = asmap->raidAddress - sosRaidAddress;
		*sosBuffer = rf_AllocBuffer(raidPtr, rf_RaidAddressToByte(raidPtr, sosNumSector), allocList);
		new_asm_h[0] = rf_MapAccess(raidPtr, sosRaidAddress, sosNumSector, *sosBuffer, RF_DONT_REMAP);
		new_asm_h[0]->next = dag_h->asmList;
		dag_h->asmList = new_asm_h[0];
		*nRodNodes += new_asm_h[0]->stripeMap->numStripeUnitsAccessed;

		RF_ASSERT(new_asm_h[0]->stripeMap->next == NULL);
		/* we're totally within one stripe here */
		if (asmap->flags & RF_ASM_REDIR_LARGE_WRITE)
			rf_redirect_asm(raidPtr, new_asm_h[0]->stripeMap);
	}
	/* generate an access map for the region of the array from end of
	 * access to end of stripe */
	if (!rf_RaidAddressStripeAligned(layoutPtr, asmap->endRaidAddress)) {
		eosRaidAddress = asmap->endRaidAddress;
		eosNumSector = rf_RaidAddressOfNextStripeBoundary(layoutPtr, eosRaidAddress) - eosRaidAddress;
		*eosBuffer = rf_AllocBuffer(raidPtr, rf_RaidAddressToByte(raidPtr, eosNumSector), allocList);
		new_asm_h[1] = rf_MapAccess(raidPtr, eosRaidAddress, eosNumSector, *eosBuffer, RF_DONT_REMAP);
		new_asm_h[1]->next = dag_h->asmList;
		dag_h->asmList = new_asm_h[1];
		*nRodNodes += new_asm_h[1]->stripeMap->numStripeUnitsAccessed;

		RF_ASSERT(new_asm_h[1]->stripeMap->next == NULL);
		/* we're totally within one stripe here */
		if (asmap->flags & RF_ASM_REDIR_LARGE_WRITE)
			rf_redirect_asm(raidPtr, new_asm_h[1]->stripeMap);
	}
}



/* returns non-zero if the indicated ranges of stripe unit offsets overlap */
int
rf_PDAOverlap(RF_RaidLayout_t *layoutPtr,
	      RF_PhysDiskAddr_t *src, RF_PhysDiskAddr_t *dest)
{
	RF_SectorNum_t soffs = rf_StripeUnitOffset(layoutPtr, src->startSector);
	RF_SectorNum_t doffs = rf_StripeUnitOffset(layoutPtr, dest->startSector);
	/* use -1 to be sure we stay within SU */
	RF_SectorNum_t send = rf_StripeUnitOffset(layoutPtr, src->startSector + src->numSector - 1);
	RF_SectorNum_t dend = rf_StripeUnitOffset(layoutPtr, dest->startSector + dest->numSector - 1);
	return ((RF_MAX(soffs, doffs) <= RF_MIN(send, dend)) ? 1 : 0);
}
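
/*
 * Worked example for rf_PDAOverlap (hypothetical numbers, assuming a
 * 32-sector stripe unit): a src PDA covering stripe-unit offsets 8..15
 * and a dest PDA covering offsets 12..20 give
 *
 *	RF_MAX(soffs, doffs) = RF_MAX(8, 12)  = 12
 *	RF_MIN(send, dend)   = RF_MIN(15, 20) = 15
 *
 * and 12 <= 15, so the ranges overlap and the routine returns 1.
 * Disjoint ranges (e.g. 0..7 and 16..20) give 16 <= 7, i.e. false.
 */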

/* GenerateFailedAccessASMs
 *
 * this routine figures out what portion of the stripe needs to be read
 * to effect the degraded read or write operation.  Its primary function
 * is to identify everything required to recover the data, and then
 * eliminate anything that is already being accessed by the user.
 *
 * The main result is two new ASMs, one for the region from the start of the
 * stripe to the start of the access, and one for the region from the end of
 * the access to the end of the stripe.  These ASMs describe everything that
 * needs to be read to effect the degraded access.  Other results are:
 *    nXorBufs -- the total number of buffers that need to be XORed together to
 *                recover the lost data,
 *    rpBufPtr -- ptr to a newly-allocated buffer to hold the parity.  If NULL
 *                at entry, not allocated.
 *    overlappingPDAs --
 *                describes which of the non-failed PDAs in the user access
 *                overlap data that needs to be read to effect recovery.
 *                overlappingPDAs[i]==1 if and only if, neglecting the failed
 *                PDA, the ith pda in the input asm overlaps data that needs
 *                to be read for recovery.
 */
/* in: asm - ASM for the actual access, one stripe only */
/* in: failedPDA - which component of the access has failed */
/* in: dag_h - header of the DAG we're going to create */
/* out: new_asm_h - the two new ASMs */
/* out: nXorBufs - the total number of xor bufs required */
/* out: rpBufPtr - a buffer for the parity read */
void
rf_GenerateFailedAccessASMs(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap,
			    RF_PhysDiskAddr_t *failedPDA,
			    RF_DagHeader_t *dag_h,
			    RF_AccessStripeMapHeader_t **new_asm_h,
			    int *nXorBufs, char **rpBufPtr,
			    char *overlappingPDAs,
			    RF_AllocListElem_t *allocList)
{
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);

	/* s=start, e=end, s=stripe, a=access, f=failed, su=stripe unit */
	RF_RaidAddr_t sosAddr, sosEndAddr, eosStartAddr, eosAddr;
	RF_PhysDiskAddr_t *pda;
	int foundit, i;

	foundit = 0;
	/* first compute the following raid addresses: start of stripe,
	 * (sosAddr) MIN(start of access, start of failed SU), (sosEndAddr)
	 * MAX(end of access, end of failed SU), (eosStartAddr) end of
	 * stripe (i.e. start of next stripe) (eosAddr) */
	sosAddr = rf_RaidAddressOfPrevStripeBoundary(layoutPtr, asmap->raidAddress);
	sosEndAddr = RF_MIN(asmap->raidAddress, rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, failedPDA->raidAddress));
	eosStartAddr = RF_MAX(asmap->endRaidAddress, rf_RaidAddressOfNextStripeUnitBoundary(layoutPtr, failedPDA->raidAddress));
	eosAddr = rf_RaidAddressOfNextStripeBoundary(layoutPtr, asmap->raidAddress);

	/* now generate access stripe maps for each of the above regions of
	 * the stripe.  Use a dummy (NULL) buf ptr for now */

	new_asm_h[0] = (sosAddr != sosEndAddr) ? rf_MapAccess(raidPtr, sosAddr, sosEndAddr - sosAddr, NULL, RF_DONT_REMAP) : NULL;
	new_asm_h[1] = (eosStartAddr != eosAddr) ? rf_MapAccess(raidPtr, eosStartAddr, eosAddr - eosStartAddr, NULL, RF_DONT_REMAP) : NULL;

	/* walk through the PDAs and range-restrict each SU to the region of
	 * the SU touched on the failed PDA.  also compute total data buffer
	 * space requirements in this step.  Ignore the parity for now. */
	/* Also count nodes to find out how many bufs need to be xored together */
	(*nXorBufs) = 1;	/* in read case, 1 is for parity.  In write
				 * case, 1 is for failed data */
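
	/*
	 * Descriptive note on the accounting below: by the time this
	 * routine returns, *nXorBufs will be
	 *
	 *	1				(parity, or the failed data)
	 *	+ SUs read in new_asm_h[0]	(start-of-stripe region)
	 *	+ SUs read in new_asm_h[1]	(end-of-stripe region)
	 *	+ user PDAs that overlap the failed unit (final loop below)
	 */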

	if (new_asm_h[0]) {
		new_asm_h[0]->next = dag_h->asmList;
		dag_h->asmList = new_asm_h[0];
		for (pda = new_asm_h[0]->stripeMap->physInfo; pda; pda = pda->next) {
			rf_RangeRestrictPDA(raidPtr, failedPDA, pda, RF_RESTRICT_NOBUFFER, 0);
			pda->bufPtr = rf_AllocBuffer(raidPtr, pda->numSector << raidPtr->logBytesPerSector, allocList);
		}
		(*nXorBufs) += new_asm_h[0]->stripeMap->numStripeUnitsAccessed;
	}
	if (new_asm_h[1]) {
		new_asm_h[1]->next = dag_h->asmList;
		dag_h->asmList = new_asm_h[1];
		for (pda = new_asm_h[1]->stripeMap->physInfo; pda; pda = pda->next) {
			rf_RangeRestrictPDA(raidPtr, failedPDA, pda, RF_RESTRICT_NOBUFFER, 0);
			pda->bufPtr = rf_AllocBuffer(raidPtr, pda->numSector << raidPtr->logBytesPerSector, allocList);
		}
		(*nXorBufs) += new_asm_h[1]->stripeMap->numStripeUnitsAccessed;
	}

	/* allocate a buffer for parity */
	if (rpBufPtr)
		*rpBufPtr = rf_AllocBuffer(raidPtr, failedPDA->numSector << raidPtr->logBytesPerSector, allocList);

	/* the last step is to figure out how many more distinct buffers need
	 * to get xor'd to produce the missing unit.  there's one for each
	 * user-data read node that overlaps the portion of the failed unit
	 * being accessed */

	for (foundit = i = 0, pda = asmap->physInfo; pda; i++, pda = pda->next) {
		if (pda == failedPDA) {
			i--;
			foundit = 1;
			continue;
		}
		if (rf_PDAOverlap(layoutPtr, pda, failedPDA)) {
			overlappingPDAs[i] = 1;
			(*nXorBufs)++;
		}
	}
	if (!foundit) {
		RF_ERRORMSG("GenerateFailedAccessASMs: did not find failedPDA in asm list\n");
		RF_ASSERT(0);
	}
#if RF_DEBUG_DAG
	if (rf_degDagDebug) {
		if (new_asm_h[0]) {
			printf("First asm:\n");
			rf_PrintFullAccessStripeMap(new_asm_h[0], 1);
		}
		if (new_asm_h[1]) {
			printf("Second asm:\n");
			rf_PrintFullAccessStripeMap(new_asm_h[1], 1);
		}
	}
#endif
}
1141 * 1142 * For example: s = sectors within SU indicated by source PDA 1143 * d = sectors within SU indicated by dest PDA 1144 * r = results, stored in dest PDA 1145 * 1146 * |--------------- one stripe unit ---------------------| 1147 * | sssssssssssssssssssssssssssssssss | 1148 * | ddddddddddddddddddddddddddddddddddddddddddddd | 1149 * | rrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr | 1150 * 1151 * Another example: 1152 * 1153 * |--------------- one stripe unit ---------------------| 1154 * | sssssssssssssssssssssssssssssssss | 1155 * | ddddddddddddddddddddddd | 1156 * | rrrrrrrrrrrrrrrr | 1157 * 1158 */ 1159 void 1160 rf_RangeRestrictPDA(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *src, 1161 RF_PhysDiskAddr_t *dest, int dobuffer, int doraidaddr) 1162 { 1163 RF_RaidLayout_t *layoutPtr = &raidPtr->Layout; 1164 RF_SectorNum_t soffs = rf_StripeUnitOffset(layoutPtr, src->startSector); 1165 RF_SectorNum_t doffs = rf_StripeUnitOffset(layoutPtr, dest->startSector); 1166 RF_SectorNum_t send = rf_StripeUnitOffset(layoutPtr, src->startSector + src->numSector - 1); /* use -1 to be sure we 1167 * stay within SU */ 1168 RF_SectorNum_t dend = rf_StripeUnitOffset(layoutPtr, dest->startSector + dest->numSector - 1); 1169 RF_SectorNum_t subAddr = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, dest->startSector); /* stripe unit boundary */ 1170 1171 dest->startSector = subAddr + RF_MAX(soffs, doffs); 1172 dest->numSector = subAddr + RF_MIN(send, dend) + 1 - dest->startSector; 1173 1174 if (dobuffer) 1175 dest->bufPtr += (soffs > doffs) ? rf_RaidAddressToByte(raidPtr, soffs - doffs) : 0; 1176 if (doraidaddr) { 1177 dest->raidAddress = rf_RaidAddressOfPrevStripeUnitBoundary(layoutPtr, dest->raidAddress) + 1178 rf_StripeUnitOffset(layoutPtr, dest->startSector); 1179 } 1180 } 1181 1182 #if (RF_INCLUDE_CHAINDECLUSTER > 0) 1183 1184 /* 1185 * Want the highest of these primes to be the largest one 1186 * less than the max expected number of columns (won't hurt 1187 * to be too small or too large, but won't be optimal, either) 1188 * --jimz 1189 */ 1190 #define NLOWPRIMES 8 1191 static int lowprimes[NLOWPRIMES] = {2, 3, 5, 7, 11, 13, 17, 19}; 1192 /***************************************************************************** 1193 * compute the workload shift factor. (chained declustering) 1194 * 1195 * return nonzero if access should shift to secondary, otherwise, 1196 * access is to primary 1197 *****************************************************************************/ 1198 int 1199 rf_compute_workload_shift(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *pda) 1200 { 1201 /* 1202 * variables: 1203 * d = column of disk containing primary 1204 * f = column of failed disk 1205 * n = number of disks in array 1206 * sd = "shift distance" (number of columns that d is to the right of f) 1207 * v = numerator of redirection ratio 1208 * k = denominator of redirection ratio 1209 */ 1210 RF_RowCol_t d, f, sd, n; 1211 int k, v, ret, i; 1212 1213 n = raidPtr->numCol; 1214 1215 /* assign column of primary copy to d */ 1216 d = pda->col; 1217 1218 /* assign column of dead disk to f */ 1219 for (f = 0; ((!RF_DEAD_DISK(raidPtr->Disks[f].status)) && (f < n)); f++); 1220 1221 RF_ASSERT(f < n); 1222 RF_ASSERT(f != d); 1223 1224 sd = (f > d) ? (n + d - f) : (d - f); 1225 RF_ASSERT(sd < n); 1226 1227 /* 1228 * v of every k accesses should be redirected 1229 * 1230 * v/k := (n-1-sd)/(n-1) 1231 */ 1232 v = (n - 1 - sd); 1233 k = (n - 1); 1234 1235 #if 1 1236 /* 1237 * XXX 1238 * Is this worth it? 
1239 * 1240 * Now reduce the fraction, by repeatedly factoring 1241 * out primes (just like they teach in elementary school!) 1242 */ 1243 for (i = 0; i < NLOWPRIMES; i++) { 1244 if (lowprimes[i] > v) 1245 break; 1246 while (((v % lowprimes[i]) == 0) && ((k % lowprimes[i]) == 0)) { 1247 v /= lowprimes[i]; 1248 k /= lowprimes[i]; 1249 } 1250 } 1251 #endif 1252 1253 raidPtr->hist_diskreq[d]++; 1254 if (raidPtr->hist_diskreq[d] > v) { 1255 ret = 0; /* do not redirect */ 1256 } else { 1257 ret = 1; /* redirect */ 1258 } 1259 1260 #if 0 1261 printf("d=%d f=%d sd=%d v=%d k=%d ret=%d h=%d\n", d, f, sd, v, k, ret, 1262 raidPtr->hist_diskreq[d]); 1263 #endif 1264 1265 if (raidPtr->hist_diskreq[d] >= k) { 1266 /* reset counter */ 1267 raidPtr->hist_diskreq[d] = 0; 1268 } 1269 return (ret); 1270 } 1271 #endif /* (RF_INCLUDE_CHAINDECLUSTER > 0) */ 1272 1273 /* 1274 * Disk selection routines 1275 */ 1276 1277 /* 1278 * Selects the disk with the shortest queue from a mirror pair. 1279 * Both the disk I/Os queued in RAIDframe as well as those at the physical 1280 * disk are counted as members of the "queue" 1281 */ 1282 void 1283 rf_SelectMirrorDiskIdle(RF_DagNode_t * node) 1284 { 1285 RF_Raid_t *raidPtr = (RF_Raid_t *) node->dagHdr->raidPtr; 1286 RF_RowCol_t colData, colMirror; 1287 int dataQueueLength, mirrorQueueLength, usemirror; 1288 RF_PhysDiskAddr_t *data_pda = (RF_PhysDiskAddr_t *) node->params[0].p; 1289 RF_PhysDiskAddr_t *mirror_pda = (RF_PhysDiskAddr_t *) node->params[4].p; 1290 RF_PhysDiskAddr_t *tmp_pda; 1291 RF_RaidDisk_t *disks = raidPtr->Disks; 1292 RF_DiskQueue_t *dqs = raidPtr->Queues, *dataQueue, *mirrorQueue; 1293 1294 /* return the [row col] of the disk with the shortest queue */ 1295 colData = data_pda->col; 1296 colMirror = mirror_pda->col; 1297 dataQueue = &(dqs[colData]); 1298 mirrorQueue = &(dqs[colMirror]); 1299 1300 #ifdef RF_LOCK_QUEUES_TO_READ_LEN 1301 RF_LOCK_QUEUE_MUTEX(dataQueue, "SelectMirrorDiskIdle"); 1302 #endif /* RF_LOCK_QUEUES_TO_READ_LEN */ 1303 dataQueueLength = dataQueue->queueLength + dataQueue->numOutstanding; 1304 #ifdef RF_LOCK_QUEUES_TO_READ_LEN 1305 RF_UNLOCK_QUEUE_MUTEX(dataQueue, "SelectMirrorDiskIdle"); 1306 RF_LOCK_QUEUE_MUTEX(mirrorQueue, "SelectMirrorDiskIdle"); 1307 #endif /* RF_LOCK_QUEUES_TO_READ_LEN */ 1308 mirrorQueueLength = mirrorQueue->queueLength + mirrorQueue->numOutstanding; 1309 #ifdef RF_LOCK_QUEUES_TO_READ_LEN 1310 RF_UNLOCK_QUEUE_MUTEX(mirrorQueue, "SelectMirrorDiskIdle"); 1311 #endif /* RF_LOCK_QUEUES_TO_READ_LEN */ 1312 1313 usemirror = 0; 1314 if (RF_DEAD_DISK(disks[colMirror].status)) { 1315 usemirror = 0; 1316 } else 1317 if (RF_DEAD_DISK(disks[colData].status)) { 1318 usemirror = 1; 1319 } else 1320 if (raidPtr->parity_good == RF_RAID_DIRTY) { 1321 /* Trust only the main disk */ 1322 usemirror = 0; 1323 } else 1324 if (dataQueueLength < mirrorQueueLength) { 1325 usemirror = 0; 1326 } else 1327 if (mirrorQueueLength < dataQueueLength) { 1328 usemirror = 1; 1329 } else { 1330 /* queues are equal length. attempt 1331 * cleverness. 
#if (RF_INCLUDE_CHAINDECLUSTER > 0) || (RF_INCLUDE_INTERDECLUSTER > 0) || (RF_DEBUG_VALIDATE_DAG > 0)
/*
 * Do simple partitioning.  This assumes that
 * the data and parity disks are laid out identically.
 */
void
rf_SelectMirrorDiskPartition(RF_DagNode_t * node)
{
	RF_Raid_t *raidPtr = (RF_Raid_t *) node->dagHdr->raidPtr;
	RF_RowCol_t colData, colMirror;
	RF_PhysDiskAddr_t *data_pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	RF_PhysDiskAddr_t *mirror_pda = (RF_PhysDiskAddr_t *) node->params[4].p;
	RF_PhysDiskAddr_t *tmp_pda;
	RF_RaidDisk_t *disks = raidPtr->Disks;
	int usemirror;

	/* return the [row col] of the disk with the shortest queue */
	colData = data_pda->col;
	colMirror = mirror_pda->col;

	usemirror = 0;
	if (RF_DEAD_DISK(disks[colMirror].status)) {
		usemirror = 0;
	} else if (RF_DEAD_DISK(disks[colData].status)) {
		usemirror = 1;
	} else if (raidPtr->parity_good == RF_RAID_DIRTY) {
		/* Trust only the main disk */
		usemirror = 0;
	} else if (data_pda->startSector <
		   (disks[colData].numBlocks / 2)) {
		/* accesses to the lower half of the address space go to
		 * the data disk, the rest to the mirror */
		usemirror = 0;
	} else {
		usemirror = 1;
	}

	if (usemirror) {
		/* use mirror (parity) disk, swap params 0 & 4 */
		tmp_pda = data_pda;
		node->params[0].p = mirror_pda;
		node->params[4].p = tmp_pda;
	} else {
		/* use data disk, leave param 0 unchanged */
	}
}
#endif