/* $NetBSD: rf_parityloggingdags.c,v 1.4 2000/01/07 03:41:04 oster Exp $ */
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: William V. Courtright II
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "rf_archs.h"

#if RF_INCLUDE_PARITYLOGGING > 0

/*
 * DAGs specific to parity logging are created here.
 */

#include "rf_types.h"
#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_dagutils.h"
#include "rf_dagfuncs.h"
#include "rf_debugMem.h"
#include "rf_paritylog.h"
#include "rf_memchunk.h"
#include "rf_general.h"

#include "rf_parityloggingdags.h"

/******************************************************************************
 *
 * creates a DAG to perform a large-write operation:
 *
 *           / Rod \           / Wnd \
 * H -- NIL - Rod - NIL - Wnd ------- NIL - T
 *           \ Rod /           \ Xor - Lpo /
 *
 * The writes are not done until the reads complete because if they were
 * done in parallel, a failure on one of the reads could leave the parity
 * in an inconsistent state, so that the retry with a new DAG would produce
 * erroneous parity.
 *
 * Note: this DAG has the nasty property that none of the buffers allocated
 * for reading old data can be freed until the XOR node fires.  Need to fix
 * this.
 *
 * The last two arguments are the number of faults tolerated, and the
 * function for the redundancy calculation.  The undo for the redundancy
 * calc is assumed to be null.
 *
 *****************************************************************************/
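/*
 * Worked example (illustrative only): for a write supplying new data for
 * three stripe units that must read back one untouched stripe unit, we get
 * nWndNodes = 3 and nRodNodes = 1.  The block NIL node then has
 * nRodNodes + 1 = 2 succedents (the Rod node plus the sync NIL node), the
 * sync NIL node has nWndNodes + 1 = 4 succedents (three Wnd nodes plus the
 * Xor node), and the unblock NIL node collects nWndNodes + 1 = 4
 * antecedents (three Wnd nodes plus the Lpo node), matching the fan-in and
 * fan-out counts passed to rf_InitNode below.
 */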
void
rf_CommonCreateParityLoggingLargeWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    int nfaults,
    int (*redFunc) (RF_DagNode_t *))
{
	RF_DagNode_t *nodes, *wndNodes, *rodNodes = NULL, *syncNode, *xorNode,
	    *lpoNode, *blockNode, *unblockNode, *termNode;
	int     nWndNodes, nRodNodes, i;
	RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
	RF_AccessStripeMapHeader_t *new_asm_h[2];
	int     nodeNum, asmNum;
	RF_ReconUnitNum_t which_ru;
	char   *sosBuffer, *eosBuffer;
	RF_PhysDiskAddr_t *pda;
	RF_StripeNum_t parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout), asmap->raidAddress, &which_ru);

	if (rf_dagDebug)
		printf("[Creating parity-logging large-write DAG]\n");
	RF_ASSERT(nfaults == 1);	/* this arch is only single-fault tolerant */
	dag_h->creator = "ParityLoggingLargeWriteDAG";

	/* alloc the Wnd nodes, the xor node, and the Lpo node */
	nWndNodes = asmap->numStripeUnitsAccessed;
	RF_CallocAndAdd(nodes, nWndNodes + 6, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
	i = 0;
	wndNodes = &nodes[i];
	i += nWndNodes;
	xorNode = &nodes[i];
	i += 1;
	lpoNode = &nodes[i];
	i += 1;
	blockNode = &nodes[i];
	i += 1;
	syncNode = &nodes[i];
	i += 1;
	unblockNode = &nodes[i];
	i += 1;
	termNode = &nodes[i];
	i += 1;

	dag_h->numCommitNodes = nWndNodes + 1;
	dag_h->numCommits = 0;
	dag_h->numSuccedents = 1;

	rf_MapUnaccessedPortionOfStripe(raidPtr, layoutPtr, asmap, dag_h, new_asm_h, &nRodNodes, &sosBuffer, &eosBuffer, allocList);
	if (nRodNodes > 0)
		RF_CallocAndAdd(rodNodes, nRodNodes, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);

	/* begin node initialization */
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nRodNodes + 1, 0, 0, 0, dag_h, "Nil", allocList);
	rf_InitNode(unblockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, nWndNodes + 1, 0, 0, dag_h, "Nil", allocList);
	rf_InitNode(syncNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nWndNodes + 1, nRodNodes + 1, 0, 0, dag_h, "Nil", allocList);
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);

	/* initialize the Rod nodes */
	for (nodeNum = asmNum = 0; asmNum < 2; asmNum++) {
		if (new_asm_h[asmNum]) {
			pda = new_asm_h[asmNum]->stripeMap->physInfo;
			while (pda) {
				rf_InitNode(&rodNodes[nodeNum], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rod", allocList);
				rodNodes[nodeNum].params[0].p = pda;
				rodNodes[nodeNum].params[1].p = pda->bufPtr;
				rodNodes[nodeNum].params[2].v = parityStripeID;
				rodNodes[nodeNum].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
				nodeNum++;
				pda = pda->next;
			}
		}
	}
	RF_ASSERT(nodeNum == nRodNodes);
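	/*
	 * Note on the disk-node parameter layout (inferred from the usage in
	 * this file): each Rod/Wnd node carries params {pda, buffer pointer,
	 * parityStripeID, packed word}, where RF_CREATE_PARAM3 packs the I/O
	 * priority, a lock flag, an unlock flag, and the reconstruction-unit
	 * number into the single fourth parameter.
	 */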
	/* initialize the Wnd nodes */
	pda = asmap->physInfo;
	for (i = 0; i < nWndNodes; i++) {
		rf_InitNode(&wndNodes[i], rf_wait, RF_TRUE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wnd", allocList);
		RF_ASSERT(pda != NULL);
		wndNodes[i].params[0].p = pda;
		wndNodes[i].params[1].p = pda->bufPtr;
		wndNodes[i].params[2].v = parityStripeID;
		wndNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		pda = pda->next;
	}

	/* initialize the redundancy node */
	rf_InitNode(xorNode, rf_wait, RF_TRUE, redFunc, rf_NullNodeUndoFunc, NULL, 1, 1, 2 * (nWndNodes + nRodNodes) + 1, 1, dag_h, "Xr ", allocList);
	xorNode->flags |= RF_DAGNODE_FLAG_YIELD;
	for (i = 0; i < nWndNodes; i++) {
		xorNode->params[2 * i + 0] = wndNodes[i].params[0];	/* pda */
		xorNode->params[2 * i + 1] = wndNodes[i].params[1];	/* buf ptr */
	}
	for (i = 0; i < nRodNodes; i++) {
		xorNode->params[2 * (nWndNodes + i) + 0] = rodNodes[i].params[0];	/* pda */
		xorNode->params[2 * (nWndNodes + i) + 1] = rodNodes[i].params[1];	/* buf ptr */
	}
	xorNode->params[2 * (nWndNodes + nRodNodes)].p = raidPtr;	/* xor node needs to get
									 * at RAID information */

	/* Look for a Rod node that reads a complete SU.  If none, alloc a
	 * buffer to receive the parity info.  Note that we can't use a new
	 * data buffer because it will not have gotten written when the xor
	 * occurs. */
	for (i = 0; i < nRodNodes; i++)
		if (((RF_PhysDiskAddr_t *) rodNodes[i].params[0].p)->numSector == raidPtr->Layout.sectorsPerStripeUnit)
			break;
	if (i == nRodNodes) {
		RF_CallocAndAdd(xorNode->results[0], 1, rf_RaidAddressToByte(raidPtr, raidPtr->Layout.sectorsPerStripeUnit), (void *), allocList);
	} else {
		xorNode->results[0] = rodNodes[i].params[1].p;
	}

	/* initialize the Lpo node */
	rf_InitNode(lpoNode, rf_wait, RF_FALSE, rf_ParityLogOverwriteFunc, rf_ParityLogOverwriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h, "Lpo", allocList);

	lpoNode->params[0].p = asmap->parityInfo;
	lpoNode->params[1].p = xorNode->results[0];
	RF_ASSERT(asmap->parityInfo->next == NULL);	/* parityInfo must
							 * describe entire
							 * parity unit */

	/* connect nodes to form graph */

	/* connect dag header to block node */
	RF_ASSERT(dag_h->numSuccedents == 1);
	RF_ASSERT(blockNode->numAntecedents == 0);
	dag_h->succedents[0] = blockNode;

	/* connect the block node to the Rod nodes */
	RF_ASSERT(blockNode->numSuccedents == nRodNodes + 1);
	for (i = 0; i < nRodNodes; i++) {
		RF_ASSERT(rodNodes[i].numAntecedents == 1);
		blockNode->succedents[i] = &rodNodes[i];
		rodNodes[i].antecedents[0] = blockNode;
		rodNodes[i].antType[0] = rf_control;
	}

	/* connect the block node to the sync node */
	/* necessary if nRodNodes == 0 */
	RF_ASSERT(syncNode->numAntecedents == nRodNodes + 1);
	blockNode->succedents[nRodNodes] = syncNode;
	syncNode->antecedents[0] = blockNode;
	syncNode->antType[0] = rf_control;

	/* connect the Rod nodes to the sync node */
	for (i = 0; i < nRodNodes; i++) {
		rodNodes[i].succedents[0] = syncNode;
		syncNode->antecedents[1 + i] = &rodNodes[i];
		syncNode->antType[1 + i] = rf_control;
	}

	/* connect the sync node to the xor node */
	RF_ASSERT(syncNode->numSuccedents == nWndNodes + 1);
	RF_ASSERT(xorNode->numAntecedents == 1);
	syncNode->succedents[0] = xorNode;
	xorNode->antecedents[0] = syncNode;
	xorNode->antType[0] = rf_trueData;	/* carry forward from sync */

	/* connect the sync node to the Wnd nodes */
	for (i = 0; i < nWndNodes; i++) {
		RF_ASSERT(wndNodes[i].numAntecedents == 1);
		syncNode->succedents[1 + i] = &wndNodes[i];
		wndNodes[i].antecedents[0] = syncNode;
		wndNodes[i].antType[0] = rf_control;
	}
	/* connect the xor node to the Lpo node */
	RF_ASSERT(xorNode->numSuccedents == 1);
	RF_ASSERT(lpoNode->numAntecedents == 1);
	xorNode->succedents[0] = lpoNode;
	lpoNode->antecedents[0] = xorNode;
	lpoNode->antType[0] = rf_trueData;

	/* connect the Wnd nodes to the unblock node */
	RF_ASSERT(unblockNode->numAntecedents == nWndNodes + 1);
	for (i = 0; i < nWndNodes; i++) {
		RF_ASSERT(wndNodes[i].numSuccedents == 1);
		wndNodes[i].succedents[0] = unblockNode;
		unblockNode->antecedents[i] = &wndNodes[i];
		unblockNode->antType[i] = rf_control;
	}

	/* connect the Lpo node to the unblock node */
	RF_ASSERT(lpoNode->numSuccedents == 1);
	lpoNode->succedents[0] = unblockNode;
	unblockNode->antecedents[nWndNodes] = lpoNode;
	unblockNode->antType[nWndNodes] = rf_control;

	/* connect unblock node to terminator */
	RF_ASSERT(unblockNode->numSuccedents == 1);
	RF_ASSERT(termNode->numAntecedents == 1);
	RF_ASSERT(termNode->numSuccedents == 0);
	unblockNode->succedents[0] = termNode;
	termNode->antecedents[0] = unblockNode;
	termNode->antType[0] = rf_control;
}




/******************************************************************************
 *
 * creates a DAG to perform a small-write operation (either raid 5 or pq),
 * which is as follows:
 *
 *                               Header
 *                                 |
 *                               Block
 *                           / |  ... \   \
 *                          /  |       \   \
 *                       Rod  Rod      Rod  Rop
 *                        | \ /| \    / |  \/ |
 *                        |  | |  \  /  |  /\ |
 *                       Wnd  Wnd   Wnd      X
 *                        |     \    /       |
 *                        |      \  /        |
 *                         \      \/        Lpo
 *                          \     /\        /
 *                           +-> Unblock <-+
 *                                 |
 *                                 T
 *
 *
 * R = Read, W = Write, X = Xor, o = old, n = new, d = data, p = parity.
 *
 * When the access spans a stripe unit boundary and is less than one SU in
 * size, there will be two Rop -- X -- Wnp branches.  I call this the
 * "double-XOR" case.  The second output from each Rod node goes to the X
 * node.  In the double-XOR case, there are exactly 2 Rod nodes, and each
 * sends one output to one X node.  There is one Rod -- Wnd -- T branch for
 * each stripe unit being updated.
 *
 * The block and unblock nodes are unused.  See comment above
 * CreateFaultFreeReadDAG.
 *
 * Note: this DAG ignores all the optimizations related to making the RMWs
 * atomic.  It also has the nasty property that none of the buffers
 * allocated for reading old data & parity can be freed until the XOR node
 * fires.  Need to fix this.
 *
 * A null qfuncs indicates single fault tolerant.
 *****************************************************************************/
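/*
 * Concrete illustration of the "double-XOR" case described above: a small
 * write that straddles a stripe unit boundary updates the tail of one
 * parity range and the head of the next, so asmap->parityInfo describes
 * two distinct ranges (parityInfo->next != NULL).  numParityNodes below
 * then becomes 2 and two independent Rop -- X -- Lpu chains are built.
 */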
void
rf_CommonCreateParityLoggingSmallWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    RF_RedFuncs_t * pfuncs,
    RF_RedFuncs_t * qfuncs)
{
	RF_DagNode_t *xorNodes, *blockNode, *unblockNode, *nodes;
	RF_DagNode_t *readDataNodes, *readParityNodes;
	RF_DagNode_t *writeDataNodes, *lpuNodes;
	RF_DagNode_t *unlockDataNodes = NULL, *termNode;
	RF_PhysDiskAddr_t *pda = asmap->physInfo;
	int     numDataNodes = asmap->numStripeUnitsAccessed;
	int     numParityNodes = (asmap->parityInfo->next) ? 2 : 1;
	int     i, j, nNodes, totalNumNodes;
	RF_ReconUnitNum_t which_ru;
	int     (*func) (RF_DagNode_t * node), (*undoFunc) (RF_DagNode_t * node);
	int     (*qfunc) (RF_DagNode_t * node);
	char   *name, *qname;
	RF_StripeNum_t parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout), asmap->raidAddress, &which_ru);
	long    nfaults = qfuncs ? 2 : 1;
	int     lu_flag = (rf_enableAtomicRMW) ? 1 : 0;	/* lock/unlock flag */

	if (rf_dagDebug)
		printf("[Creating parity-logging small-write DAG]\n");
	RF_ASSERT(numDataNodes > 0);
	RF_ASSERT(nfaults == 1);
	dag_h->creator = "ParityLoggingSmallWriteDAG";

	/* DAG creation occurs in four steps:
	 * 1. count the number of nodes in the DAG
	 * 2. create the nodes
	 * 3. initialize the nodes
	 * 4. connect the nodes */

	/* Step 1. compute number of nodes in the graph */

	/* Number of nodes:
	 * a read and a write for each data unit,
	 * a redundancy computation node for each parity unit,
	 * a read and an Lpu for each parity unit,
	 * a block and an unblock node (2),
	 * a terminator node,
	 * and, if atomic RMW, an unlock node for each data unit. */
	totalNumNodes = (2 * numDataNodes) + numParityNodes + (2 * numParityNodes) + 3;
	if (lu_flag)
		totalNumNodes += numDataNodes;

	nNodes = numDataNodes + numParityNodes;

	dag_h->numCommitNodes = numDataNodes + numParityNodes;
	dag_h->numCommits = 0;
	dag_h->numSuccedents = 1;

	/* Step 2. create the nodes */
	RF_CallocAndAdd(nodes, totalNumNodes, sizeof(RF_DagNode_t), (RF_DagNode_t *), allocList);
	i = 0;
	blockNode = &nodes[i];
	i += 1;
	unblockNode = &nodes[i];
	i += 1;
	readDataNodes = &nodes[i];
	i += numDataNodes;
	readParityNodes = &nodes[i];
	i += numParityNodes;
	writeDataNodes = &nodes[i];
	i += numDataNodes;
	lpuNodes = &nodes[i];
	i += numParityNodes;
	xorNodes = &nodes[i];
	i += numParityNodes;
	termNode = &nodes[i];
	i += 1;
	if (lu_flag) {
		unlockDataNodes = &nodes[i];
		i += numDataNodes;
	}
	RF_ASSERT(i == totalNumNodes);
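	/*
	 * Worked example (illustrative only): a small write touching two
	 * data units within a single parity range gives numDataNodes = 2
	 * and numParityNodes = 1, so with lu_flag == 0 we get
	 * totalNumNodes = (2 * 2) + 1 + (2 * 1) + 3 = 10: block, unblock,
	 * two Rod, one Rop, two Wnd, one Lpu, one Xor, and the terminator.
	 */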
	/* Step 3. initialize the nodes */

	/* initialize block node (Nil) */
	rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nNodes, 0, 0, 0, dag_h, "Nil", allocList);

	/* initialize unblock node (Nil) */
	rf_InitNode(unblockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, nNodes, 0, 0, dag_h, "Nil", allocList);

	/* initialize terminator node (Trm) */
	rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);

	/* initialize nodes which read old data (Rod) */
	for (i = 0; i < numDataNodes; i++) {
		rf_InitNode(&readDataNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, nNodes, 1, 4, 0, dag_h, "Rod", allocList);
		RF_ASSERT(pda != NULL);
		readDataNodes[i].params[0].p = pda;	/* physical disk addr desc */
		readDataNodes[i].params[1].p = rf_AllocBuffer(raidPtr, dag_h, pda, allocList);	/* buffer to hold old data */
		readDataNodes[i].params[2].v = parityStripeID;
		readDataNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, lu_flag, 0, which_ru);
		pda = pda->next;
		readDataNodes[i].propList[0] = NULL;
		readDataNodes[i].propList[1] = NULL;
	}

	/* initialize nodes which read old parity (Rop) */
	pda = asmap->parityInfo;
	for (i = 0; i < numParityNodes; i++) {
		RF_ASSERT(pda != NULL);
		rf_InitNode(&readParityNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, nNodes, 1, 4, 0, dag_h, "Rop", allocList);
		readParityNodes[i].params[0].p = pda;
		readParityNodes[i].params[1].p = rf_AllocBuffer(raidPtr, dag_h, pda, allocList);	/* buffer to hold old parity */
		readParityNodes[i].params[2].v = parityStripeID;
		readParityNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);
		readParityNodes[i].propList[0] = NULL;
		pda = pda->next;
	}

	/* initialize nodes which write new data (Wnd) */
	pda = asmap->physInfo;
	for (i = 0; i < numDataNodes; i++) {
		RF_ASSERT(pda != NULL);
		rf_InitNode(&writeDataNodes[i], rf_wait, RF_TRUE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, nNodes, 4, 0, dag_h, "Wnd", allocList);
		writeDataNodes[i].params[0].p = pda;	/* physical disk addr desc */
		writeDataNodes[i].params[1].p = pda->bufPtr;	/* buffer holding new data to be written */
		writeDataNodes[i].params[2].v = parityStripeID;
		writeDataNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, 0, which_ru);

		if (lu_flag) {
			/* initialize node to unlock the disk queue */
			rf_InitNode(&unlockDataNodes[i], rf_wait, RF_FALSE, rf_DiskUnlockFunc, rf_DiskUnlockUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h, "Und", allocList);
			unlockDataNodes[i].params[0].p = pda;	/* physical disk addr desc */
			unlockDataNodes[i].params[1].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, 0, lu_flag, which_ru);
		}
		pda = pda->next;
	}


	/* initialize nodes which compute new parity */
	/* We use the simple XOR func in the double-XOR case, and when we're
	 * accessing only a portion of one stripe unit.  The distinction
	 * between the two is that the regular XOR func assumes that the
	 * targbuf is a full SU in size, and examines the pda associated with
	 * the buffer to decide where within the buffer to XOR the data,
	 * whereas the simple XOR func just XORs the data into the start of
	 * the buffer. */
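	/*
	 * Illustrative sketch of that distinction (hypothetical code, not
	 * part of this driver):
	 *
	 *	simple:   for (k = 0; k < len; k++)
	 *			targbuf[k] ^= src[k];
	 *	regular:  off = byte offset of the pda within its stripe unit;
	 *		  for (k = 0; k < len; k++)
	 *			targbuf[off + k] ^= src[k];
	 */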
	if ((numParityNodes == 2) || ((numDataNodes == 1) && (asmap->totalSectorsAccessed < raidPtr->Layout.sectorsPerStripeUnit))) {
		func = pfuncs->simple;
		undoFunc = rf_NullNodeUndoFunc;
		name = pfuncs->SimpleName;
		if (qfuncs) {
			qfunc = qfuncs->simple;
			qname = qfuncs->SimpleName;
		}
	} else {
		func = pfuncs->regular;
		undoFunc = rf_NullNodeUndoFunc;
		name = pfuncs->RegularName;
		if (qfuncs) {
			qfunc = qfuncs->regular;
			qname = qfuncs->RegularName;
		}
	}
	/* initialize the xor nodes: params are {pda,buf} from {Rod,Wnd,Rop}
	 * nodes, and raidPtr */
	if (numParityNodes == 2) {	/* double-xor case */
		for (i = 0; i < numParityNodes; i++) {
			rf_InitNode(&xorNodes[i], rf_wait, RF_TRUE, func, undoFunc, NULL, 1, nNodes, 7, 1, dag_h, name, allocList);	/* no wakeup func for xor */
			xorNodes[i].flags |= RF_DAGNODE_FLAG_YIELD;
			xorNodes[i].params[0] = readDataNodes[i].params[0];
			xorNodes[i].params[1] = readDataNodes[i].params[1];
			xorNodes[i].params[2] = readParityNodes[i].params[0];
			xorNodes[i].params[3] = readParityNodes[i].params[1];
			xorNodes[i].params[4] = writeDataNodes[i].params[0];
			xorNodes[i].params[5] = writeDataNodes[i].params[1];
			xorNodes[i].params[6].p = raidPtr;
			xorNodes[i].results[0] = readParityNodes[i].params[1].p;	/* use old parity buf as
											 * target buf */
		}
	} else {
		/* there is only one xor node in this case */
		rf_InitNode(&xorNodes[0], rf_wait, RF_TRUE, func, undoFunc, NULL, 1, nNodes, (2 * (numDataNodes + numDataNodes + 1) + 1), 1, dag_h, name, allocList);
		xorNodes[0].flags |= RF_DAGNODE_FLAG_YIELD;
		for (i = 0; i < numDataNodes + 1; i++) {
			/* set up params related to Rod and Rop nodes.
			 * Note: when i == numDataNodes, readDataNodes[i] is
			 * actually readParityNodes[0], because the two
			 * arrays are adjacent in the nodes[] allocation
			 * above. */
			xorNodes[0].params[2 * i + 0] = readDataNodes[i].params[0];	/* pda */
			xorNodes[0].params[2 * i + 1] = readDataNodes[i].params[1];	/* buffer pointer */
		}
		for (i = 0; i < numDataNodes; i++) {
			/* set up params related to Wnd and Wnp nodes */
			xorNodes[0].params[2 * (numDataNodes + 1 + i) + 0] = writeDataNodes[i].params[0];	/* pda */
			xorNodes[0].params[2 * (numDataNodes + 1 + i) + 1] = writeDataNodes[i].params[1];	/* buffer pointer */
		}
		xorNodes[0].params[2 * (numDataNodes + numDataNodes + 1)].p = raidPtr;	/* xor node needs to get
											 * at RAID information */
		xorNodes[0].results[0] = readParityNodes[0].params[1].p;
	}

	/* initialize the log node(s) */
	pda = asmap->parityInfo;
	for (i = 0; i < numParityNodes; i++) {
		RF_ASSERT(pda);
		rf_InitNode(&lpuNodes[i], rf_wait, RF_FALSE, rf_ParityLogUpdateFunc, rf_ParityLogUpdateUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h, "Lpu", allocList);
		lpuNodes[i].params[0].p = pda;	/* PhysDiskAddr of parity */
		lpuNodes[i].params[1].p = xorNodes[i].results[0];	/* buffer pointer to parity */
		pda = pda->next;
	}
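	/*
	 * At this point every node exists.  The Wnd and Xor nodes were
	 * created with their commit flag set (the RF_TRUE argument to
	 * rf_InitNode), which is what the numCommitNodes value of
	 * numDataNodes + numParityNodes set above accounts for.
	 */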
	/* Step 4. connect the nodes */

	/* connect header to block node */
	RF_ASSERT(dag_h->numSuccedents == 1);
	RF_ASSERT(blockNode->numAntecedents == 0);
	dag_h->succedents[0] = blockNode;

	/* connect block node to read old data nodes */
	RF_ASSERT(blockNode->numSuccedents == (numDataNodes + numParityNodes));
	for (i = 0; i < numDataNodes; i++) {
		blockNode->succedents[i] = &readDataNodes[i];
		RF_ASSERT(readDataNodes[i].numAntecedents == 1);
		readDataNodes[i].antecedents[0] = blockNode;
		readDataNodes[i].antType[0] = rf_control;
	}

	/* connect block node to read old parity nodes */
	for (i = 0; i < numParityNodes; i++) {
		blockNode->succedents[numDataNodes + i] = &readParityNodes[i];
		RF_ASSERT(readParityNodes[i].numAntecedents == 1);
		readParityNodes[i].antecedents[0] = blockNode;
		readParityNodes[i].antType[0] = rf_control;
	}

	/* connect read old data nodes to write new data nodes */
	for (i = 0; i < numDataNodes; i++) {
		RF_ASSERT(readDataNodes[i].numSuccedents == numDataNodes + numParityNodes);
		for (j = 0; j < numDataNodes; j++) {
			RF_ASSERT(writeDataNodes[j].numAntecedents == numDataNodes + numParityNodes);
			readDataNodes[i].succedents[j] = &writeDataNodes[j];
			writeDataNodes[j].antecedents[i] = &readDataNodes[i];
			if (i == j)
				writeDataNodes[j].antType[i] = rf_antiData;
			else
				writeDataNodes[j].antType[i] = rf_control;
		}
	}

	/* connect read old data nodes to xor nodes */
	for (i = 0; i < numDataNodes; i++)
		for (j = 0; j < numParityNodes; j++) {
			RF_ASSERT(xorNodes[j].numAntecedents == numDataNodes + numParityNodes);
			readDataNodes[i].succedents[numDataNodes + j] = &xorNodes[j];
			xorNodes[j].antecedents[i] = &readDataNodes[i];
			xorNodes[j].antType[i] = rf_trueData;
		}

	/* connect read old parity nodes to write new data nodes */
	for (i = 0; i < numParityNodes; i++) {
		RF_ASSERT(readParityNodes[i].numSuccedents == numDataNodes + numParityNodes);
		for (j = 0; j < numDataNodes; j++) {
			readParityNodes[i].succedents[j] = &writeDataNodes[j];
			writeDataNodes[j].antecedents[numDataNodes + i] = &readParityNodes[i];
			writeDataNodes[j].antType[numDataNodes + i] = rf_control;
		}
	}

	/* connect read old parity nodes to xor nodes */
	for (i = 0; i < numParityNodes; i++)
		for (j = 0; j < numParityNodes; j++) {
			readParityNodes[i].succedents[numDataNodes + j] = &xorNodes[j];
			xorNodes[j].antecedents[numDataNodes + i] = &readParityNodes[i];
			xorNodes[j].antType[numDataNodes + i] = rf_trueData;
		}

	/* connect xor nodes to parity log update (Lpu) nodes */
	for (i = 0; i < numParityNodes; i++) {
		RF_ASSERT(xorNodes[i].numSuccedents == 1);
		RF_ASSERT(lpuNodes[i].numAntecedents == 1);
		xorNodes[i].succedents[0] = &lpuNodes[i];
		lpuNodes[i].antecedents[0] = &xorNodes[i];
		lpuNodes[i].antType[0] = rf_trueData;
	}

	for (i = 0; i < numDataNodes; i++) {
		if (lu_flag) {
			/* connect write new data nodes to unlock nodes */
			RF_ASSERT(writeDataNodes[i].numSuccedents == 1);
			RF_ASSERT(unlockDataNodes[i].numAntecedents == 1);
			writeDataNodes[i].succedents[0] = &unlockDataNodes[i];
			unlockDataNodes[i].antecedents[0] = &writeDataNodes[i];
			unlockDataNodes[i].antType[0] = rf_control;

			/* connect unlock nodes to unblock node */
			RF_ASSERT(unlockDataNodes[i].numSuccedents == 1);
			RF_ASSERT(unblockNode->numAntecedents == (numDataNodes + (nfaults * numParityNodes)));
			unlockDataNodes[i].succedents[0] = unblockNode;
			unblockNode->antecedents[i] = &unlockDataNodes[i];
			unblockNode->antType[i] = rf_control;
		} else {
			/* connect write new data nodes to unblock node */
			RF_ASSERT(writeDataNodes[i].numSuccedents == 1);
			RF_ASSERT(unblockNode->numAntecedents == (numDataNodes + (nfaults * numParityNodes)));
			writeDataNodes[i].succedents[0] = unblockNode;
			unblockNode->antecedents[i] = &writeDataNodes[i];
			unblockNode->antType[i] = rf_control;
		}
	}

	/* connect parity log update nodes to unblock node */
	for (i = 0; i < numParityNodes; i++) {
		RF_ASSERT(lpuNodes[i].numSuccedents == 1);
		lpuNodes[i].succedents[0] = unblockNode;
		unblockNode->antecedents[numDataNodes + i] = &lpuNodes[i];
		unblockNode->antType[numDataNodes + i] = rf_control;
	}

	/* connect unblock node to terminator */
	RF_ASSERT(unblockNode->numSuccedents == 1);
	RF_ASSERT(termNode->numAntecedents == 1);
	RF_ASSERT(termNode->numSuccedents == 0);
	unblockNode->succedents[0] = termNode;
	termNode->antecedents[0] = unblockNode;
	termNode->antType[0] = rf_control;
}


void
rf_CreateParityLoggingSmallWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    RF_RedFuncs_t * pfuncs,
    RF_RedFuncs_t * qfuncs)
{
	dag_h->creator = "ParityLoggingSmallWriteDAG";
	rf_CommonCreateParityLoggingSmallWriteDAG(raidPtr, asmap, dag_h, bp, flags, allocList, &rf_xorFuncs, NULL);
}


void
rf_CreateParityLoggingLargeWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    int nfaults,
    int (*redFunc) (RF_DagNode_t *))
{
	dag_h->creator = "ParityLoggingLargeWriteDAG";
	rf_CommonCreateParityLoggingLargeWriteDAG(raidPtr, asmap, dag_h, bp, flags, allocList, 1, rf_RegularXorFunc);
}
#endif				/* RF_INCLUDE_PARITYLOGGING > 0 */
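#if 0
/*
 * Illustrative sketch only (never compiled): one plausible way a
 * parity-logging layout could dispatch between the two DAG creation
 * routines above.  The real selection logic lives elsewhere in RAIDframe;
 * the full-stripe predicate below is an assumption for illustration.
 */
static void
example_select_write_dag(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap,
    RF_DagHeader_t *dag_h, void *bp, RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t *allocList)
{
	if (asmap->numStripeUnitsAccessed == raidPtr->Layout.numDataCol) {
		/* writing every stripe unit: reconstruct parity from the
		 * new data plus any unaccessed portions of the stripe */
		rf_CreateParityLoggingLargeWriteDAG(raidPtr, asmap, dag_h,
		    bp, flags, allocList, 1, rf_RegularXorFunc);
	} else {
		/* partial-stripe write: read-modify-write, appending the
		 * parity update to the log instead of writing it in place */
		rf_CreateParityLoggingSmallWriteDAG(raidPtr, asmap, dag_h,
		    bp, flags, allocList, &rf_xorFuncs, NULL);
	}
}
#endif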