/*	$NetBSD: rf_parityloggingdags.c,v 1.20 2013/11/22 18:56:27 riz Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: William V. Courtright II
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * DAGs specific to parity logging are created here.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_parityloggingdags.c,v 1.20 2013/11/22 18:56:27 riz Exp $");

#ifdef _KERNEL_OPT
#include "opt_raid_diagnostic.h"
#endif

#include "rf_archs.h"

#if RF_INCLUDE_PARITYLOGGING > 0

#include <dev/raidframe/raidframevar.h>

#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_dagutils.h"
#include "rf_dagfuncs.h"
#include "rf_debugMem.h"
#include "rf_paritylog.h"
#include "rf_general.h"

#include "rf_parityloggingdags.h"

/******************************************************************************
 *
 * creates a DAG to perform a large-write operation:
 *
 *           / Rod \          / Wnd \
 * H -- NIL - Rod -- NIL ---- Wnd ----------- NIL -- T
 *           \ Rod /          \ Xor -- Lpo /
 *
 * The writes are not done until the reads complete because if they were
 * done in parallel, a failure on one of the reads could leave the parity
 * in an inconsistent state, so that the retry with a new DAG would produce
 * erroneous parity.
 *
 * Note: this DAG has the nasty property that none of the buffers allocated
 * for reading old data can be freed until the XOR node fires.  Need to fix
 * this.
 *
 * The last two arguments are the number of faults tolerated and the
 * function used for the redundancy computation.  The undo for the
 * redundancy computation is assumed to be null.
 *
 *****************************************************************************/
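
/*
 * Worked example (illustrative geometry, not from the code above): in a
 * single-fault-tolerant array with four data columns, a write covering
 * three of the four stripe units gives nWndNodes = 3, and the unaccessed
 * stripe unit is read back through Rod nodes built from the maps returned
 * by rf_MapUnaccessedPortionOfStripe().  The Xor node combines the
 * new-data buffers with that old data to produce the full parity unit,
 * which the Lpo node then records in the parity log rather than writing
 * it to the parity disk in place.
 */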

void
rf_CommonCreateParityLoggingLargeWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    int nfaults,
    int (*redFunc) (RF_DagNode_t *))
{
        RF_DagNode_t *nodes, *wndNodes, *rodNodes = NULL, *syncNode, *xorNode,
            *lpoNode, *blockNode, *unblockNode, *termNode;
        int nWndNodes, nRodNodes, i;
        RF_RaidLayout_t *layoutPtr = &(raidPtr->Layout);
        RF_AccessStripeMapHeader_t *new_asm_h[2];
        int nodeNum, asmNum;
        RF_ReconUnitNum_t which_ru;
        char *sosBuffer, *eosBuffer;
        RF_PhysDiskAddr_t *pda;
        RF_StripeNum_t parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout), asmap->raidAddress, &which_ru);

        if (rf_dagDebug)
                printf("[Creating parity-logging large-write DAG]\n");
        RF_ASSERT(nfaults == 1);	/* this architecture is only single-fault tolerant */
        dag_h->creator = "ParityLoggingLargeWriteDAG";

        /* alloc the Wnd nodes, the xor node, and the Lpo node */
        nWndNodes = asmap->numStripeUnitsAccessed;
        RF_MallocAndAdd(nodes, (nWndNodes + 6) * sizeof(RF_DagNode_t),
            (RF_DagNode_t *), allocList);
        i = 0;
        wndNodes = &nodes[i];
        i += nWndNodes;
        xorNode = &nodes[i];
        i += 1;
        lpoNode = &nodes[i];
        i += 1;
        blockNode = &nodes[i];
        i += 1;
        syncNode = &nodes[i];
        i += 1;
        unblockNode = &nodes[i];
        i += 1;
        termNode = &nodes[i];
        i += 1;

        dag_h->numCommitNodes = nWndNodes + 1;
        dag_h->numCommits = 0;
        dag_h->numSuccedents = 1;

        rf_MapUnaccessedPortionOfStripe(raidPtr, layoutPtr, asmap, dag_h, new_asm_h, &nRodNodes, &sosBuffer, &eosBuffer, allocList);
        if (nRodNodes > 0)
                RF_MallocAndAdd(rodNodes, nRodNodes * sizeof(RF_DagNode_t),
                    (RF_DagNode_t *), allocList);

        /* begin node initialization */
        rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nRodNodes + 1, 0, 0, 0, dag_h, "Nil", allocList);
        rf_InitNode(unblockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, nWndNodes + 1, 0, 0, dag_h, "Nil", allocList);
        rf_InitNode(syncNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nWndNodes + 1, nRodNodes + 1, 0, 0, dag_h, "Nil", allocList);
        rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);

        /* initialize the Rod nodes */
        for (nodeNum = asmNum = 0; asmNum < 2; asmNum++) {
                if (new_asm_h[asmNum]) {
                        pda = new_asm_h[asmNum]->stripeMap->physInfo;
                        while (pda) {
                                rf_InitNode(&rodNodes[nodeNum], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Rod", allocList);
                                rodNodes[nodeNum].params[0].p = pda;
                                rodNodes[nodeNum].params[1].p = pda->bufPtr;
                                rodNodes[nodeNum].params[2].v = parityStripeID;
                                rodNodes[nodeNum].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, which_ru);
                                nodeNum++;
                                pda = pda->next;
                        }
                }
        }
        RF_ASSERT(nodeNum == nRodNodes);
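
        /*
         * All disk I/O nodes in these DAGs share one parameter layout:
         * params[0] is the physical disk address (RF_PhysDiskAddr_t *),
         * params[1] the I/O buffer, params[2] the parity stripe ID, and
         * params[3] the packed priority / reconstruction-unit value built
         * by RF_CREATE_PARAM3.  The Wnd nodes below use the same layout as
         * the Rod nodes above.
         */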

        /* initialize the Wnd nodes */
        pda = asmap->physInfo;
        for (i = 0; i < nWndNodes; i++) {
                rf_InitNode(&wndNodes[i], rf_wait, RF_TRUE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 4, 0, dag_h, "Wnd", allocList);
                RF_ASSERT(pda != NULL);
                wndNodes[i].params[0].p = pda;
                wndNodes[i].params[1].p = pda->bufPtr;
                wndNodes[i].params[2].v = parityStripeID;
                wndNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, which_ru);
                pda = pda->next;
        }

        /* initialize the redundancy node */
        rf_InitNode(xorNode, rf_wait, RF_TRUE, redFunc, rf_NullNodeUndoFunc, NULL, 1, 1, 2 * (nWndNodes + nRodNodes) + 1, 1, dag_h, "Xr ", allocList);
        xorNode->flags |= RF_DAGNODE_FLAG_YIELD;
        for (i = 0; i < nWndNodes; i++) {
                xorNode->params[2 * i + 0] = wndNodes[i].params[0];	/* pda */
                xorNode->params[2 * i + 1] = wndNodes[i].params[1];	/* buf ptr */
        }
        for (i = 0; i < nRodNodes; i++) {
                xorNode->params[2 * (nWndNodes + i) + 0] = rodNodes[i].params[0];	/* pda */
                xorNode->params[2 * (nWndNodes + i) + 1] = rodNodes[i].params[1];	/* buf ptr */
        }
        /* xor node needs to get at RAID information */
        xorNode->params[2 * (nWndNodes + nRodNodes)].p = raidPtr;

        /* look for a Rod node that reads a complete SU.  If none, alloc a
         * buffer to receive the parity info.  Note that we can't use a new
         * data buffer because it will not have gotten written when the xor
         * occurs. */
        for (i = 0; i < nRodNodes; i++)
                if (((RF_PhysDiskAddr_t *) rodNodes[i].params[0].p)->numSector == raidPtr->Layout.sectorsPerStripeUnit)
                        break;
        if (i == nRodNodes) {
                RF_MallocAndAdd(xorNode->results[0],
                    rf_RaidAddressToByte(raidPtr, raidPtr->Layout.sectorsPerStripeUnit), (void *), allocList);
        } else {
                xorNode->results[0] = rodNodes[i].params[1].p;
        }

        /* initialize the Lpo node */
        rf_InitNode(lpoNode, rf_wait, RF_FALSE, rf_ParityLogOverwriteFunc, rf_ParityLogOverwriteUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h, "Lpo", allocList);

        lpoNode->params[0].p = asmap->parityInfo;
        lpoNode->params[1].p = xorNode->results[0];
        /* parityInfo must describe the entire parity unit */
        RF_ASSERT(asmap->parityInfo->next == NULL);

        /* connect nodes to form graph */

        /* connect dag header to block node */
        RF_ASSERT(dag_h->numSuccedents == 1);
        RF_ASSERT(blockNode->numAntecedents == 0);
        dag_h->succedents[0] = blockNode;

        /* connect the block node to the Rod nodes */
        RF_ASSERT(blockNode->numSuccedents == nRodNodes + 1);
        for (i = 0; i < nRodNodes; i++) {
                RF_ASSERT(rodNodes[i].numAntecedents == 1);
                blockNode->succedents[i] = &rodNodes[i];
                rodNodes[i].antecedents[0] = blockNode;
                rodNodes[i].antType[0] = rf_control;
        }

        /* connect the block node to the sync node */
        /* necessary if nRodNodes == 0 */
        RF_ASSERT(syncNode->numAntecedents == nRodNodes + 1);
        blockNode->succedents[nRodNodes] = syncNode;
        syncNode->antecedents[0] = blockNode;
        syncNode->antType[0] = rf_control;

        /* connect the Rod nodes to the sync node */
        for (i = 0; i < nRodNodes; i++) {
                rodNodes[i].succedents[0] = syncNode;
                syncNode->antecedents[1 + i] = &rodNodes[i];
                syncNode->antType[1 + i] = rf_control;
        }

        /* connect the sync node to the xor node */
        RF_ASSERT(syncNode->numSuccedents == nWndNodes + 1);
        RF_ASSERT(xorNode->numAntecedents == 1);
        syncNode->succedents[0] = xorNode;
        xorNode->antecedents[0] = syncNode;
        xorNode->antType[0] = rf_trueData;	/* carry forward from sync */

        /* connect the sync node to the Wnd nodes */
        for (i = 0; i < nWndNodes; i++) {
                RF_ASSERT(wndNodes[i].numAntecedents == 1);
                syncNode->succedents[1 + i] = &wndNodes[i];
                wndNodes[i].antecedents[0] = syncNode;
                wndNodes[i].antType[0] = rf_control;
        }
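
        /*
         * The antType on each arc records why the edge exists: rf_control
         * arcs only impose ordering, while rf_trueData arcs (sync -> Xor
         * above, Xor -> Lpo below) mark a true data dependence, i.e. the
         * successor consumes a buffer its predecessor produced or forwarded.
         */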

        /* connect the xor node to the Lpo node */
        RF_ASSERT(xorNode->numSuccedents == 1);
        RF_ASSERT(lpoNode->numAntecedents == 1);
        xorNode->succedents[0] = lpoNode;
        lpoNode->antecedents[0] = xorNode;
        lpoNode->antType[0] = rf_trueData;

        /* connect the Wnd nodes to the unblock node */
        RF_ASSERT(unblockNode->numAntecedents == nWndNodes + 1);
        for (i = 0; i < nWndNodes; i++) {
                RF_ASSERT(wndNodes[i].numSuccedents == 1);
                wndNodes[i].succedents[0] = unblockNode;
                unblockNode->antecedents[i] = &wndNodes[i];
                unblockNode->antType[i] = rf_control;
        }

        /* connect the Lpo node to the unblock node */
        RF_ASSERT(lpoNode->numSuccedents == 1);
        lpoNode->succedents[0] = unblockNode;
        unblockNode->antecedents[nWndNodes] = lpoNode;
        unblockNode->antType[nWndNodes] = rf_control;

        /* connect unblock node to terminator */
        RF_ASSERT(unblockNode->numSuccedents == 1);
        RF_ASSERT(termNode->numAntecedents == 1);
        RF_ASSERT(termNode->numSuccedents == 0);
        unblockNode->succedents[0] = termNode;
        termNode->antecedents[0] = unblockNode;
        termNode->antType[0] = rf_control;
}



/******************************************************************************
 *
 * creates a DAG to perform a small-write operation (either RAID 5 or PQ),
 * which is as follows:
 *
 *                           Header
 *                              |
 *                            Block
 *                        / |  ...  \   \
 *                       /  |        \   \
 *                     Rod  Rod      Rod  Rop
 *                      |    |        |    |
 *                     Wnd  Wnd      Wnd   X
 *                      |    \        /    |
 *                      |     \      /    Lpu
 *                       \     \    /     /
 *                        +--> Unblock <-+
 *                              |
 *                              T
 *
 * R = Read, W = Write, X = Xor, o = old, n = new, d = data, p = parity.
 * When the access spans a stripe unit boundary and is less than one SU in
 * size, there will be two Rop -- X -- Lpu branches.  I call this the
 * "double-XOR" case.
 * The second output from each Rod node goes to the X node.  In the
 * double-XOR case, there are exactly 2 Rod nodes, and each sends one
 * output to one X node.
 * There is one Rod -- Wnd -- T branch for each stripe unit being updated.
 *
 * The block and unblock nodes are unused.  See comment above
 * CreateFaultFreeReadDAG.
 *
 * Note: this DAG ignores all the optimizations related to making the RMWs
 * atomic.  It also has the nasty property that none of the buffers
 * allocated for reading old data & parity can be freed until the XOR node
 * fires.  Need to fix this.
 *
 * A null qfuncs indicates single-fault tolerance.
 *****************************************************************************/
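
/*
 * Illustrative example (assumed geometry): a small write confined to a
 * single stripe unit has numDataNodes = 1 and a single parity region, so
 * numParityNodes = 1: one Rod, one Rop, one Wnd, one XOR and one Lpu node.
 * A sub-SU access that spans a stripe-unit boundary leaves
 * asmap->parityInfo->next non-NULL (two parity regions), giving
 * numParityNodes = 2 and the double-XOR shape described above.
 */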

void
rf_CommonCreateParityLoggingSmallWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    const RF_RedFuncs_t * pfuncs,
    const RF_RedFuncs_t * qfuncs)
{
        RF_DagNode_t *xorNodes, *blockNode, *unblockNode, *nodes;
        RF_DagNode_t *readDataNodes, *readParityNodes;
        RF_DagNode_t *writeDataNodes, *lpuNodes;
        RF_DagNode_t *termNode;
        RF_PhysDiskAddr_t *pda = asmap->physInfo;
        int numDataNodes = asmap->numStripeUnitsAccessed;
        int numParityNodes = (asmap->parityInfo->next) ? 2 : 1;
        int i, j, nNodes, totalNumNodes;
        RF_ReconUnitNum_t which_ru;
        int (*func) (RF_DagNode_t * node), (*undoFunc) (RF_DagNode_t * node);
        int (*qfunc) (RF_DagNode_t * node);
        const char *name, *qname;
        RF_StripeNum_t parityStripeID = rf_RaidAddressToParityStripeID(&(raidPtr->Layout), asmap->raidAddress, &which_ru);
        long nfaults __unused = qfuncs ? 2 : 1;

        if (rf_dagDebug)
                printf("[Creating parity-logging small-write DAG]\n");
        RF_ASSERT(numDataNodes > 0);
        RF_ASSERT(nfaults == 1);
        dag_h->creator = "ParityLoggingSmallWriteDAG";

        /* DAG creation occurs in four steps: 1. count the number of nodes
         * in the DAG, 2. create the nodes, 3. initialize the nodes,
         * 4. connect the nodes */

        /* Step 1. compute number of nodes in the graph */

        /* number of nodes: a read (Rod) and a write (Wnd) for each data
         * unit, a redundancy computation (XOR) node for each parity unit,
         * a read (Rop) and a log update (Lpu) for each parity unit, plus a
         * block, an unblock and a terminator node */
        totalNumNodes = (2 * numDataNodes) + numParityNodes + (2 * numParityNodes) + 3;

        nNodes = numDataNodes + numParityNodes;

        dag_h->numCommitNodes = numDataNodes + numParityNodes;
        dag_h->numCommits = 0;
        dag_h->numSuccedents = 1;

        /* Step 2. create the nodes */
        RF_MallocAndAdd(nodes, totalNumNodes * sizeof(RF_DagNode_t),
            (RF_DagNode_t *), allocList);
        i = 0;
        blockNode = &nodes[i];
        i += 1;
        unblockNode = &nodes[i];
        i += 1;
        readDataNodes = &nodes[i];
        i += numDataNodes;
        readParityNodes = &nodes[i];
        i += numParityNodes;
        writeDataNodes = &nodes[i];
        i += numDataNodes;
        lpuNodes = &nodes[i];
        i += numParityNodes;
        xorNodes = &nodes[i];
        i += numParityNodes;
        termNode = &nodes[i];
        i += 1;

        RF_ASSERT(i == totalNumNodes);
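
        /*
         * Arithmetic check with hypothetical values: numDataNodes = 2 and
         * numParityNodes = 1 give totalNumNodes = (2 * 2) + 1 + (2 * 1) + 3
         * = 10, carved up above as 1 block + 1 unblock + 2 Rod + 1 Rop +
         * 2 Wnd + 1 Lpu + 1 XOR + 1 Trm.
         */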

        /* Step 3. initialize the nodes */
        /* initialize block node (Nil) */
        rf_InitNode(blockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, nNodes, 0, 0, 0, dag_h, "Nil", allocList);

        /* initialize unblock node (Nil) */
        rf_InitNode(unblockNode, rf_wait, RF_FALSE, rf_NullNodeFunc, rf_NullNodeUndoFunc, NULL, 1, nNodes, 0, 0, dag_h, "Nil", allocList);

        /* initialize terminator node (Trm) */
        rf_InitNode(termNode, rf_wait, RF_FALSE, rf_TerminateFunc, rf_TerminateUndoFunc, NULL, 0, 1, 0, 0, dag_h, "Trm", allocList);

        /* initialize nodes which read old data (Rod) */
        for (i = 0; i < numDataNodes; i++) {
                rf_InitNode(&readDataNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, nNodes, 1, 4, 0, dag_h, "Rod", allocList);
                RF_ASSERT(pda != NULL);
                readDataNodes[i].params[0].p = pda;	/* physical disk addr desc */
                readDataNodes[i].params[1].p = rf_AllocBuffer(raidPtr, dag_h, pda->numSector << raidPtr->logBytesPerSector);	/* buffer to hold old data */
                readDataNodes[i].params[2].v = parityStripeID;
                readDataNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, which_ru);
                pda = pda->next;
                readDataNodes[i].propList[0] = NULL;
                readDataNodes[i].propList[1] = NULL;
        }

        /* initialize nodes which read old parity (Rop) */
        pda = asmap->parityInfo;
        for (i = 0; i < numParityNodes; i++) {
                RF_ASSERT(pda != NULL);
                rf_InitNode(&readParityNodes[i], rf_wait, RF_FALSE, rf_DiskReadFunc, rf_DiskReadUndoFunc, rf_GenericWakeupFunc, nNodes, 1, 4, 0, dag_h, "Rop", allocList);
                readParityNodes[i].params[0].p = pda;
                readParityNodes[i].params[1].p = rf_AllocBuffer(raidPtr, dag_h, pda->numSector << raidPtr->logBytesPerSector);	/* buffer to hold old parity */
                readParityNodes[i].params[2].v = parityStripeID;
                readParityNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, which_ru);
                readParityNodes[i].propList[0] = NULL;
                pda = pda->next;
        }

        /* initialize nodes which write new data (Wnd) */
        pda = asmap->physInfo;
        for (i = 0; i < numDataNodes; i++) {
                RF_ASSERT(pda != NULL);
                rf_InitNode(&writeDataNodes[i], rf_wait, RF_TRUE, rf_DiskWriteFunc, rf_DiskWriteUndoFunc, rf_GenericWakeupFunc, 1, nNodes, 4, 0, dag_h, "Wnd", allocList);
                writeDataNodes[i].params[0].p = pda;	/* physical disk addr desc */
                writeDataNodes[i].params[1].p = pda->bufPtr;	/* buffer holding new data to be written */
                writeDataNodes[i].params[2].v = parityStripeID;
                writeDataNodes[i].params[3].v = RF_CREATE_PARAM3(RF_IO_NORMAL_PRIORITY, which_ru);

                pda = pda->next;
        }

        /* initialize nodes which compute new parity */
        /* we use the simple XOR func in the double-XOR case, and when we're
         * accessing only a portion of one stripe unit.  the distinction
         * between the two is that the regular XOR func assumes that the
         * targbuf is a full SU in size, and examines the pda associated
         * with the buffer to decide where within the buffer to XOR the
         * data, whereas the simple XOR func just XORs the data into the
         * start of the buffer. */
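        /*
         * Concrete reading of that distinction (illustrative numbers): with
         * 16-sector stripe units and a write touching only sectors 4-7 of
         * one unit, the regular XOR func would XOR into a full-SU target
         * buffer at the offset named by the pda, while the simple XOR func
         * XORs starting at byte 0 of an access-sized buffer.
         */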
        if ((numParityNodes == 2) || ((numDataNodes == 1) && (asmap->totalSectorsAccessed < raidPtr->Layout.sectorsPerStripeUnit))) {
                func = pfuncs->simple;
                undoFunc = rf_NullNodeUndoFunc;
                name = pfuncs->SimpleName;
                if (qfuncs) {
                        qfunc = qfuncs->simple;
                        qname = qfuncs->SimpleName;
                }
        } else {
                func = pfuncs->regular;
                undoFunc = rf_NullNodeUndoFunc;
                name = pfuncs->RegularName;
                if (qfuncs) {
                        qfunc = qfuncs->regular;
                        qname = qfuncs->RegularName;
                }
        }
        /* initialize the xor nodes: params are {pda,buf} from {Rod,Wnd,Rop}
         * nodes, and raidPtr */
        if (numParityNodes == 2) {	/* double-xor case */
                for (i = 0; i < numParityNodes; i++) {
                        /* no wakeup func for xor */
                        rf_InitNode(&xorNodes[i], rf_wait, RF_TRUE, func, undoFunc, NULL, 1, nNodes, 7, 1, dag_h, name, allocList);
                        xorNodes[i].flags |= RF_DAGNODE_FLAG_YIELD;
                        xorNodes[i].params[0] = readDataNodes[i].params[0];
                        xorNodes[i].params[1] = readDataNodes[i].params[1];
                        xorNodes[i].params[2] = readParityNodes[i].params[0];
                        xorNodes[i].params[3] = readParityNodes[i].params[1];
                        xorNodes[i].params[4] = writeDataNodes[i].params[0];
                        xorNodes[i].params[5] = writeDataNodes[i].params[1];
                        xorNodes[i].params[6].p = raidPtr;
                        /* use old parity buf as target buf */
                        xorNodes[i].results[0] = readParityNodes[i].params[1].p;
                }
        } else {
                /* there is only one xor node in this case */
                rf_InitNode(&xorNodes[0], rf_wait, RF_TRUE, func, undoFunc, NULL, 1, nNodes, (2 * (numDataNodes + numDataNodes + 1) + 1), 1, dag_h, name, allocList);
                xorNodes[0].flags |= RF_DAGNODE_FLAG_YIELD;
                for (i = 0; i < numDataNodes + 1; i++) {
                        /* set up params related to Rod and Rop nodes */
                        xorNodes[0].params[2 * i + 0] = readDataNodes[i].params[0];	/* pda */
                        xorNodes[0].params[2 * i + 1] = readDataNodes[i].params[1];	/* buffer pointer */
                }
                for (i = 0; i < numDataNodes; i++) {
                        /* set up params related to Wnd nodes */
                        xorNodes[0].params[2 * (numDataNodes + 1 + i) + 0] = writeDataNodes[i].params[0];	/* pda */
                        xorNodes[0].params[2 * (numDataNodes + 1 + i) + 1] = writeDataNodes[i].params[1];	/* buffer pointer */
                }
                /* xor node needs to get at RAID information */
                xorNodes[0].params[2 * (numDataNodes + numDataNodes + 1)].p = raidPtr;
                xorNodes[0].results[0] = readParityNodes[0].params[1].p;
        }

        /* initialize the log node(s) */
        pda = asmap->parityInfo;
        for (i = 0; i < numParityNodes; i++) {
                RF_ASSERT(pda);
                rf_InitNode(&lpuNodes[i], rf_wait, RF_FALSE, rf_ParityLogUpdateFunc, rf_ParityLogUpdateUndoFunc, rf_GenericWakeupFunc, 1, 1, 2, 0, dag_h, "Lpu", allocList);
                lpuNodes[i].params[0].p = pda;	/* PhysDiskAddr of parity */
                lpuNodes[i].params[1].p = xorNodes[i].results[0];	/* buffer pointer to parity */
                pda = pda->next;
        }
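
        /*
         * Two details worth noting above: the single-XOR loop runs to
         * numDataNodes + 1 and deliberately indexes one element past the
         * Rod nodes; because readParityNodes immediately follows
         * readDataNodes in the carved-up nodes array,
         * readDataNodes[numDataNodes] is readParityNodes[0].  And where a
         * conventional small-write DAG would write new parity in place
         * (Wnp), this DAG hands the XOR result to Lpu
         * (rf_ParityLogUpdateFunc), which records a parity-update image in
         * the parity log.
         */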

        /* Step 4. connect the nodes */

        /* connect header to block node */
        RF_ASSERT(dag_h->numSuccedents == 1);
        RF_ASSERT(blockNode->numAntecedents == 0);
        dag_h->succedents[0] = blockNode;

        /* connect block node to read old data nodes */
        RF_ASSERT(blockNode->numSuccedents == (numDataNodes + numParityNodes));
        for (i = 0; i < numDataNodes; i++) {
                blockNode->succedents[i] = &readDataNodes[i];
                RF_ASSERT(readDataNodes[i].numAntecedents == 1);
                readDataNodes[i].antecedents[0] = blockNode;
                readDataNodes[i].antType[0] = rf_control;
        }

        /* connect block node to read old parity nodes */
        for (i = 0; i < numParityNodes; i++) {
                blockNode->succedents[numDataNodes + i] = &readParityNodes[i];
                RF_ASSERT(readParityNodes[i].numAntecedents == 1);
                readParityNodes[i].antecedents[0] = blockNode;
                readParityNodes[i].antType[0] = rf_control;
        }
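
        /*
         * The next stanza encodes a write-after-read hazard: new data for
         * stripe unit i must not reach the disk before the old data of the
         * same unit has been read, so the i == j arc from Rod to Wnd is
         * tagged rf_antiData, while the remaining Rod -> Wnd arcs are plain
         * rf_control ordering arcs.
         */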

        /* connect read old data nodes to write new data nodes */
        for (i = 0; i < numDataNodes; i++) {
                RF_ASSERT(readDataNodes[i].numSuccedents == numDataNodes + numParityNodes);
                for (j = 0; j < numDataNodes; j++) {
                        RF_ASSERT(writeDataNodes[j].numAntecedents == numDataNodes + numParityNodes);
                        readDataNodes[i].succedents[j] = &writeDataNodes[j];
                        writeDataNodes[j].antecedents[i] = &readDataNodes[i];
                        if (i == j)
                                writeDataNodes[j].antType[i] = rf_antiData;
                        else
                                writeDataNodes[j].antType[i] = rf_control;
                }
        }

        /* connect read old data nodes to xor nodes */
        for (i = 0; i < numDataNodes; i++)
                for (j = 0; j < numParityNodes; j++) {
                        RF_ASSERT(xorNodes[j].numAntecedents == numDataNodes + numParityNodes);
                        readDataNodes[i].succedents[numDataNodes + j] = &xorNodes[j];
                        xorNodes[j].antecedents[i] = &readDataNodes[i];
                        xorNodes[j].antType[i] = rf_trueData;
                }

        /* connect read old parity nodes to write new data nodes */
        for (i = 0; i < numParityNodes; i++) {
                RF_ASSERT(readParityNodes[i].numSuccedents == numDataNodes + numParityNodes);
                for (j = 0; j < numDataNodes; j++) {
                        readParityNodes[i].succedents[j] = &writeDataNodes[j];
                        writeDataNodes[j].antecedents[numDataNodes + i] = &readParityNodes[i];
                        writeDataNodes[j].antType[numDataNodes + i] = rf_control;
                }
        }

        /* connect read old parity nodes to xor nodes */
        for (i = 0; i < numParityNodes; i++)
                for (j = 0; j < numParityNodes; j++) {
                        readParityNodes[i].succedents[numDataNodes + j] = &xorNodes[j];
                        xorNodes[j].antecedents[numDataNodes + i] = &readParityNodes[i];
                        xorNodes[j].antType[numDataNodes + i] = rf_trueData;
                }

        /* connect xor nodes to parity log update nodes */
        for (i = 0; i < numParityNodes; i++) {
                RF_ASSERT(xorNodes[i].numSuccedents == 1);
                RF_ASSERT(lpuNodes[i].numAntecedents == 1);
                xorNodes[i].succedents[0] = &lpuNodes[i];
                lpuNodes[i].antecedents[0] = &xorNodes[i];
                lpuNodes[i].antType[0] = rf_trueData;
        }

        for (i = 0; i < numDataNodes; i++) {
                /* connect write new data nodes to unblock node */
                RF_ASSERT(writeDataNodes[i].numSuccedents == 1);
                RF_ASSERT(unblockNode->numAntecedents == (numDataNodes + (nfaults * numParityNodes)));
                writeDataNodes[i].succedents[0] = unblockNode;
                unblockNode->antecedents[i] = &writeDataNodes[i];
                unblockNode->antType[i] = rf_control;
        }

        /* connect parity log update nodes to unblock node */
        for (i = 0; i < numParityNodes; i++) {
                RF_ASSERT(lpuNodes[i].numSuccedents == 1);
                lpuNodes[i].succedents[0] = unblockNode;
                unblockNode->antecedents[numDataNodes + i] = &lpuNodes[i];
                unblockNode->antType[numDataNodes + i] = rf_control;
        }

        /* connect unblock node to terminator */
        RF_ASSERT(unblockNode->numSuccedents == 1);
        RF_ASSERT(termNode->numAntecedents == 1);
        RF_ASSERT(termNode->numSuccedents == 0);
        unblockNode->succedents[0] = termNode;
        termNode->antecedents[0] = unblockNode;
        termNode->antType[0] = rf_control;
}


void
rf_CreateParityLoggingSmallWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    const RF_RedFuncs_t * pfuncs,
    const RF_RedFuncs_t * qfuncs)
{
        dag_h->creator = "ParityLoggingSmallWriteDAG";
        rf_CommonCreateParityLoggingSmallWriteDAG(raidPtr, asmap, dag_h, bp, flags, allocList, &rf_xorFuncs, NULL);
}


void
rf_CreateParityLoggingLargeWriteDAG(
    RF_Raid_t * raidPtr,
    RF_AccessStripeMap_t * asmap,
    RF_DagHeader_t * dag_h,
    void *bp,
    RF_RaidAccessFlags_t flags,
    RF_AllocListElem_t * allocList,
    int nfaults,
    int (*redFunc) (RF_DagNode_t *))
{
        dag_h->creator = "ParityLoggingLargeWriteDAG";
        rf_CommonCreateParityLoggingLargeWriteDAG(raidPtr, asmap, dag_h, bp, flags, allocList, 1, rf_RegularXorFunc);
}
#endif				/* RF_INCLUDE_PARITYLOGGING > 0 */