xref: /netbsd-src/sys/dev/raidframe/rf_dagfuncs.c (revision bada23909e740596d0a3785a73bd3583a9807fb8)
1 /*	$NetBSD: rf_dagfuncs.c,v 1.4 1999/03/14 21:53:31 oster Exp $	*/
2 /*
3  * Copyright (c) 1995 Carnegie-Mellon University.
4  * All rights reserved.
5  *
6  * Author: Mark Holland, William V. Courtright II
7  *
8  * Permission to use, copy, modify and distribute this software and
9  * its documentation is hereby granted, provided that both the copyright
10  * notice and this permission notice appear in all copies of the
11  * software, derivative works or modified versions, and any portions
12  * thereof, and that both notices appear in supporting documentation.
13  *
14  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17  *
18  * Carnegie Mellon requests users of this software to return to
19  *
20  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
21  *  School of Computer Science
22  *  Carnegie Mellon University
23  *  Pittsburgh PA 15213-3890
24  *
25  * any improvements or extensions that they make and grant Carnegie the
26  * rights to redistribute these changes.
27  */
28 
29 /*
30  * dagfuncs.c -- DAG node execution routines
31  *
32  * Rules:
33  * 1. Every DAG execution function must eventually cause node->status to
34  *    get set to "good" or "bad", and "FinishNode" to be called. In the
35  *    case of nodes that complete immediately (xor, NullNodeFunc, etc),
36  *    the node execution function can do these two things directly. In
37  *    the case of nodes that have to wait for some event (a disk read to
38  *    complete, a lock to be released, etc) to occur before they can
39  *    complete, this is typically achieved by having whatever module
40  *    is doing the operation call GenericWakeupFunc upon completion.
41  * 2. DAG execution functions should check the status in the DAG header
42  *    and NOP out their operations if the status is not "enable". However,
43  *    execution functions that release resources must be sure to release
44  *    them even when they NOP out the function that would use them.
45  *    Functions that acquire resources should go ahead and acquire them
46  *    even when they NOP, so that a downstream release node will not have
47  *    to check to find out whether or not the acquire was suppressed.
48  */
49 
50 #include <sys/ioctl.h>
51 #include <sys/param.h>
52 
53 #include "rf_archs.h"
54 #include "rf_raid.h"
55 #include "rf_dag.h"
56 #include "rf_layout.h"
57 #include "rf_etimer.h"
58 #include "rf_acctrace.h"
59 #include "rf_diskqueue.h"
60 #include "rf_dagfuncs.h"
61 #include "rf_general.h"
62 #include "rf_engine.h"
63 #include "rf_dagutils.h"
64 
65 #include "rf_kintf.h"
66 
67 #if RF_INCLUDE_PARITYLOGGING > 0
68 #include "rf_paritylog.h"
69 #endif				/* RF_INCLUDE_PARITYLOGGING > 0 */
70 
/*
 * Indirection table for the DAG node do/undo functions.  These pointers
 * are filled in once at configuration time by rf_ConfigureDAGFuncs()
 * below; DAG construction code stores them into node function fields.
 */
int     (*rf_DiskReadFunc) (RF_DagNode_t *);
int     (*rf_DiskWriteFunc) (RF_DagNode_t *);
int     (*rf_DiskReadUndoFunc) (RF_DagNode_t *);
int     (*rf_DiskWriteUndoFunc) (RF_DagNode_t *);
int     (*rf_DiskUnlockFunc) (RF_DagNode_t *);
int     (*rf_DiskUnlockUndoFunc) (RF_DagNode_t *);
int     (*rf_RegularXorUndoFunc) (RF_DagNode_t *);
int     (*rf_SimpleXorUndoFunc) (RF_DagNode_t *);
int     (*rf_RecoveryXorUndoFunc) (RF_DagNode_t *);
80 
81 /*****************************************************************************************
82  * main (only) configuration routine for this module
83  ****************************************************************************************/
84 int
85 rf_ConfigureDAGFuncs(listp)
86 	RF_ShutdownList_t **listp;
87 {
88 	RF_ASSERT(((sizeof(long) == 8) && RF_LONGSHIFT == 3) || ((sizeof(long) == 4) && RF_LONGSHIFT == 2));
89 	rf_DiskReadFunc = rf_DiskReadFuncForThreads;
90 	rf_DiskReadUndoFunc = rf_DiskUndoFunc;
91 	rf_DiskWriteFunc = rf_DiskWriteFuncForThreads;
92 	rf_DiskWriteUndoFunc = rf_DiskUndoFunc;
93 	rf_DiskUnlockFunc = rf_DiskUnlockFuncForThreads;
94 	rf_DiskUnlockUndoFunc = rf_NullNodeUndoFunc;
95 	rf_RegularXorUndoFunc = rf_NullNodeUndoFunc;
96 	rf_SimpleXorUndoFunc = rf_NullNodeUndoFunc;
97 	rf_RecoveryXorUndoFunc = rf_NullNodeUndoFunc;
98 	return (0);
99 }
100 
101 
102 
103 /*****************************************************************************************
104  * the execution function associated with a terminate node
105  ****************************************************************************************/
int
rf_TerminateFunc(node)
	RF_DagNode_t *node;
{
	/* by the time the terminal node fires, every commit node in the
	 * DAG must already have fired */
	RF_ASSERT(node->dagHdr->numCommits == node->dagHdr->numCommitNodes);
	/* terminate always succeeds; no I/O, so finish the node directly */
	node->status = rf_good;
	return (rf_FinishNode(node, RF_THREAD_CONTEXT));
}
114 
115 int
116 rf_TerminateUndoFunc(node)
117 	RF_DagNode_t *node;
118 {
119 	return (0);
120 }
121 
122 
123 /*****************************************************************************************
124  * execution functions associated with a mirror node
125  *
126  * parameters:
127  *
 * 0 - physical disk address of data
129  * 1 - buffer for holding read data
130  * 2 - parity stripe ID
131  * 3 - flags
132  * 4 - physical disk address of mirror (parity)
133  *
134  ****************************************************************************************/
135 
136 int
137 rf_DiskReadMirrorIdleFunc(node)
138 	RF_DagNode_t *node;
139 {
140 	/* select the mirror copy with the shortest queue and fill in node
141 	 * parameters with physical disk address */
142 
143 	rf_SelectMirrorDiskIdle(node);
144 	return (rf_DiskReadFunc(node));
145 }
146 
147 int
148 rf_DiskReadMirrorPartitionFunc(node)
149 	RF_DagNode_t *node;
150 {
151 	/* select the mirror copy with the shortest queue and fill in node
152 	 * parameters with physical disk address */
153 
154 	rf_SelectMirrorDiskPartition(node);
155 	return (rf_DiskReadFunc(node));
156 }
157 
158 int
159 rf_DiskReadMirrorUndoFunc(node)
160 	RF_DagNode_t *node;
161 {
162 	return (0);
163 }
164 
165 
166 
167 #if RF_INCLUDE_PARITYLOGGING > 0
168 /*****************************************************************************************
169  * the execution function associated with a parity log update node
170  ****************************************************************************************/
int
rf_ParityLogUpdateFunc(node)
	RF_DagNode_t *node;
{
	/* params: 0 = physical disk address, 1 = data buffer */
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	caddr_t buf = (caddr_t) node->params[1].p;
	RF_ParityLogData_t *logData;
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;

	/* NOP unless the DAG is enabled (rule 2 at the top of this file) */
	if (node->dagHdr->status == rf_enable) {
		RF_ETIMER_START(timer);
		/* node->wakeFunc is handed to the parity-log module, which
		 * presumably invokes it once the append completes -- matching
		 * the GenericWakeupFunc convention (TODO: confirm in
		 * rf_paritylog.c) */
		logData = rf_CreateParityLogData(RF_UPDATE, pda, buf,
		    (RF_Raid_t *) (node->dagHdr->raidPtr),
		    node->wakeFunc, (void *) node,
		    node->dagHdr->tracerec, timer);
		if (logData)
			rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
		else {
			/* allocation failed: charge elapsed time to the
			 * parity-log timer and fail the node immediately */
			RF_ETIMER_STOP(timer);
			RF_ETIMER_EVAL(timer);
			tracerec->plog_us += RF_ETIMER_VAL_US(timer);
			(node->wakeFunc) (node, ENOMEM);
		}
	}
	return (0);
}
198 
199 
200 /*****************************************************************************************
201  * the execution function associated with a parity log overwrite node
202  ****************************************************************************************/
int
rf_ParityLogOverwriteFunc(node)
	RF_DagNode_t *node;
{
	/* params: 0 = physical disk address, 1 = data buffer */
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	caddr_t buf = (caddr_t) node->params[1].p;
	RF_ParityLogData_t *logData;
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;

	/* NOP unless the DAG is enabled (rule 2 at the top of this file) */
	if (node->dagHdr->status == rf_enable) {
		RF_ETIMER_START(timer);
		/* same protocol as rf_ParityLogUpdateFunc above, but logs a
		 * full overwrite record instead of an update record */
		logData = rf_CreateParityLogData(RF_OVERWRITE, pda, buf, (RF_Raid_t *) (node->dagHdr->raidPtr),
		    node->wakeFunc, (void *) node, node->dagHdr->tracerec, timer);
		if (logData)
			rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
		else {
			/* allocation failed: charge elapsed time to the
			 * parity-log timer and fail the node immediately */
			RF_ETIMER_STOP(timer);
			RF_ETIMER_EVAL(timer);
			tracerec->plog_us += RF_ETIMER_VAL_US(timer);
			(node->wakeFunc) (node, ENOMEM);
		}
	}
	return (0);
}
228 #else				/* RF_INCLUDE_PARITYLOGGING > 0 */
229 
230 int
231 rf_ParityLogUpdateFunc(node)
232 	RF_DagNode_t *node;
233 {
234 	return (0);
235 }
236 int
237 rf_ParityLogOverwriteFunc(node)
238 	RF_DagNode_t *node;
239 {
240 	return (0);
241 }
242 #endif				/* RF_INCLUDE_PARITYLOGGING > 0 */
243 
244 int
245 rf_ParityLogUpdateUndoFunc(node)
246 	RF_DagNode_t *node;
247 {
248 	return (0);
249 }
250 
251 int
252 rf_ParityLogOverwriteUndoFunc(node)
253 	RF_DagNode_t *node;
254 {
255 	return (0);
256 }
257 /*****************************************************************************************
258  * the execution function associated with a NOP node
259  ****************************************************************************************/
int
rf_NullNodeFunc(node)
	RF_DagNode_t *node;
{
	/* a NOP node succeeds immediately: no I/O, so set status and
	 * finish directly (rule 1 at the top of this file) */
	node->status = rf_good;
	return (rf_FinishNode(node, RF_THREAD_CONTEXT));
}
267 
int
rf_NullNodeUndoFunc(node)
	RF_DagNode_t *node;
{
	/* undoing a NOP is itself a NOP: mark undone and finish */
	node->status = rf_undone;
	return (rf_FinishNode(node, RF_THREAD_CONTEXT));
}
275 
276 
277 /*****************************************************************************************
278  * the execution function associated with a disk-read node
279  ****************************************************************************************/
int
rf_DiskReadFuncForThreads(node)
	RF_DagNode_t *node;
{
	RF_DiskQueueData_t *req;
	/* params: 0 = pda, 1 = buffer, 2 = parity stripe ID, 3 = packed
	 * priority/lock/unlock/reconstruct-unit fields */
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	caddr_t buf = (caddr_t) node->params[1].p;
	RF_StripeNum_t parityStripeID = (RF_StripeNum_t) node->params[2].v;
	unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
	unsigned lock = RF_EXTRACT_LOCK_FLAG(node->params[3].v);
	unsigned unlock = RF_EXTRACT_UNLOCK_FLAG(node->params[3].v);
	unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
	RF_DiskQueueDataFlags_t flags = 0;
	/* if the DAG is disabled, still issue a NOP request so queue
	 * lock/unlock flags flow through (rule 2 at the top of this file) */
	RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ? RF_IO_TYPE_READ : RF_IO_TYPE_NOP;
	RF_DiskQueue_t **dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
	void   *b_proc = NULL;
#if RF_BACKWARD > 0
	caddr_t undoBuf;
#endif

	/* propagate the originating process, if any, for buffer mapping */
	if (node->dagHdr->bp)
		b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;

	/* one request may lock or unlock the disk queue, never both */
	RF_ASSERT(!(lock && unlock));
	flags |= (lock) ? RF_LOCK_DISK_QUEUE : 0;
	flags |= (unlock) ? RF_UNLOCK_DISK_QUEUE : 0;
#if RF_BACKWARD > 0
	/* allocate and zero the undo buffer. this is equivalent to copying
	 * the original buffer's contents to the undo buffer prior to
	 * performing the disk read. XXX hardcoded 512 bytes per sector! */
	if (node->dagHdr->allocList == NULL)
		rf_MakeAllocList(node->dagHdr->allocList);
	RF_CallocAndAdd(undoBuf, 1, 512 * pda->numSector, (caddr_t), node->dagHdr->allocList);
#endif				/* RF_BACKWARD > 0 */
	/* wakeFunc is called by the disk-queue layer on completion; see
	 * rf_GenericWakeupFunc for the status transitions */
	req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
	    buf, parityStripeID, which_ru,
	    (int (*) (void *, int)) node->wakeFunc,
	    node, NULL, node->dagHdr->tracerec,
	    (void *) (node->dagHdr->raidPtr), flags, b_proc);
	if (!req) {
		/* could not allocate the request: fail the node right away */
		(node->wakeFunc) (node, ENOMEM);
	} else {
		/* remember the request so the wakeup function can free it */
		node->dagFuncData = (void *) req;
		rf_DiskIOEnqueue(&(dqs[pda->row][pda->col]), req, priority);
	}
	return (0);
}
327 
328 
329 /*****************************************************************************************
330  * the execution function associated with a disk-write node
331  ****************************************************************************************/
int
rf_DiskWriteFuncForThreads(node)
	RF_DagNode_t *node;
{
	RF_DiskQueueData_t *req;
	/* params: 0 = pda, 1 = buffer, 2 = parity stripe ID, 3 = packed
	 * priority/lock/unlock/reconstruct-unit fields */
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	caddr_t buf = (caddr_t) node->params[1].p;
	RF_StripeNum_t parityStripeID = (RF_StripeNum_t) node->params[2].v;
	unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
	unsigned lock = RF_EXTRACT_LOCK_FLAG(node->params[3].v);
	unsigned unlock = RF_EXTRACT_UNLOCK_FLAG(node->params[3].v);
	unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
	RF_DiskQueueDataFlags_t flags = 0;
	/* if the DAG is disabled, issue a NOP so queue lock/unlock flags
	 * still flow through (rule 2 at the top of this file) */
	RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ? RF_IO_TYPE_WRITE : RF_IO_TYPE_NOP;
	RF_DiskQueue_t **dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
	void   *b_proc = NULL;
#if RF_BACKWARD > 0
	caddr_t undoBuf;
#endif

	/* propagate the originating process, if any, for buffer mapping */
	if (node->dagHdr->bp)
		b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;

#if RF_BACKWARD > 0
	/* This area is used only for backward error recovery experiments:
	 * first allocate a buffer and schedule a pre-read of the disk;
	 * after the pre-read completes (see rf_GenericWakeupFunc, which
	 * re-enters this function with status rf_bwd2), proceed with the
	 * normal disk write */
	if (node->status == rf_bwd2) {
		/* just finished undo logging, now perform real function */
		node->status = rf_fired;
		RF_ASSERT(!(lock && unlock));
		flags |= (lock) ? RF_LOCK_DISK_QUEUE : 0;
		flags |= (unlock) ? RF_UNLOCK_DISK_QUEUE : 0;
		req = rf_CreateDiskQueueData(iotype,
		    pda->startSector, pda->numSector, buf, parityStripeID, which_ru,
		    node->wakeFunc, (void *) node, NULL, node->dagHdr->tracerec,
		    (void *) (node->dagHdr->raidPtr), flags, b_proc);

		if (!req) {
			(node->wakeFunc) (node, ENOMEM);
		} else {
			node->dagFuncData = (void *) req;
			rf_DiskIOEnqueue(&(dqs[pda->row][pda->col]), req, priority);
		}
	} else {
		/* node status should be rf_fired */
		/* schedule a disk pre-read into a freshly allocated undo
		 * buffer. XXX 512 bytes per sector is hardcoded! */
		node->status = rf_bwd1;
		RF_ASSERT(!(lock && unlock));
		flags |= (lock) ? RF_LOCK_DISK_QUEUE : 0;
		flags |= (unlock) ? RF_UNLOCK_DISK_QUEUE : 0;
		if (node->dagHdr->allocList == NULL)
			rf_MakeAllocList(node->dagHdr->allocList);
		RF_CallocAndAdd(undoBuf, 1, 512 * pda->numSector, (caddr_t), node->dagHdr->allocList);
		req = rf_CreateDiskQueueData(RF_IO_TYPE_READ,
		    pda->startSector, pda->numSector, undoBuf, parityStripeID, which_ru,
		    node->wakeFunc, (void *) node, NULL, node->dagHdr->tracerec,
		    (void *) (node->dagHdr->raidPtr), flags, b_proc);

		if (!req) {
			(node->wakeFunc) (node, ENOMEM);
		} else {
			node->dagFuncData = (void *) req;
			rf_DiskIOEnqueue(&(dqs[pda->row][pda->col]), req, priority);
		}
	}
	return (0);
#endif				/* RF_BACKWARD > 0 */

	/* normal processing (rollaway or forward recovery) begins here */
	/* one request may lock or unlock the disk queue, never both */
	RF_ASSERT(!(lock && unlock));
	flags |= (lock) ? RF_LOCK_DISK_QUEUE : 0;
	flags |= (unlock) ? RF_UNLOCK_DISK_QUEUE : 0;
	/* wakeFunc is called by the disk-queue layer on completion */
	req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
	    buf, parityStripeID, which_ru,
	    (int (*) (void *, int)) node->wakeFunc,
	    (void *) node, NULL,
	    node->dagHdr->tracerec,
	    (void *) (node->dagHdr->raidPtr),
	    flags, b_proc);

	if (!req) {
		/* could not allocate the request: fail the node right away */
		(node->wakeFunc) (node, ENOMEM);
	} else {
		/* remember the request so the wakeup function can free it */
		node->dagFuncData = (void *) req;
		rf_DiskIOEnqueue(&(dqs[pda->row][pda->col]), req, priority);
	}

	return (0);
}
422 /*****************************************************************************************
423  * the undo function for disk nodes
424  * Note:  this is not a proper undo of a write node, only locks are released.
425  *        old data is not restored to disk!
426  ****************************************************************************************/
int
rf_DiskUndoFunc(node)
	RF_DagNode_t *node;
{
	RF_DiskQueueData_t *req;
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	RF_DiskQueue_t **dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;

	/* issue a NOP request whose only effect is RF_UNLOCK_DISK_QUEUE:
	 * the data written (if any) is deliberately NOT restored */
	req = rf_CreateDiskQueueData(RF_IO_TYPE_NOP,
	    0L, 0, NULL, 0L, 0,
	    (int (*) (void *, int)) node->wakeFunc,
	    (void *) node,
	    NULL, node->dagHdr->tracerec,
	    (void *) (node->dagHdr->raidPtr),
	    RF_UNLOCK_DISK_QUEUE, NULL);
	if (!req)
		/* could not allocate the request: fail the node right away */
		(node->wakeFunc) (node, ENOMEM);
	else {
		/* remember the request so the wakeup function can free it */
		node->dagFuncData = (void *) req;
		rf_DiskIOEnqueue(&(dqs[pda->row][pda->col]), req, RF_IO_NORMAL_PRIORITY);
	}

	return (0);
}
451 /*****************************************************************************************
452  * the execution function associated with an "unlock disk queue" node
453  ****************************************************************************************/
int
rf_DiskUnlockFuncForThreads(node)
	RF_DagNode_t *node;
{
	RF_DiskQueueData_t *req;
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	RF_DiskQueue_t **dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;

	/* issue a NOP request carrying RF_UNLOCK_DISK_QUEUE to release the
	 * queue lock; body is identical to rf_DiskUndoFunc above */
	req = rf_CreateDiskQueueData(RF_IO_TYPE_NOP,
	    0L, 0, NULL, 0L, 0,
	    (int (*) (void *, int)) node->wakeFunc,
	    (void *) node,
	    NULL, node->dagHdr->tracerec,
	    (void *) (node->dagHdr->raidPtr),
	    RF_UNLOCK_DISK_QUEUE, NULL);
	if (!req)
		/* could not allocate the request: fail the node right away */
		(node->wakeFunc) (node, ENOMEM);
	else {
		/* remember the request so the wakeup function can free it */
		node->dagFuncData = (void *) req;
		rf_DiskIOEnqueue(&(dqs[pda->row][pda->col]), req, RF_IO_NORMAL_PRIORITY);
	}

	return (0);
}
478 /*****************************************************************************************
479  * Callback routine for DiskRead and DiskWrite nodes.  When the disk op completes,
480  * the routine is called to set the node status and inform the execution engine that
481  * the node has fired.
482  ****************************************************************************************/
483 int
484 rf_GenericWakeupFunc(node, status)
485 	RF_DagNode_t *node;
486 	int     status;
487 {
488 	switch (node->status) {
489 	case rf_bwd1:
490 		node->status = rf_bwd2;
491 		if (node->dagFuncData)
492 			rf_FreeDiskQueueData((RF_DiskQueueData_t *) node->dagFuncData);
493 		return (rf_DiskWriteFuncForThreads(node));
494 		break;
495 	case rf_fired:
496 		if (status)
497 			node->status = rf_bad;
498 		else
499 			node->status = rf_good;
500 		break;
501 	case rf_recover:
502 		/* probably should never reach this case */
503 		if (status)
504 			node->status = rf_panic;
505 		else
506 			node->status = rf_undone;
507 		break;
508 	default:
509 		printf("rf_GenericWakeupFunc:");
510 		printf("node->status is %d,", node->status);
511 		printf("status is %d \n", status);
512 		RF_PANIC();
513 		break;
514 	}
515 	if (node->dagFuncData)
516 		rf_FreeDiskQueueData((RF_DiskQueueData_t *) node->dagFuncData);
517 	return (rf_FinishNode(node, RF_INTR_CONTEXT));
518 }
519 
520 
521 /*****************************************************************************************
522  * there are three distinct types of xor nodes
523  * A "regular xor" is used in the fault-free case where the access spans a complete
524  * stripe unit.  It assumes that the result buffer is one full stripe unit in size,
525  * and uses the stripe-unit-offset values that it computes from the PDAs to determine
526  * where within the stripe unit to XOR each argument buffer.
527  *
528  * A "simple xor" is used in the fault-free case where the access touches only a portion
529  * of one (or two, in some cases) stripe unit(s).  It assumes that all the argument
530  * buffers are of the same size and have the same stripe unit offset.
531  *
532  * A "recovery xor" is used in the degraded-mode case.  It's similar to the regular
533  * xor function except that it takes the failed PDA as an additional parameter, and
534  * uses it to determine what portions of the argument buffers need to be xor'd into
535  * the result buffer, and where in the result buffer they should go.
536  ****************************************************************************************/
537 
538 /* xor the params together and store the result in the result field.
539  * assume the result field points to a buffer that is the size of one SU,
540  * and use the pda params to determine where within the buffer to XOR
541  * the input buffers.
542  */
int
rf_RegularXorFunc(node)
	RF_DagNode_t *node;
{
	/* the last parameter is always the raidPtr */
	RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;
	int     i, retcode;
#if RF_BACKWARD > 0
	RF_PhysDiskAddr_t *pda;
	caddr_t undoBuf;
#endif

	retcode = 0;
	if (node->dagHdr->status == rf_enable) {
		/* don't do the XOR if the input is the same as the output */
		RF_ETIMER_START(timer);
		/* the params (excluding raidPtr) come in (pda, buf) pairs */
		for (i = 0; i < node->numParams - 1; i += 2)
			if (node->params[i + 1].p != node->results[0]) {
#if RF_BACKWARD > 0
				/* This section mimics undo logging for
				 * backward error recovery experiments by
				 * allocating and initializing a buffer. XXX
				 * 512 byte sector size is hard coded! */
				pda = node->params[i].p;
				if (node->dagHdr->allocList == NULL)
					rf_MakeAllocList(node->dagHdr->allocList);
				RF_CallocAndAdd(undoBuf, 1, 512 * pda->numSector, (caddr_t), node->dagHdr->allocList);
#endif				/* RF_BACKWARD > 0 */
				/* the pda determines where within the
				 * one-SU result buffer this input lands */
				retcode = rf_XorIntoBuffer(raidPtr, (RF_PhysDiskAddr_t *) node->params[i].p,
				    (char *) node->params[i + 1].p, (char *) node->results[0], node->dagHdr->bp);
			}
		RF_ETIMER_STOP(timer);
		RF_ETIMER_EVAL(timer);
		tracerec->xor_us += RF_ETIMER_VAL_US(timer);
	}
	return (rf_GenericWakeupFunc(node, retcode));	/* call wake func
							 * explicitly since no
							 * I/O in this node */
}
583 /* xor the inputs into the result buffer, ignoring placement issues */
int
rf_SimpleXorFunc(node)
	RF_DagNode_t *node;
{
	/* the last parameter is always the raidPtr */
	RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
	int     i, retcode = 0;
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;
#if RF_BACKWARD > 0
	RF_PhysDiskAddr_t *pda;
	caddr_t undoBuf;
#endif

	if (node->dagHdr->status == rf_enable) {
		RF_ETIMER_START(timer);
		/* don't do the XOR if the input is the same as the output */
		/* the params (excluding raidPtr) come in (pda, buf) pairs */
		for (i = 0; i < node->numParams - 1; i += 2)
			if (node->params[i + 1].p != node->results[0]) {
#if RF_BACKWARD > 0
				/* This section mimics undo logging for
				 * backward error recovery experiments by
				 * allocating and initializing a buffer. XXX
				 * 512 byte sector size is hard coded! */
				pda = node->params[i].p;
				if (node->dagHdr->allocList == NULL)
					rf_MakeAllocList(node->dagHdr->allocList);
				RF_CallocAndAdd(undoBuf, 1, 512 * pda->numSector, (caddr_t), node->dagHdr->allocList);
#endif				/* RF_BACKWARD > 0 */
				/* all buffers share one SU offset, so xor
				 * straight into the result, no placement */
				retcode = rf_bxor((char *) node->params[i + 1].p, (char *) node->results[0],
				    rf_RaidAddressToByte(raidPtr, ((RF_PhysDiskAddr_t *) node->params[i].p)->numSector),
				    (struct buf *) node->dagHdr->bp);
			}
		RF_ETIMER_STOP(timer);
		RF_ETIMER_EVAL(timer);
		tracerec->xor_us += RF_ETIMER_VAL_US(timer);
	}
	return (rf_GenericWakeupFunc(node, retcode));	/* call wake func
							 * explicitly since no
							 * I/O in this node */
}
624 /* this xor is used by the degraded-mode dag functions to recover lost data.
625  * the second-to-last parameter is the PDA for the failed portion of the access.
626  * the code here looks at this PDA and assumes that the xor target buffer is
627  * equal in size to the number of sectors in the failed PDA.  It then uses
628  * the other PDAs in the parameter list to determine where within the target
629  * buffer the corresponding data should be xored.
630  */
int
rf_RecoveryXorFunc(node)
	RF_DagNode_t *node;
{
	/* last param = raidPtr, second-to-last = PDA of the failed access */
	RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
	RF_RaidLayout_t *layoutPtr = (RF_RaidLayout_t *) & raidPtr->Layout;
	RF_PhysDiskAddr_t *failedPDA = (RF_PhysDiskAddr_t *) node->params[node->numParams - 2].p;
	int     i, retcode = 0;
	RF_PhysDiskAddr_t *pda;
	int     suoffset, failedSUOffset = rf_StripeUnitOffset(layoutPtr, failedPDA->startSector);
	char   *srcbuf, *destbuf;
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;
#if RF_BACKWARD > 0
	caddr_t undoBuf;
#endif

	if (node->dagHdr->status == rf_enable) {
		RF_ETIMER_START(timer);
		/* the surviving params come in (pda, buf) pairs; skip any
		 * input that aliases the result buffer */
		for (i = 0; i < node->numParams - 2; i += 2)
			if (node->params[i + 1].p != node->results[0]) {
				pda = (RF_PhysDiskAddr_t *) node->params[i].p;
#if RF_BACKWARD > 0
				/* This section mimics undo logging for
				 * backward error recovery experiments by
				 * allocating and initializing a buffer. XXX
				 * 512 byte sector size is hard coded! */
				if (node->dagHdr->allocList == NULL)
					rf_MakeAllocList(node->dagHdr->allocList);
				RF_CallocAndAdd(undoBuf, 1, 512 * pda->numSector, (caddr_t), node->dagHdr->allocList);
#endif				/* RF_BACKWARD > 0 */
				srcbuf = (char *) node->params[i + 1].p;
				/* position within the result buffer is this
				 * pda's SU offset relative to the failed one */
				suoffset = rf_StripeUnitOffset(layoutPtr, pda->startSector);
				destbuf = ((char *) node->results[0]) + rf_RaidAddressToByte(raidPtr, suoffset - failedSUOffset);
				retcode = rf_bxor(srcbuf, destbuf, rf_RaidAddressToByte(raidPtr, pda->numSector), node->dagHdr->bp);
			}
		RF_ETIMER_STOP(timer);
		RF_ETIMER_EVAL(timer);
		tracerec->xor_us += RF_ETIMER_VAL_US(timer);
	}
	/* call wake func explicitly since there is no I/O in this node */
	return (rf_GenericWakeupFunc(node, retcode));
}
673 /*****************************************************************************************
674  * The next three functions are utilities used by the above xor-execution functions.
675  ****************************************************************************************/
676 
677 
678 /*
679  * this is just a glorified buffer xor.  targbuf points to a buffer that is one full stripe unit
680  * in size.  srcbuf points to a buffer that may be less than 1 SU, but never more.  When the
681  * access described by pda is one SU in size (which by implication means it's SU-aligned),
682  * all that happens is (targbuf) <- (srcbuf ^ targbuf).  When the access is less than one
683  * SU in size the XOR occurs on only the portion of targbuf identified in the pda.
684  */
685 
int
rf_XorIntoBuffer(raidPtr, pda, srcbuf, targbuf, bp)
	RF_Raid_t *raidPtr;
	RF_PhysDiskAddr_t *pda;
	char   *srcbuf;
	char   *targbuf;
	void   *bp;
{
	char   *targptr;
	int     sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
	/* offset of the access within its stripe unit, in sectors */
	int     SUOffset = pda->startSector % sectPerSU;
	int     length, retcode = 0;

	/* srcbuf may be smaller than one SU, but never larger */
	RF_ASSERT(pda->numSector <= sectPerSU);

	/* xor only the portion of targbuf the pda describes */
	targptr = targbuf + rf_RaidAddressToByte(raidPtr, SUOffset);
	length = rf_RaidAddressToByte(raidPtr, pda->numSector);
	retcode = rf_bxor(srcbuf, targptr, length, bp);
	return (retcode);
}
706 /* it really should be the case that the buffer pointers (returned by malloc)
707  * are aligned to the natural word size of the machine, so this is the only
708  * case we optimize for.  The length should always be a multiple of the sector
709  * size, so there should be no problem with leftover bytes at the end.
710  */
int
rf_bxor(src, dest, len, bp)
	char   *src;
	char   *dest;
	int     len;		/* bytes */
	void   *bp;
{
	unsigned mask = sizeof(long) - 1, retcode = 0;

	/* only the longword-aligned case is implemented: buffers come from
	 * malloc (naturally aligned) and len is a multiple of the sector
	 * size, so a misaligned call indicates a caller bug */
	if (!(((unsigned long) src) & mask) && !(((unsigned long) dest) & mask) && !(len & mask)) {
		retcode = rf_longword_bxor((unsigned long *) src, (unsigned long *) dest, len >> RF_LONGSHIFT, bp);
	} else {
		RF_ASSERT(0);
	}
	return (retcode);
}
727 /* map a user buffer into kernel space, if necessary */
#define REMAP_VA(_bp,x,y) (y) = (x)	/* in-kernel build: identity map, _bp unused */
729 
730 /* When XORing in kernel mode, we need to map each user page to kernel space before we can access it.
731  * We don't want to assume anything about which input buffers are in kernel/user
732  * space, nor about their alignment, so in each loop we compute the maximum number
733  * of bytes that we can xor without crossing any page boundaries, and do only this many
734  * bytes before the next remap.
735  */
int
rf_longword_bxor(src, dest, len, bp)
	register unsigned long *src;
	register unsigned long *dest;
	int     len;		/* longwords */
	void   *bp;
{
	register unsigned long *end = src + len;
	register unsigned long d0, d1, d2, d3, s0, s1, s2, s3;	/* temps */
	register unsigned long *pg_src, *pg_dest;	/* per-page source/dest
							 * pointers */
	int     longs_this_time;/* # longwords to xor in the current iteration */

	/* establish the initial (possibly remapped) working pointers;
	 * with the identity REMAP_VA these equal src/dest */
	REMAP_VA(bp, src, pg_src);
	REMAP_VA(bp, dest, pg_dest);
	if (!pg_src || !pg_dest)
		return (EFAULT);

	while (len >= 4) {
		/* xor at most up to the nearer page boundary of src/dest */
		longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(pg_src), RF_BLIP(pg_dest)) >> RF_LONGSHIFT);	/* note len in longwords */
		src += longs_this_time;
		dest += longs_this_time;
		len -= longs_this_time;
		/* 4-way unrolled inner loop */
		while (longs_this_time >= 4) {
			d0 = pg_dest[0];
			d1 = pg_dest[1];
			d2 = pg_dest[2];
			d3 = pg_dest[3];
			s0 = pg_src[0];
			s1 = pg_src[1];
			s2 = pg_src[2];
			s3 = pg_src[3];
			pg_dest[0] = d0 ^ s0;
			pg_dest[1] = d1 ^ s1;
			pg_dest[2] = d2 ^ s2;
			pg_dest[3] = d3 ^ s3;
			pg_src += 4;
			pg_dest += 4;
			longs_this_time -= 4;
		}
		while (longs_this_time > 0) {	/* cannot cross any page
						 * boundaries here */
			*pg_dest++ ^= *pg_src++;
			longs_this_time--;
		}

		/* either we're done, or we've reached a page boundary on one
		 * (or possibly both) of the pointers */
		if (len) {
			if (RF_PAGE_ALIGNED(src))
				REMAP_VA(bp, src, pg_src);
			if (RF_PAGE_ALIGNED(dest))
				REMAP_VA(bp, dest, pg_dest);
			if (!pg_src || !pg_dest)
				return (EFAULT);
		}
	}
	/* tail: fewer than 4 longwords remain */
	while (src < end) {
		*pg_dest++ ^= *pg_src++;
		src++;
		dest++;
		len--;
		if (RF_PAGE_ALIGNED(src))
			REMAP_VA(bp, src, pg_src);
		if (RF_PAGE_ALIGNED(dest))
			REMAP_VA(bp, dest, pg_dest);
	}
	RF_ASSERT(len == 0);
	return (0);
}
806 
807 
808 /*
809    dst = a ^ b ^ c;
810    a may equal dst
811    see comment above longword_bxor
812 */
813 int
814 rf_longword_bxor3(dst, a, b, c, len, bp)
815 	register unsigned long *dst;
816 	register unsigned long *a;
817 	register unsigned long *b;
818 	register unsigned long *c;
819 	int     len;		/* length in longwords */
820 	void   *bp;
821 {
822 	unsigned long a0, a1, a2, a3, b0, b1, b2, b3;
823 	register unsigned long *pg_a, *pg_b, *pg_c, *pg_dst;	/* per-page source/dest
824 								 * pointers */
825 	int     longs_this_time;/* # longs to xor in the current iteration */
826 	char    dst_is_a = 0;
827 
828 	REMAP_VA(bp, a, pg_a);
829 	REMAP_VA(bp, b, pg_b);
830 	REMAP_VA(bp, c, pg_c);
831 	if (a == dst) {
832 		pg_dst = pg_a;
833 		dst_is_a = 1;
834 	} else {
835 		REMAP_VA(bp, dst, pg_dst);
836 	}
837 
838 	/* align dest to cache line.  Can't cross a pg boundary on dst here. */
839 	while ((((unsigned long) pg_dst) & 0x1f)) {
840 		*pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
841 		dst++;
842 		a++;
843 		b++;
844 		c++;
845 		if (RF_PAGE_ALIGNED(a)) {
846 			REMAP_VA(bp, a, pg_a);
847 			if (!pg_a)
848 				return (EFAULT);
849 		}
850 		if (RF_PAGE_ALIGNED(b)) {
851 			REMAP_VA(bp, a, pg_b);
852 			if (!pg_b)
853 				return (EFAULT);
854 		}
855 		if (RF_PAGE_ALIGNED(c)) {
856 			REMAP_VA(bp, a, pg_c);
857 			if (!pg_c)
858 				return (EFAULT);
859 		}
860 		len--;
861 	}
862 
863 	while (len > 4) {
864 		longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(a), RF_MIN(RF_BLIP(b), RF_MIN(RF_BLIP(c), RF_BLIP(dst)))) >> RF_LONGSHIFT);
865 		a += longs_this_time;
866 		b += longs_this_time;
867 		c += longs_this_time;
868 		dst += longs_this_time;
869 		len -= longs_this_time;
870 		while (longs_this_time >= 4) {
871 			a0 = pg_a[0];
872 			longs_this_time -= 4;
873 
874 			a1 = pg_a[1];
875 			a2 = pg_a[2];
876 
877 			a3 = pg_a[3];
878 			pg_a += 4;
879 
880 			b0 = pg_b[0];
881 			b1 = pg_b[1];
882 
883 			b2 = pg_b[2];
884 			b3 = pg_b[3];
885 			/* start dual issue */
886 			a0 ^= b0;
887 			b0 = pg_c[0];
888 
889 			pg_b += 4;
890 			a1 ^= b1;
891 
892 			a2 ^= b2;
893 			a3 ^= b3;
894 
895 			b1 = pg_c[1];
896 			a0 ^= b0;
897 
898 			b2 = pg_c[2];
899 			a1 ^= b1;
900 
901 			b3 = pg_c[3];
902 			a2 ^= b2;
903 
904 			pg_dst[0] = a0;
905 			a3 ^= b3;
906 			pg_dst[1] = a1;
907 			pg_c += 4;
908 			pg_dst[2] = a2;
909 			pg_dst[3] = a3;
910 			pg_dst += 4;
911 		}
912 		while (longs_this_time > 0) {	/* cannot cross any page
913 						 * boundaries here */
914 			*pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
915 			longs_this_time--;
916 		}
917 
918 		if (len) {
919 			if (RF_PAGE_ALIGNED(a)) {
920 				REMAP_VA(bp, a, pg_a);
921 				if (!pg_a)
922 					return (EFAULT);
923 				if (dst_is_a)
924 					pg_dst = pg_a;
925 			}
926 			if (RF_PAGE_ALIGNED(b)) {
927 				REMAP_VA(bp, b, pg_b);
928 				if (!pg_b)
929 					return (EFAULT);
930 			}
931 			if (RF_PAGE_ALIGNED(c)) {
932 				REMAP_VA(bp, c, pg_c);
933 				if (!pg_c)
934 					return (EFAULT);
935 			}
936 			if (!dst_is_a)
937 				if (RF_PAGE_ALIGNED(dst)) {
938 					REMAP_VA(bp, dst, pg_dst);
939 					if (!pg_dst)
940 						return (EFAULT);
941 				}
942 		}
943 	}
944 	while (len) {
945 		*pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
946 		dst++;
947 		a++;
948 		b++;
949 		c++;
950 		if (RF_PAGE_ALIGNED(a)) {
951 			REMAP_VA(bp, a, pg_a);
952 			if (!pg_a)
953 				return (EFAULT);
954 			if (dst_is_a)
955 				pg_dst = pg_a;
956 		}
957 		if (RF_PAGE_ALIGNED(b)) {
958 			REMAP_VA(bp, b, pg_b);
959 			if (!pg_b)
960 				return (EFAULT);
961 		}
962 		if (RF_PAGE_ALIGNED(c)) {
963 			REMAP_VA(bp, c, pg_c);
964 			if (!pg_c)
965 				return (EFAULT);
966 		}
967 		if (!dst_is_a)
968 			if (RF_PAGE_ALIGNED(dst)) {
969 				REMAP_VA(bp, dst, pg_dst);
970 				if (!pg_dst)
971 					return (EFAULT);
972 			}
973 		len--;
974 	}
975 	return (0);
976 }
977 
int
rf_bxor3(dst, a, b, c, len, bp)
	register unsigned char *dst;
	register unsigned char *a;
	register unsigned char *b;
	register unsigned char *c;
	unsigned long len;	/* bytes */
	void   *bp;
{
	/* all four pointers and the byte count must be 8-byte aligned */
	RF_ASSERT(((RF_UL(dst) | RF_UL(a) | RF_UL(b) | RF_UL(c) | len) & 0x7) == 0);

	/* delegate to the longword version; len converted to longwords */
	return (rf_longword_bxor3((unsigned long *) dst, (unsigned long *) a,
		(unsigned long *) b, (unsigned long *) c, len >> RF_LONGSHIFT, bp));
}
992