Home | History | Annotate | Line # | Download | only in raidframe
rf_dagfuncs.c revision 1.14
      1 /*	$NetBSD: rf_dagfuncs.c,v 1.14 2003/12/29 03:43:07 oster Exp $	*/
      2 /*
      3  * Copyright (c) 1995 Carnegie-Mellon University.
      4  * All rights reserved.
      5  *
      6  * Author: Mark Holland, William V. Courtright II
      7  *
      8  * Permission to use, copy, modify and distribute this software and
      9  * its documentation is hereby granted, provided that both the copyright
     10  * notice and this permission notice appear in all copies of the
     11  * software, derivative works or modified versions, and any portions
     12  * thereof, and that both notices appear in supporting documentation.
     13  *
     14  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     15  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     16  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     17  *
     18  * Carnegie Mellon requests users of this software to return to
     19  *
     20  *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
     21  *  School of Computer Science
     22  *  Carnegie Mellon University
     23  *  Pittsburgh PA 15213-3890
     24  *
     25  * any improvements or extensions that they make and grant Carnegie the
     26  * rights to redistribute these changes.
     27  */
     28 
     29 /*
     30  * dagfuncs.c -- DAG node execution routines
     31  *
     32  * Rules:
     33  * 1. Every DAG execution function must eventually cause node->status to
     34  *    get set to "good" or "bad", and "FinishNode" to be called. In the
     35  *    case of nodes that complete immediately (xor, NullNodeFunc, etc),
     36  *    the node execution function can do these two things directly. In
     37  *    the case of nodes that have to wait for some event (a disk read to
     38  *    complete, a lock to be released, etc) to occur before they can
     39  *    complete, this is typically achieved by having whatever module
     40  *    is doing the operation call GenericWakeupFunc upon completion.
     41  * 2. DAG execution functions should check the status in the DAG header
     42  *    and NOP out their operations if the status is not "enable". However,
     43  *    execution functions that release resources must be sure to release
     44  *    them even when they NOP out the function that would use them.
     45  *    Functions that acquire resources should go ahead and acquire them
     46  *    even when they NOP, so that a downstream release node will not have
     47  *    to check to find out whether or not the acquire was suppressed.
     48  */
     49 
     50 #include <sys/cdefs.h>
     51 __KERNEL_RCSID(0, "$NetBSD: rf_dagfuncs.c,v 1.14 2003/12/29 03:43:07 oster Exp $");
     52 
     53 #include <sys/param.h>
     54 #include <sys/ioctl.h>
     55 
     56 #include "rf_archs.h"
     57 #include "rf_raid.h"
     58 #include "rf_dag.h"
     59 #include "rf_layout.h"
     60 #include "rf_etimer.h"
     61 #include "rf_acctrace.h"
     62 #include "rf_diskqueue.h"
     63 #include "rf_dagfuncs.h"
     64 #include "rf_general.h"
     65 #include "rf_engine.h"
     66 #include "rf_dagutils.h"
     67 
     68 #include "rf_kintf.h"
     69 
     70 #if RF_INCLUDE_PARITYLOGGING > 0
     71 #include "rf_paritylog.h"
     72 #endif				/* RF_INCLUDE_PARITYLOGGING > 0 */
     73 
     74 int     (*rf_DiskReadFunc) (RF_DagNode_t *);
     75 int     (*rf_DiskWriteFunc) (RF_DagNode_t *);
     76 int     (*rf_DiskReadUndoFunc) (RF_DagNode_t *);
     77 int     (*rf_DiskWriteUndoFunc) (RF_DagNode_t *);
     78 int     (*rf_DiskUnlockFunc) (RF_DagNode_t *);
     79 int     (*rf_DiskUnlockUndoFunc) (RF_DagNode_t *);
     80 int     (*rf_RegularXorUndoFunc) (RF_DagNode_t *);
     81 int     (*rf_SimpleXorUndoFunc) (RF_DagNode_t *);
     82 int     (*rf_RecoveryXorUndoFunc) (RF_DagNode_t *);
     83 
     84 /*****************************************************************************
     85  * main (only) configuration routine for this module
     86  ****************************************************************************/
     87 int
     88 rf_ConfigureDAGFuncs(listp)
     89 	RF_ShutdownList_t **listp;
     90 {
     91 	RF_ASSERT(((sizeof(long) == 8) && RF_LONGSHIFT == 3) ||
     92 		  ((sizeof(long) == 4) && RF_LONGSHIFT == 2));
     93 	rf_DiskReadFunc = rf_DiskReadFuncForThreads;
     94 	rf_DiskReadUndoFunc = rf_DiskUndoFunc;
     95 	rf_DiskWriteFunc = rf_DiskWriteFuncForThreads;
     96 	rf_DiskWriteUndoFunc = rf_DiskUndoFunc;
     97 	rf_DiskUnlockFunc = rf_DiskUnlockFuncForThreads;
     98 	rf_DiskUnlockUndoFunc = rf_NullNodeUndoFunc;
     99 	rf_RegularXorUndoFunc = rf_NullNodeUndoFunc;
    100 	rf_SimpleXorUndoFunc = rf_NullNodeUndoFunc;
    101 	rf_RecoveryXorUndoFunc = rf_NullNodeUndoFunc;
    102 	return (0);
    103 }
    104 
    105 
    106 
    107 /*****************************************************************************
    108  * the execution function associated with a terminate node
    109  ****************************************************************************/
    110 int
    111 rf_TerminateFunc(node)
    112 	RF_DagNode_t *node;
    113 {
    114 	RF_ASSERT(node->dagHdr->numCommits == node->dagHdr->numCommitNodes);
    115 	node->status = rf_good;
    116 	return (rf_FinishNode(node, RF_THREAD_CONTEXT));
    117 }
    118 
    119 int
    120 rf_TerminateUndoFunc(node)
    121 	RF_DagNode_t *node;
    122 {
    123 	return (0);
    124 }
    125 
    126 
    127 /*****************************************************************************************
    128  * execution functions associated with a mirror node
    129  *
    130  * parameters:
    131  *
    132  * 0 - physical disk addres of data
    133  * 1 - buffer for holding read data
    134  * 2 - parity stripe ID
    135  * 3 - flags
    136  * 4 - physical disk address of mirror (parity)
    137  *
    138  ****************************************************************************************/
    139 
    140 int
    141 rf_DiskReadMirrorIdleFunc(node)
    142 	RF_DagNode_t *node;
    143 {
    144 	/* select the mirror copy with the shortest queue and fill in node
    145 	 * parameters with physical disk address */
    146 
    147 	rf_SelectMirrorDiskIdle(node);
    148 	return (rf_DiskReadFunc(node));
    149 }
    150 
    151 #if (RF_INCLUDE_CHAINDECLUSTER > 0) || (RF_INCLUDE_INTERDECLUSTER > 0) || (RF_DEBUG_VALIDATE_DAG > 0)
    152 int
    153 rf_DiskReadMirrorPartitionFunc(node)
    154 	RF_DagNode_t *node;
    155 {
    156 	/* select the mirror copy with the shortest queue and fill in node
    157 	 * parameters with physical disk address */
    158 
    159 	rf_SelectMirrorDiskPartition(node);
    160 	return (rf_DiskReadFunc(node));
    161 }
    162 #endif
    163 
    164 int
    165 rf_DiskReadMirrorUndoFunc(node)
    166 	RF_DagNode_t *node;
    167 {
    168 	return (0);
    169 }
    170 
    171 
    172 
    173 #if RF_INCLUDE_PARITYLOGGING > 0
    174 /*****************************************************************************
    175  * the execution function associated with a parity log update node
    176  ****************************************************************************/
    177 int
    178 rf_ParityLogUpdateFunc(node)
    179 	RF_DagNode_t *node;
    180 {
    181 	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
    182 	caddr_t buf = (caddr_t) node->params[1].p;
    183 	RF_ParityLogData_t *logData;
    184 	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
    185 	RF_Etimer_t timer;
    186 
    187 	if (node->dagHdr->status == rf_enable) {
    188 		RF_ETIMER_START(timer);
    189 		logData = rf_CreateParityLogData(RF_UPDATE, pda, buf,
    190 		    (RF_Raid_t *) (node->dagHdr->raidPtr),
    191 		    node->wakeFunc, (void *) node,
    192 		    node->dagHdr->tracerec, timer);
    193 		if (logData)
    194 			rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
    195 		else {
    196 			RF_ETIMER_STOP(timer);
    197 			RF_ETIMER_EVAL(timer);
    198 			tracerec->plog_us += RF_ETIMER_VAL_US(timer);
    199 			(node->wakeFunc) (node, ENOMEM);
    200 		}
    201 	}
    202 	return (0);
    203 }
    204 
    205 
    206 /*****************************************************************************************
    207  * the execution function associated with a parity log overwrite node
    208  ****************************************************************************************/
    209 int
    210 rf_ParityLogOverwriteFunc(node)
    211 	RF_DagNode_t *node;
    212 {
    213 	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
    214 	caddr_t buf = (caddr_t) node->params[1].p;
    215 	RF_ParityLogData_t *logData;
    216 	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
    217 	RF_Etimer_t timer;
    218 
    219 	if (node->dagHdr->status == rf_enable) {
    220 		RF_ETIMER_START(timer);
    221 		logData = rf_CreateParityLogData(RF_OVERWRITE, pda, buf,
    222 (RF_Raid_t *) (node->dagHdr->raidPtr),
    223 		    node->wakeFunc, (void *) node, node->dagHdr->tracerec, timer);
    224 		if (logData)
    225 			rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
    226 		else {
    227 			RF_ETIMER_STOP(timer);
    228 			RF_ETIMER_EVAL(timer);
    229 			tracerec->plog_us += RF_ETIMER_VAL_US(timer);
    230 			(node->wakeFunc) (node, ENOMEM);
    231 		}
    232 	}
    233 	return (0);
    234 }
    235 
    236 int
    237 rf_ParityLogUpdateUndoFunc(node)
    238 	RF_DagNode_t *node;
    239 {
    240 	return (0);
    241 }
    242 
    243 int
    244 rf_ParityLogOverwriteUndoFunc(node)
    245 	RF_DagNode_t *node;
    246 {
    247 	return (0);
    248 }
    249 #endif				/* RF_INCLUDE_PARITYLOGGING > 0 */
    250 
    251 /*****************************************************************************
    252  * the execution function associated with a NOP node
    253  ****************************************************************************/
    254 int
    255 rf_NullNodeFunc(node)
    256 	RF_DagNode_t *node;
    257 {
    258 	node->status = rf_good;
    259 	return (rf_FinishNode(node, RF_THREAD_CONTEXT));
    260 }
    261 
    262 int
    263 rf_NullNodeUndoFunc(node)
    264 	RF_DagNode_t *node;
    265 {
    266 	node->status = rf_undone;
    267 	return (rf_FinishNode(node, RF_THREAD_CONTEXT));
    268 }
    269 
    270 
    271 /*****************************************************************************
    272  * the execution function associated with a disk-read node
    273  ****************************************************************************/
    274 int
    275 rf_DiskReadFuncForThreads(node)
    276 	RF_DagNode_t *node;
    277 {
    278 	RF_DiskQueueData_t *req;
    279 	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
    280 	caddr_t buf = (caddr_t) node->params[1].p;
    281 	RF_StripeNum_t parityStripeID = (RF_StripeNum_t) node->params[2].v;
    282 	unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
    283 	unsigned lock = RF_EXTRACT_LOCK_FLAG(node->params[3].v);
    284 	unsigned unlock = RF_EXTRACT_UNLOCK_FLAG(node->params[3].v);
    285 	unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
    286 	RF_DiskQueueDataFlags_t flags = 0;
    287 	RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ? RF_IO_TYPE_READ : RF_IO_TYPE_NOP;
    288 	RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
    289 	void   *b_proc = NULL;
    290 
    291 	if (node->dagHdr->bp)
    292 		b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;
    293 
    294 	RF_ASSERT(!(lock && unlock));
    295 	flags |= (lock) ? RF_LOCK_DISK_QUEUE : 0;
    296 	flags |= (unlock) ? RF_UNLOCK_DISK_QUEUE : 0;
    297 
    298 	req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
    299 	    buf, parityStripeID, which_ru,
    300 	    (int (*) (void *, int)) node->wakeFunc,
    301 	    node, NULL, node->dagHdr->tracerec,
    302 	    (void *) (node->dagHdr->raidPtr), flags, b_proc);
    303 	if (!req) {
    304 		(node->wakeFunc) (node, ENOMEM);
    305 	} else {
    306 		node->dagFuncData = (void *) req;
    307 		rf_DiskIOEnqueue(&(dqs[pda->col]), req, priority);
    308 	}
    309 	return (0);
    310 }
    311 
    312 
    313 /*****************************************************************************
    314  * the execution function associated with a disk-write node
    315  ****************************************************************************/
    316 int
    317 rf_DiskWriteFuncForThreads(node)
    318 	RF_DagNode_t *node;
    319 {
    320 	RF_DiskQueueData_t *req;
    321 	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
    322 	caddr_t buf = (caddr_t) node->params[1].p;
    323 	RF_StripeNum_t parityStripeID = (RF_StripeNum_t) node->params[2].v;
    324 	unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
    325 	unsigned lock = RF_EXTRACT_LOCK_FLAG(node->params[3].v);
    326 	unsigned unlock = RF_EXTRACT_UNLOCK_FLAG(node->params[3].v);
    327 	unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
    328 	RF_DiskQueueDataFlags_t flags = 0;
    329 	RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ? RF_IO_TYPE_WRITE : RF_IO_TYPE_NOP;
    330 	RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
    331 	void   *b_proc = NULL;
    332 
    333 	if (node->dagHdr->bp)
    334 		b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;
    335 
    336 	/* normal processing (rollaway or forward recovery) begins here */
    337 	RF_ASSERT(!(lock && unlock));
    338 	flags |= (lock) ? RF_LOCK_DISK_QUEUE : 0;
    339 	flags |= (unlock) ? RF_UNLOCK_DISK_QUEUE : 0;
    340 	req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
    341 	    buf, parityStripeID, which_ru,
    342 	    (int (*) (void *, int)) node->wakeFunc,
    343 	    (void *) node, NULL,
    344 	    node->dagHdr->tracerec,
    345 	    (void *) (node->dagHdr->raidPtr),
    346 	    flags, b_proc);
    347 
    348 	if (!req) {
    349 		(node->wakeFunc) (node, ENOMEM);
    350 	} else {
    351 		node->dagFuncData = (void *) req;
    352 		rf_DiskIOEnqueue(&(dqs[pda->col]), req, priority);
    353 	}
    354 
    355 	return (0);
    356 }
    357 /*****************************************************************************
    358  * the undo function for disk nodes
    359  * Note:  this is not a proper undo of a write node, only locks are released.
    360  *        old data is not restored to disk!
    361  ****************************************************************************/
    362 int
    363 rf_DiskUndoFunc(node)
    364 	RF_DagNode_t *node;
    365 {
    366 	RF_DiskQueueData_t *req;
    367 	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
    368 	RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
    369 
    370 	req = rf_CreateDiskQueueData(RF_IO_TYPE_NOP,
    371 	    0L, 0, NULL, 0L, 0,
    372 	    (int (*) (void *, int)) node->wakeFunc,
    373 	    (void *) node,
    374 	    NULL, node->dagHdr->tracerec,
    375 	    (void *) (node->dagHdr->raidPtr),
    376 	    RF_UNLOCK_DISK_QUEUE, NULL);
    377 	if (!req)
    378 		(node->wakeFunc) (node, ENOMEM);
    379 	else {
    380 		node->dagFuncData = (void *) req;
    381 		rf_DiskIOEnqueue(&(dqs[pda->col]), req, RF_IO_NORMAL_PRIORITY);
    382 	}
    383 
    384 	return (0);
    385 }
    386 /*****************************************************************************
    387  * the execution function associated with an "unlock disk queue" node
    388  ****************************************************************************/
    389 int
    390 rf_DiskUnlockFuncForThreads(node)
    391 	RF_DagNode_t *node;
    392 {
    393 	RF_DiskQueueData_t *req;
    394 	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
    395 	RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
    396 
    397 	req = rf_CreateDiskQueueData(RF_IO_TYPE_NOP,
    398 	    0L, 0, NULL, 0L, 0,
    399 	    (int (*) (void *, int)) node->wakeFunc,
    400 	    (void *) node,
    401 	    NULL, node->dagHdr->tracerec,
    402 	    (void *) (node->dagHdr->raidPtr),
    403 	    RF_UNLOCK_DISK_QUEUE, NULL);
    404 	if (!req)
    405 		(node->wakeFunc) (node, ENOMEM);
    406 	else {
    407 		node->dagFuncData = (void *) req;
    408 		rf_DiskIOEnqueue(&(dqs[pda->col]), req, RF_IO_NORMAL_PRIORITY);
    409 	}
    410 
    411 	return (0);
    412 }
    413 /*****************************************************************************
    414  * Callback routine for DiskRead and DiskWrite nodes.  When the disk
    415  * op completes, the routine is called to set the node status and
    416  * inform the execution engine that the node has fired.
    417  ****************************************************************************/
    418 int
    419 rf_GenericWakeupFunc(node, status)
    420 	RF_DagNode_t *node;
    421 	int     status;
    422 {
    423 	switch (node->status) {
    424 	case rf_bwd1:
    425 		node->status = rf_bwd2;
    426 		if (node->dagFuncData)
    427 			rf_FreeDiskQueueData((RF_DiskQueueData_t *) node->dagFuncData);
    428 		return (rf_DiskWriteFuncForThreads(node));
    429 	case rf_fired:
    430 		if (status)
    431 			node->status = rf_bad;
    432 		else
    433 			node->status = rf_good;
    434 		break;
    435 	case rf_recover:
    436 		/* probably should never reach this case */
    437 		if (status)
    438 			node->status = rf_panic;
    439 		else
    440 			node->status = rf_undone;
    441 		break;
    442 	default:
    443 		printf("rf_GenericWakeupFunc:");
    444 		printf("node->status is %d,", node->status);
    445 		printf("status is %d \n", status);
    446 		RF_PANIC();
    447 		break;
    448 	}
    449 	if (node->dagFuncData)
    450 		rf_FreeDiskQueueData((RF_DiskQueueData_t *) node->dagFuncData);
    451 	return (rf_FinishNode(node, RF_INTR_CONTEXT));
    452 }
    453 
    454 
    455 /*****************************************************************************
    456  * there are three distinct types of xor nodes:
    457 
    458  * A "regular xor" is used in the fault-free case where the access
    459  * spans a complete stripe unit.  It assumes that the result buffer is
    460  * one full stripe unit in size, and uses the stripe-unit-offset
    461  * values that it computes from the PDAs to determine where within the
    462  * stripe unit to XOR each argument buffer.
    463  *
    464  * A "simple xor" is used in the fault-free case where the access
    465  * touches only a portion of one (or two, in some cases) stripe
    466  * unit(s).  It assumes that all the argument buffers are of the same
    467  * size and have the same stripe unit offset.
    468  *
    469  * A "recovery xor" is used in the degraded-mode case.  It's similar
    470  * to the regular xor function except that it takes the failed PDA as
    471  * an additional parameter, and uses it to determine what portions of
    472  * the argument buffers need to be xor'd into the result buffer, and
    473  * where in the result buffer they should go.
    474  ****************************************************************************/
    475 
    476 /* xor the params together and store the result in the result field.
    477  * assume the result field points to a buffer that is the size of one
    478  * SU, and use the pda params to determine where within the buffer to
    479  * XOR the input buffers.  */
    480 int
    481 rf_RegularXorFunc(node)
    482 	RF_DagNode_t *node;
    483 {
    484 	RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
    485 	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
    486 	RF_Etimer_t timer;
    487 	int     i, retcode;
    488 
    489 	retcode = 0;
    490 	if (node->dagHdr->status == rf_enable) {
    491 		/* don't do the XOR if the input is the same as the output */
    492 		RF_ETIMER_START(timer);
    493 		for (i = 0; i < node->numParams - 1; i += 2)
    494 			if (node->params[i + 1].p != node->results[0]) {
    495 				retcode = rf_XorIntoBuffer(raidPtr, (RF_PhysDiskAddr_t *) node->params[i].p,
    496 				    (char *) node->params[i + 1].p, (char *) node->results[0], node->dagHdr->bp);
    497 			}
    498 		RF_ETIMER_STOP(timer);
    499 		RF_ETIMER_EVAL(timer);
    500 		tracerec->xor_us += RF_ETIMER_VAL_US(timer);
    501 	}
    502 	return (rf_GenericWakeupFunc(node, retcode));	/* call wake func
    503 							 * explicitly since no
    504 							 * I/O in this node */
    505 }
    506 /* xor the inputs into the result buffer, ignoring placement issues */
    507 int
    508 rf_SimpleXorFunc(node)
    509 	RF_DagNode_t *node;
    510 {
    511 	RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
    512 	int     i, retcode = 0;
    513 	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
    514 	RF_Etimer_t timer;
    515 
    516 	if (node->dagHdr->status == rf_enable) {
    517 		RF_ETIMER_START(timer);
    518 		/* don't do the XOR if the input is the same as the output */
    519 		for (i = 0; i < node->numParams - 1; i += 2)
    520 			if (node->params[i + 1].p != node->results[0]) {
    521 				retcode = rf_bxor((char *) node->params[i + 1].p, (char *) node->results[0],
    522 				    rf_RaidAddressToByte(raidPtr, ((RF_PhysDiskAddr_t *) node->params[i].p)->numSector),
    523 				    (struct buf *) node->dagHdr->bp);
    524 			}
    525 		RF_ETIMER_STOP(timer);
    526 		RF_ETIMER_EVAL(timer);
    527 		tracerec->xor_us += RF_ETIMER_VAL_US(timer);
    528 	}
    529 	return (rf_GenericWakeupFunc(node, retcode));	/* call wake func
    530 							 * explicitly since no
    531 							 * I/O in this node */
    532 }
    533 /* this xor is used by the degraded-mode dag functions to recover lost
    534  * data.  the second-to-last parameter is the PDA for the failed
    535  * portion of the access.  the code here looks at this PDA and assumes
    536  * that the xor target buffer is equal in size to the number of
    537  * sectors in the failed PDA.  It then uses the other PDAs in the
    538  * parameter list to determine where within the target buffer the
    539  * corresponding data should be xored.  */
    540 int
    541 rf_RecoveryXorFunc(node)
    542 	RF_DagNode_t *node;
    543 {
    544 	RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
    545 	RF_RaidLayout_t *layoutPtr = (RF_RaidLayout_t *) & raidPtr->Layout;
    546 	RF_PhysDiskAddr_t *failedPDA = (RF_PhysDiskAddr_t *) node->params[node->numParams - 2].p;
    547 	int     i, retcode = 0;
    548 	RF_PhysDiskAddr_t *pda;
    549 	int     suoffset, failedSUOffset = rf_StripeUnitOffset(layoutPtr, failedPDA->startSector);
    550 	char   *srcbuf, *destbuf;
    551 	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
    552 	RF_Etimer_t timer;
    553 
    554 	if (node->dagHdr->status == rf_enable) {
    555 		RF_ETIMER_START(timer);
    556 		for (i = 0; i < node->numParams - 2; i += 2)
    557 			if (node->params[i + 1].p != node->results[0]) {
    558 				pda = (RF_PhysDiskAddr_t *) node->params[i].p;
    559 				srcbuf = (char *) node->params[i + 1].p;
    560 				suoffset = rf_StripeUnitOffset(layoutPtr, pda->startSector);
    561 				destbuf = ((char *) node->results[0]) + rf_RaidAddressToByte(raidPtr, suoffset - failedSUOffset);
    562 				retcode = rf_bxor(srcbuf, destbuf, rf_RaidAddressToByte(raidPtr, pda->numSector), node->dagHdr->bp);
    563 			}
    564 		RF_ETIMER_STOP(timer);
    565 		RF_ETIMER_EVAL(timer);
    566 		tracerec->xor_us += RF_ETIMER_VAL_US(timer);
    567 	}
    568 	return (rf_GenericWakeupFunc(node, retcode));
    569 }
    570 /*****************************************************************************
    571  * The next three functions are utilities used by the above
    572  * xor-execution functions.
    573  ****************************************************************************/
    574 
    575 
    576 /*
    577  * this is just a glorified buffer xor.  targbuf points to a buffer
    578  * that is one full stripe unit in size.  srcbuf points to a buffer
    579  * that may be less than 1 SU, but never more.  When the access
    580  * described by pda is one SU in size (which by implication means it's
    581  * SU-aligned), all that happens is (targbuf) <- (srcbuf ^ targbuf).
    582  * When the access is less than one SU in size the XOR occurs on only
    583  * the portion of targbuf identified in the pda.  */
    584 
    585 int
    586 rf_XorIntoBuffer(raidPtr, pda, srcbuf, targbuf, bp)
    587 	RF_Raid_t *raidPtr;
    588 	RF_PhysDiskAddr_t *pda;
    589 	char   *srcbuf;
    590 	char   *targbuf;
    591 	void   *bp;
    592 {
    593 	char   *targptr;
    594 	int     sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
    595 	int     SUOffset = pda->startSector % sectPerSU;
    596 	int     length, retcode = 0;
    597 
    598 	RF_ASSERT(pda->numSector <= sectPerSU);
    599 
    600 	targptr = targbuf + rf_RaidAddressToByte(raidPtr, SUOffset);
    601 	length = rf_RaidAddressToByte(raidPtr, pda->numSector);
    602 	retcode = rf_bxor(srcbuf, targptr, length, bp);
    603 	return (retcode);
    604 }
    605 /* it really should be the case that the buffer pointers (returned by
    606  * malloc) are aligned to the natural word size of the machine, so
    607  * this is the only case we optimize for.  The length should always be
    608  * a multiple of the sector size, so there should be no problem with
    609  * leftover bytes at the end.  */
    610 int
    611 rf_bxor(src, dest, len, bp)
    612 	char   *src;
    613 	char   *dest;
    614 	int     len;
    615 	void   *bp;
    616 {
    617 	unsigned mask = sizeof(long) - 1, retcode = 0;
    618 
    619 	if (!(((unsigned long) src) & mask) &&
    620 	    !(((unsigned long) dest) & mask) && !(len & mask)) {
    621 		retcode = rf_longword_bxor((unsigned long *) src,
    622 					   (unsigned long *) dest,
    623 					   len >> RF_LONGSHIFT, bp);
    624 	} else {
    625 		RF_ASSERT(0);
    626 	}
    627 	return (retcode);
    628 }
    629 /* map a user buffer into kernel space, if necessary */
    630 #define REMAP_VA(_bp,x,y) (y) = (x)
    631 
/* When XORing in kernel mode, we need to map each user page to kernel
 * space before we can access it.  We don't want to assume anything
 * about which input buffers are in kernel/user space, nor about their
 * alignment, so in each loop we compute the maximum number of bytes
 * that we can xor without crossing any page boundaries, and do only
 * this many bytes before the next remap.
 *
 * NOTE: in this build REMAP_VA is the identity mapping (see the macro
 * above), so pg_src/pg_dest simply track src/dest; the page-boundary
 * remapping structure is retained from the original implementation.
 *
 * src, dest: long-aligned buffers; len: count in longwords (not bytes).
 * dest <- dest ^ src.  Returns 0 on success, EFAULT if a remap yields
 * a null pointer. */
int
rf_longword_bxor(src, dest, len, bp)
	unsigned long *src;
	unsigned long *dest;
	int     len;		/* longwords */
	void   *bp;
{
	unsigned long *end = src + len;
	unsigned long d0, d1, d2, d3, s0, s1, s2, s3;	/* temps */
	unsigned long *pg_src, *pg_dest;   /* per-page source/dest pointers */
	int     longs_this_time;/* # longwords to xor in the current iteration */

	REMAP_VA(bp, src, pg_src);
	REMAP_VA(bp, dest, pg_dest);
	if (!pg_src || !pg_dest)
		return (EFAULT);

	while (len >= 4) {
		/* xor only up to the nearest page boundary of either
		 * pointer, so no remap is needed mid-run */
		longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(pg_src), RF_BLIP(pg_dest)) >> RF_LONGSHIFT);	/* note len in longwords */
		src += longs_this_time;
		dest += longs_this_time;
		len -= longs_this_time;
		/* unrolled 4-at-a-time xor: load all eight words before
		 * storing, to give the compiler scheduling freedom */
		while (longs_this_time >= 4) {
			d0 = pg_dest[0];
			d1 = pg_dest[1];
			d2 = pg_dest[2];
			d3 = pg_dest[3];
			s0 = pg_src[0];
			s1 = pg_src[1];
			s2 = pg_src[2];
			s3 = pg_src[3];
			pg_dest[0] = d0 ^ s0;
			pg_dest[1] = d1 ^ s1;
			pg_dest[2] = d2 ^ s2;
			pg_dest[3] = d3 ^ s3;
			pg_src += 4;
			pg_dest += 4;
			longs_this_time -= 4;
		}
		while (longs_this_time > 0) {	/* cannot cross any page
						 * boundaries here */
			*pg_dest++ ^= *pg_src++;
			longs_this_time--;
		}

		/* either we're done, or we've reached a page boundary on one
		 * (or possibly both) of the pointers */
		if (len) {
			if (RF_PAGE_ALIGNED(src))
				REMAP_VA(bp, src, pg_src);
			if (RF_PAGE_ALIGNED(dest))
				REMAP_VA(bp, dest, pg_dest);
			if (!pg_src || !pg_dest)
				return (EFAULT);
		}
	}
	/* fewer than 4 longwords remain: finish one word at a time,
	 * remapping whenever either pointer crosses onto a new page */
	while (src < end) {
		*pg_dest++ ^= *pg_src++;
		src++;
		dest++;
		len--;
		if (RF_PAGE_ALIGNED(src))
			REMAP_VA(bp, src, pg_src);
		if (RF_PAGE_ALIGNED(dest))
			REMAP_VA(bp, dest, pg_dest);
	}
	RF_ASSERT(len == 0);
	return (0);
}
    707 
    708 #if 0
    709 /*
    710    dst = a ^ b ^ c;
    711    a may equal dst
    712    see comment above longword_bxor
    713 */
    714 int
    715 rf_longword_bxor3(dst, a, b, c, len, bp)
    716 	unsigned long *dst;
    717 	unsigned long *a;
    718 	unsigned long *b;
    719 	unsigned long *c;
    720 	int     len;		/* length in longwords */
    721 	void   *bp;
    722 {
    723 	unsigned long a0, a1, a2, a3, b0, b1, b2, b3;
    724 	unsigned long *pg_a, *pg_b, *pg_c, *pg_dst;	/* per-page source/dest
    725 								 * pointers */
    726 	int     longs_this_time;/* # longs to xor in the current iteration */
    727 	char    dst_is_a = 0;
    728 
    729 	REMAP_VA(bp, a, pg_a);
    730 	REMAP_VA(bp, b, pg_b);
    731 	REMAP_VA(bp, c, pg_c);
    732 	if (a == dst) {
    733 		pg_dst = pg_a;
    734 		dst_is_a = 1;
    735 	} else {
    736 		REMAP_VA(bp, dst, pg_dst);
    737 	}
    738 
    739 	/* align dest to cache line.  Can't cross a pg boundary on dst here. */
    740 	while ((((unsigned long) pg_dst) & 0x1f)) {
    741 		*pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
    742 		dst++;
    743 		a++;
    744 		b++;
    745 		c++;
    746 		if (RF_PAGE_ALIGNED(a)) {
    747 			REMAP_VA(bp, a, pg_a);
    748 			if (!pg_a)
    749 				return (EFAULT);
    750 		}
    751 		if (RF_PAGE_ALIGNED(b)) {
    752 			REMAP_VA(bp, a, pg_b);
    753 			if (!pg_b)
    754 				return (EFAULT);
    755 		}
    756 		if (RF_PAGE_ALIGNED(c)) {
    757 			REMAP_VA(bp, a, pg_c);
    758 			if (!pg_c)
    759 				return (EFAULT);
    760 		}
    761 		len--;
    762 	}
    763 
    764 	while (len > 4) {
    765 		longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(a), RF_MIN(RF_BLIP(b), RF_MIN(RF_BLIP(c), RF_BLIP(dst)))) >> RF_LONGSHIFT);
    766 		a += longs_this_time;
    767 		b += longs_this_time;
    768 		c += longs_this_time;
    769 		dst += longs_this_time;
    770 		len -= longs_this_time;
    771 		while (longs_this_time >= 4) {
    772 			a0 = pg_a[0];
    773 			longs_this_time -= 4;
    774 
    775 			a1 = pg_a[1];
    776 			a2 = pg_a[2];
    777 
    778 			a3 = pg_a[3];
    779 			pg_a += 4;
    780 
    781 			b0 = pg_b[0];
    782 			b1 = pg_b[1];
    783 
    784 			b2 = pg_b[2];
    785 			b3 = pg_b[3];
    786 			/* start dual issue */
    787 			a0 ^= b0;
    788 			b0 = pg_c[0];
    789 
    790 			pg_b += 4;
    791 			a1 ^= b1;
    792 
    793 			a2 ^= b2;
    794 			a3 ^= b3;
    795 
    796 			b1 = pg_c[1];
    797 			a0 ^= b0;
    798 
    799 			b2 = pg_c[2];
    800 			a1 ^= b1;
    801 
    802 			b3 = pg_c[3];
    803 			a2 ^= b2;
    804 
    805 			pg_dst[0] = a0;
    806 			a3 ^= b3;
    807 			pg_dst[1] = a1;
    808 			pg_c += 4;
    809 			pg_dst[2] = a2;
    810 			pg_dst[3] = a3;
    811 			pg_dst += 4;
    812 		}
    813 		while (longs_this_time > 0) {	/* cannot cross any page
    814 						 * boundaries here */
    815 			*pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
    816 			longs_this_time--;
    817 		}
    818 
    819 		if (len) {
    820 			if (RF_PAGE_ALIGNED(a)) {
    821 				REMAP_VA(bp, a, pg_a);
    822 				if (!pg_a)
    823 					return (EFAULT);
    824 				if (dst_is_a)
    825 					pg_dst = pg_a;
    826 			}
    827 			if (RF_PAGE_ALIGNED(b)) {
    828 				REMAP_VA(bp, b, pg_b);
    829 				if (!pg_b)
    830 					return (EFAULT);
    831 			}
    832 			if (RF_PAGE_ALIGNED(c)) {
    833 				REMAP_VA(bp, c, pg_c);
    834 				if (!pg_c)
    835 					return (EFAULT);
    836 			}
    837 			if (!dst_is_a)
    838 				if (RF_PAGE_ALIGNED(dst)) {
    839 					REMAP_VA(bp, dst, pg_dst);
    840 					if (!pg_dst)
    841 						return (EFAULT);
    842 				}
    843 		}
    844 	}
    845 	while (len) {
    846 		*pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
    847 		dst++;
    848 		a++;
    849 		b++;
    850 		c++;
    851 		if (RF_PAGE_ALIGNED(a)) {
    852 			REMAP_VA(bp, a, pg_a);
    853 			if (!pg_a)
    854 				return (EFAULT);
    855 			if (dst_is_a)
    856 				pg_dst = pg_a;
    857 		}
    858 		if (RF_PAGE_ALIGNED(b)) {
    859 			REMAP_VA(bp, b, pg_b);
    860 			if (!pg_b)
    861 				return (EFAULT);
    862 		}
    863 		if (RF_PAGE_ALIGNED(c)) {
    864 			REMAP_VA(bp, c, pg_c);
    865 			if (!pg_c)
    866 				return (EFAULT);
    867 		}
    868 		if (!dst_is_a)
    869 			if (RF_PAGE_ALIGNED(dst)) {
    870 				REMAP_VA(bp, dst, pg_dst);
    871 				if (!pg_dst)
    872 					return (EFAULT);
    873 			}
    874 		len--;
    875 	}
    876 	return (0);
    877 }
    878 
    879 int
    880 rf_bxor3(dst, a, b, c, len, bp)
    881 	unsigned char *dst;
    882 	unsigned char *a;
    883 	unsigned char *b;
    884 	unsigned char *c;
    885 	unsigned long len;
    886 	void   *bp;
    887 {
    888 	RF_ASSERT(((RF_UL(dst) | RF_UL(a) | RF_UL(b) | RF_UL(c) | len) & 0x7) == 0);
    889 
    890 	return (rf_longword_bxor3((unsigned long *) dst, (unsigned long *) a,
    891 		(unsigned long *) b, (unsigned long *) c, len >> RF_LONGSHIFT, bp));
    892 }
    893 #endif
    894