/* $NetBSD: rf_dagfuncs.c,v 1.19 2004/03/01 23:30:58 oster Exp $ */
2 /*
3 * Copyright (c) 1995 Carnegie-Mellon University.
4 * All rights reserved.
5 *
6 * Author: Mark Holland, William V. Courtright II
7 *
8 * Permission to use, copy, modify and distribute this software and
9 * its documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17 *
18 * Carnegie Mellon requests users of this software to return to
19 *
20 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
21 * School of Computer Science
22 * Carnegie Mellon University
23 * Pittsburgh PA 15213-3890
24 *
25 * any improvements or extensions that they make and grant Carnegie the
26 * rights to redistribute these changes.
27 */
28
29 /*
30 * dagfuncs.c -- DAG node execution routines
31 *
32 * Rules:
33 * 1. Every DAG execution function must eventually cause node->status to
34 * get set to "good" or "bad", and "FinishNode" to be called. In the
35 * case of nodes that complete immediately (xor, NullNodeFunc, etc),
36 * the node execution function can do these two things directly. In
37 * the case of nodes that have to wait for some event (a disk read to
38 * complete, a lock to be released, etc) to occur before they can
39 * complete, this is typically achieved by having whatever module
40 * is doing the operation call GenericWakeupFunc upon completion.
41 * 2. DAG execution functions should check the status in the DAG header
42 * and NOP out their operations if the status is not "enable". However,
43 * execution functions that release resources must be sure to release
44 * them even when they NOP out the function that would use them.
45 * Functions that acquire resources should go ahead and acquire them
46 * even when they NOP, so that a downstream release node will not have
47 * to check to find out whether or not the acquire was suppressed.
48 */
49
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: rf_dagfuncs.c,v 1.19 2004/03/01 23:30:58 oster Exp $");
52
53 #include <sys/param.h>
54 #include <sys/ioctl.h>
55
56 #include "rf_archs.h"
57 #include "rf_raid.h"
58 #include "rf_dag.h"
59 #include "rf_layout.h"
60 #include "rf_etimer.h"
61 #include "rf_acctrace.h"
62 #include "rf_diskqueue.h"
63 #include "rf_dagfuncs.h"
64 #include "rf_general.h"
65 #include "rf_engine.h"
66 #include "rf_dagutils.h"
67
68 #include "rf_kintf.h"
69
70 #if RF_INCLUDE_PARITYLOGGING > 0
71 #include "rf_paritylog.h"
72 #endif /* RF_INCLUDE_PARITYLOGGING > 0 */
73
/*
 * Indirection table for the DAG node primitives.  These pointers are
 * bound to the thread-context implementations (and to the shared no-op
 * undo functions) by rf_ConfigureDAGFuncs() below; DAG construction code
 * references the pointers rather than the implementations directly.
 */
int     (*rf_DiskReadFunc) (RF_DagNode_t *);
int     (*rf_DiskWriteFunc) (RF_DagNode_t *);
int     (*rf_DiskReadUndoFunc) (RF_DagNode_t *);
int     (*rf_DiskWriteUndoFunc) (RF_DagNode_t *);
int     (*rf_DiskUnlockFunc) (RF_DagNode_t *);
int     (*rf_DiskUnlockUndoFunc) (RF_DagNode_t *);
int     (*rf_RegularXorUndoFunc) (RF_DagNode_t *);
int     (*rf_SimpleXorUndoFunc) (RF_DagNode_t *);
int     (*rf_RecoveryXorUndoFunc) (RF_DagNode_t *);
83
84 /*****************************************************************************
85 * main (only) configuration routine for this module
86 ****************************************************************************/
87 int
88 rf_ConfigureDAGFuncs(RF_ShutdownList_t **listp)
89 {
90 RF_ASSERT(((sizeof(long) == 8) && RF_LONGSHIFT == 3) ||
91 ((sizeof(long) == 4) && RF_LONGSHIFT == 2));
92 rf_DiskReadFunc = rf_DiskReadFuncForThreads;
93 rf_DiskReadUndoFunc = rf_DiskUndoFunc;
94 rf_DiskWriteFunc = rf_DiskWriteFuncForThreads;
95 rf_DiskWriteUndoFunc = rf_DiskUndoFunc;
96 rf_DiskUnlockFunc = rf_DiskUnlockFuncForThreads;
97 rf_DiskUnlockUndoFunc = rf_NullNodeUndoFunc;
98 rf_RegularXorUndoFunc = rf_NullNodeUndoFunc;
99 rf_SimpleXorUndoFunc = rf_NullNodeUndoFunc;
100 rf_RecoveryXorUndoFunc = rf_NullNodeUndoFunc;
101 return (0);
102 }
103
104
105
106 /*****************************************************************************
107 * the execution function associated with a terminate node
108 ****************************************************************************/
109 int
110 rf_TerminateFunc(RF_DagNode_t *node)
111 {
112 RF_ASSERT(node->dagHdr->numCommits == node->dagHdr->numCommitNodes);
113 node->status = rf_good;
114 return (rf_FinishNode(node, RF_THREAD_CONTEXT));
115 }
116
117 int
118 rf_TerminateUndoFunc(RF_DagNode_t *node)
119 {
120 return (0);
121 }
122
123
124 /*****************************************************************************
125 * execution functions associated with a mirror node
126 *
127 * parameters:
128 *
129 * 0 - physical disk addres of data
130 * 1 - buffer for holding read data
131 * 2 - parity stripe ID
132 * 3 - flags
133 * 4 - physical disk address of mirror (parity)
134 *
135 ****************************************************************************/
136
137 int
138 rf_DiskReadMirrorIdleFunc(RF_DagNode_t *node)
139 {
140 /* select the mirror copy with the shortest queue and fill in node
141 * parameters with physical disk address */
142
143 rf_SelectMirrorDiskIdle(node);
144 return (rf_DiskReadFunc(node));
145 }
146
147 #if (RF_INCLUDE_CHAINDECLUSTER > 0) || (RF_INCLUDE_INTERDECLUSTER > 0) || (RF_DEBUG_VALIDATE_DAG > 0)
148 int
149 rf_DiskReadMirrorPartitionFunc(RF_DagNode_t *node)
150 {
151 /* select the mirror copy with the shortest queue and fill in node
152 * parameters with physical disk address */
153
154 rf_SelectMirrorDiskPartition(node);
155 return (rf_DiskReadFunc(node));
156 }
157 #endif
158
159 int
160 rf_DiskReadMirrorUndoFunc(RF_DagNode_t *node)
161 {
162 return (0);
163 }
164
165
166
167 #if RF_INCLUDE_PARITYLOGGING > 0
168 /*****************************************************************************
169 * the execution function associated with a parity log update node
170 ****************************************************************************/
/*
 * Execution function for a parity-log update node: package the node's
 * data (params[0] = PDA, params[1] = buffer) into a parity-log record
 * and append it to the log.  Completion is signalled asynchronously via
 * node->wakeFunc; allocation failure wakes the node with ENOMEM.
 * Always returns 0.
 *
 * NOTE(review): `timer` is declared and started only when RF_ACC_TRACE > 0,
 * yet it is passed to rf_CreateParityLogData() unconditionally below —
 * confirm the RF_ACC_TRACE == 0 configuration still compiles (the
 * prototype may omit the tracing arguments in that case).
 */
int
rf_ParityLogUpdateFunc(RF_DagNode_t *node)
{
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	caddr_t buf = (caddr_t) node->params[1].p;
	RF_ParityLogData_t *logData;
#if RF_ACC_TRACE > 0
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;
#endif

	/* NOP unless the DAG is enabled */
	if (node->dagHdr->status == rf_enable) {
#if RF_ACC_TRACE > 0
		RF_ETIMER_START(timer);
#endif
		logData = rf_CreateParityLogData(RF_UPDATE, pda, buf,
		    (RF_Raid_t *) (node->dagHdr->raidPtr),
		    node->wakeFunc, (void *) node,
		    node->dagHdr->tracerec, timer);
		if (logData)
			rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
		else {
			/* could not allocate the log record; charge the
			 * elapsed time and fail the node with ENOMEM */
#if RF_ACC_TRACE > 0
			RF_ETIMER_STOP(timer);
			RF_ETIMER_EVAL(timer);
			tracerec->plog_us += RF_ETIMER_VAL_US(timer);
#endif
			(node->wakeFunc) (node, ENOMEM);
		}
	}
	return (0);
}
203
204
205 /*****************************************************************************
206 * the execution function associated with a parity log overwrite node
207 ****************************************************************************/
/*
 * Execution function for a parity-log overwrite node: same shape as
 * rf_ParityLogUpdateFunc above, but the record is created with
 * RF_OVERWRITE instead of RF_UPDATE.  Always returns 0; allocation
 * failure wakes the node with ENOMEM.
 *
 * NOTE(review): as in rf_ParityLogUpdateFunc, `timer` exists only when
 * RF_ACC_TRACE > 0 but is passed to rf_CreateParityLogData()
 * unconditionally — verify the RF_ACC_TRACE == 0 build.
 */
int
rf_ParityLogOverwriteFunc(RF_DagNode_t *node)
{
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	caddr_t buf = (caddr_t) node->params[1].p;
	RF_ParityLogData_t *logData;
#if RF_ACC_TRACE > 0
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;
#endif

	/* NOP unless the DAG is enabled */
	if (node->dagHdr->status == rf_enable) {
#if RF_ACC_TRACE > 0
		RF_ETIMER_START(timer);
#endif
		logData = rf_CreateParityLogData(RF_OVERWRITE, pda, buf,
		    (RF_Raid_t *) (node->dagHdr->raidPtr),
		    node->wakeFunc, (void *) node, node->dagHdr->tracerec, timer);
		if (logData)
			rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
		else {
			/* allocation failed: account the time, fail the node */
#if RF_ACC_TRACE > 0
			RF_ETIMER_STOP(timer);
			RF_ETIMER_EVAL(timer);
			tracerec->plog_us += RF_ETIMER_VAL_US(timer);
#endif
			(node->wakeFunc) (node, ENOMEM);
		}
	}
	return (0);
}
239
240 int
241 rf_ParityLogUpdateUndoFunc(RF_DagNode_t *node)
242 {
243 return (0);
244 }
245
246 int
247 rf_ParityLogOverwriteUndoFunc(RF_DagNode_t *node)
248 {
249 return (0);
250 }
251 #endif /* RF_INCLUDE_PARITYLOGGING > 0 */
252
253 /*****************************************************************************
254 * the execution function associated with a NOP node
255 ****************************************************************************/
256 int
257 rf_NullNodeFunc(RF_DagNode_t *node)
258 {
259 node->status = rf_good;
260 return (rf_FinishNode(node, RF_THREAD_CONTEXT));
261 }
262
263 int
264 rf_NullNodeUndoFunc(RF_DagNode_t *node)
265 {
266 node->status = rf_undone;
267 return (rf_FinishNode(node, RF_THREAD_CONTEXT));
268 }
269
270
271 /*****************************************************************************
272 * the execution function associated with a disk-read node
273 ****************************************************************************/
/*
 * Execution function for a disk-read node: build a disk-queue request
 * from the node's parameters and enqueue it on the target disk's queue.
 * Completion is reported asynchronously through node->wakeFunc.
 *
 * params[0] - physical disk address (RF_PhysDiskAddr_t *)
 * params[1] - buffer to read into
 * params[2] - parity stripe ID
 * params[3] - packed priority / reconstruction-unit number
 *
 * Always returns 0; failure to allocate the request is reported by
 * waking the node with ENOMEM.
 */
int
rf_DiskReadFuncForThreads(RF_DagNode_t *node)
{
	RF_DiskQueueData_t *req;
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	caddr_t buf = (caddr_t) node->params[1].p;
	RF_StripeNum_t parityStripeID = (RF_StripeNum_t) node->params[2].v;
	unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
	unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
	/* if the DAG is not enabled, issue the request as a NOP (it is
	 * still enqueued so queue state stays consistent) */
	RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ? RF_IO_TYPE_READ : RF_IO_TYPE_NOP;
	RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
	void *b_proc = NULL;

	/* propagate the originating process from the buf, if any */
	if (node->dagHdr->bp)
		b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;

	req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
	    buf, parityStripeID, which_ru,
	    (int (*) (void *, int)) node->wakeFunc,
	    node, NULL,
#if RF_ACC_TRACE > 0
	    node->dagHdr->tracerec,
#else
	    NULL,
#endif
	    (void *) (node->dagHdr->raidPtr), 0, b_proc);
	if (!req) {
		(node->wakeFunc) (node, ENOMEM);
	} else {
		/* remember the request so the wakeup function can free it */
		node->dagFuncData = (void *) req;
		rf_DiskIOEnqueue(&(dqs[pda->col]), req, priority);
	}
	return (0);
}
308
309
310 /*****************************************************************************
311 * the execution function associated with a disk-write node
312 ****************************************************************************/
/*
 * Execution function for a disk-write node: build a disk-queue request
 * from the node's parameters and enqueue it on the target disk's queue.
 * Same parameter layout as rf_DiskReadFuncForThreads:
 *
 * params[0] - physical disk address (RF_PhysDiskAddr_t *)
 * params[1] - buffer to write from
 * params[2] - parity stripe ID
 * params[3] - packed priority / reconstruction-unit number
 *
 * Always returns 0; failure to allocate the request is reported by
 * waking the node with ENOMEM.
 */
int
rf_DiskWriteFuncForThreads(RF_DagNode_t *node)
{
	RF_DiskQueueData_t *req;
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	caddr_t buf = (caddr_t) node->params[1].p;
	RF_StripeNum_t parityStripeID = (RF_StripeNum_t) node->params[2].v;
	unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
	unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
	/* suppressed DAGs still enqueue a NOP request */
	RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ? RF_IO_TYPE_WRITE : RF_IO_TYPE_NOP;
	RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
	void *b_proc = NULL;

	/* propagate the originating process from the buf, if any */
	if (node->dagHdr->bp)
		b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;

	/* normal processing (rollaway or forward recovery) begins here */
	req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
	    buf, parityStripeID, which_ru,
	    (int (*) (void *, int)) node->wakeFunc,
	    (void *) node, NULL,
#if RF_ACC_TRACE > 0
	    node->dagHdr->tracerec,
#else
	    NULL,
#endif
	    (void *) (node->dagHdr->raidPtr),
	    0, b_proc);

	if (!req) {
		(node->wakeFunc) (node, ENOMEM);
	} else {
		/* remember the request so the wakeup function can free it */
		node->dagFuncData = (void *) req;
		rf_DiskIOEnqueue(&(dqs[pda->col]), req, priority);
	}

	return (0);
}
351 /*****************************************************************************
352 * the undo function for disk nodes
353 * Note: this is not a proper undo of a write node, only locks are released.
354 * old data is not restored to disk!
355 ****************************************************************************/
/*
 * Undo function for disk read/write nodes.  This is NOT a true undo of a
 * write: no data is restored to disk.  It only enqueues a NOP request
 * with RF_UNLOCK_DISK_QUEUE so the disk queue lock held by the forward
 * node is released.  (Body is intentionally the same as
 * rf_DiskUnlockFuncForThreads below.)
 *
 * Always returns 0; allocation failure wakes the node with ENOMEM.
 */
int
rf_DiskUndoFunc(RF_DagNode_t *node)
{
	RF_DiskQueueData_t *req;
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;

	/* a NOP request whose only effect is to unlock the queue */
	req = rf_CreateDiskQueueData(RF_IO_TYPE_NOP,
	    0L, 0, NULL, 0L, 0,
	    (int (*) (void *, int)) node->wakeFunc,
	    (void *) node,
	    NULL,
#if RF_ACC_TRACE > 0
	    node->dagHdr->tracerec,
#else
	    NULL,
#endif
	    (void *) (node->dagHdr->raidPtr),
	    RF_UNLOCK_DISK_QUEUE, NULL);
	if (!req)
		(node->wakeFunc) (node, ENOMEM);
	else {
		node->dagFuncData = (void *) req;
		rf_DiskIOEnqueue(&(dqs[pda->col]), req, RF_IO_NORMAL_PRIORITY);
	}

	return (0);
}
384 /*****************************************************************************
385 * the execution function associated with an "unlock disk queue" node
386 ****************************************************************************/
/*
 * Execution function for an "unlock disk queue" node: enqueue a NOP
 * request flagged RF_UNLOCK_DISK_QUEUE on the disk named by params[0],
 * releasing the queue lock when it is processed.
 *
 * Always returns 0; allocation failure wakes the node with ENOMEM.
 */
int
rf_DiskUnlockFuncForThreads(RF_DagNode_t *node)
{
	RF_DiskQueueData_t *req;
	RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
	RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;

	/* a NOP request whose only effect is to unlock the queue */
	req = rf_CreateDiskQueueData(RF_IO_TYPE_NOP,
	    0L, 0, NULL, 0L, 0,
	    (int (*) (void *, int)) node->wakeFunc,
	    (void *) node,
	    NULL,
#if RF_ACC_TRACE > 0
	    node->dagHdr->tracerec,
#else
	    NULL,
#endif
	    (void *) (node->dagHdr->raidPtr),
	    RF_UNLOCK_DISK_QUEUE, NULL);
	if (!req)
		(node->wakeFunc) (node, ENOMEM);
	else {
		node->dagFuncData = (void *) req;
		rf_DiskIOEnqueue(&(dqs[pda->col]), req, RF_IO_NORMAL_PRIORITY);
	}

	return (0);
}
415 /*****************************************************************************
416 * Callback routine for DiskRead and DiskWrite nodes. When the disk
417 * op completes, the routine is called to set the node status and
418 * inform the execution engine that the node has fired.
419 ****************************************************************************/
/*
 * Callback routine for DiskRead and DiskWrite nodes.  When the disk op
 * completes, this sets the node status from the I/O status (0 == OK),
 * frees the disk-queue request, and informs the execution engine that
 * the node has fired.
 */
int
rf_GenericWakeupFunc(RF_DagNode_t *node, int status)
{

	switch (node->status) {
	case rf_bwd1:
		/*
		 * presumably the first phase of a backward-recovery write:
		 * advance to rf_bwd2, free this phase's request, and reissue
		 * the node as a write — TODO confirm against rf_dag.h.
		 */
		node->status = rf_bwd2;
		if (node->dagFuncData)
			rf_FreeDiskQueueData((RF_DiskQueueData_t *) node->dagFuncData);
		return (rf_DiskWriteFuncForThreads(node));
	case rf_fired:
		/* normal forward execution: map I/O status to good/bad */
		if (status)
			node->status = rf_bad;
		else
			node->status = rf_good;
		break;
	case rf_recover:
		/* probably should never reach this case */
		if (status)
			node->status = rf_panic;
		else
			node->status = rf_undone;
		break;
	default:
		/* a node in any other state should never be waking up */
		printf("rf_GenericWakeupFunc:");
		printf("node->status is %d,", node->status);
		printf("status is %d \n", status);
		RF_PANIC();
		break;
	}
	/* free the request that carried this I/O, then fire the node */
	if (node->dagFuncData)
		rf_FreeDiskQueueData((RF_DiskQueueData_t *) node->dagFuncData);
	return (rf_FinishNode(node, RF_INTR_CONTEXT));
}
454
455
456 /*****************************************************************************
457 * there are three distinct types of xor nodes:
458
459 * A "regular xor" is used in the fault-free case where the access
460 * spans a complete stripe unit. It assumes that the result buffer is
461 * one full stripe unit in size, and uses the stripe-unit-offset
462 * values that it computes from the PDAs to determine where within the
463 * stripe unit to XOR each argument buffer.
464 *
465 * A "simple xor" is used in the fault-free case where the access
466 * touches only a portion of one (or two, in some cases) stripe
467 * unit(s). It assumes that all the argument buffers are of the same
468 * size and have the same stripe unit offset.
469 *
470 * A "recovery xor" is used in the degraded-mode case. It's similar
471 * to the regular xor function except that it takes the failed PDA as
472 * an additional parameter, and uses it to determine what portions of
473 * the argument buffers need to be xor'd into the result buffer, and
474 * where in the result buffer they should go.
475 ****************************************************************************/
476
477 /* xor the params together and store the result in the result field.
478 * assume the result field points to a buffer that is the size of one
479 * SU, and use the pda params to determine where within the buffer to
480 * XOR the input buffers. */
/*
 * "Regular" XOR node: params are (pda, buffer) pairs with the raidPtr as
 * the final parameter; results[0] points to a buffer one full stripe
 * unit in size.  Each input buffer is XORed into the result at the
 * offset its PDA dictates (via rf_XorIntoBuffer).
 *
 * Note: retcode holds only the return value of the LAST XOR performed;
 * an earlier failure would be overwritten by a later success.
 */
int
rf_RegularXorFunc(RF_DagNode_t *node)
{
	RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
#if RF_ACC_TRACE > 0
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;
#endif
	int i, retcode;

	retcode = 0;
	if (node->dagHdr->status == rf_enable) {
		/* don't do the XOR if the input is the same as the output */
#if RF_ACC_TRACE > 0
		RF_ETIMER_START(timer);
#endif
		/* step by 2: params come in (pda, buffer) pairs */
		for (i = 0; i < node->numParams - 1; i += 2)
			if (node->params[i + 1].p != node->results[0]) {
				retcode = rf_XorIntoBuffer(raidPtr, (RF_PhysDiskAddr_t *) node->params[i].p,
				    (char *) node->params[i + 1].p, (char *) node->results[0]);
			}
#if RF_ACC_TRACE > 0
		RF_ETIMER_STOP(timer);
		RF_ETIMER_EVAL(timer);
		tracerec->xor_us += RF_ETIMER_VAL_US(timer);
#endif
	}
	return (rf_GenericWakeupFunc(node, retcode));	/* call wake func
							 * explicitly since no
							 * I/O in this node */
}
512 /* xor the inputs into the result buffer, ignoring placement issues */
/*
 * "Simple" XOR node: XOR the input buffers straight into the result
 * buffer, ignoring placement — all inputs are assumed to share the same
 * size and stripe-unit offset.  Params are (pda, buffer) pairs with the
 * raidPtr last; the byte count comes from each pda's numSector.
 *
 * Note: as in rf_RegularXorFunc, retcode keeps only the last rf_bxor
 * return value.
 */
int
rf_SimpleXorFunc(RF_DagNode_t *node)
{
	RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
	int i, retcode = 0;
#if RF_ACC_TRACE > 0
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;
#endif

	if (node->dagHdr->status == rf_enable) {
#if RF_ACC_TRACE > 0
		RF_ETIMER_START(timer);
#endif
		/* don't do the XOR if the input is the same as the output */
		for (i = 0; i < node->numParams - 1; i += 2)
			if (node->params[i + 1].p != node->results[0]) {
				retcode = rf_bxor((char *) node->params[i + 1].p, (char *) node->results[0],
				    rf_RaidAddressToByte(raidPtr, ((RF_PhysDiskAddr_t *) node->params[i].p)->numSector));
			}
#if RF_ACC_TRACE > 0
		RF_ETIMER_STOP(timer);
		RF_ETIMER_EVAL(timer);
		tracerec->xor_us += RF_ETIMER_VAL_US(timer);
#endif
	}
	return (rf_GenericWakeupFunc(node, retcode));	/* call wake func
							 * explicitly since no
							 * I/O in this node */
}
543 /* this xor is used by the degraded-mode dag functions to recover lost
544 * data. the second-to-last parameter is the PDA for the failed
545 * portion of the access. the code here looks at this PDA and assumes
546 * that the xor target buffer is equal in size to the number of
547 * sectors in the failed PDA. It then uses the other PDAs in the
548 * parameter list to determine where within the target buffer the
549 * corresponding data should be xored. */
/*
 * Recovery (degraded-mode) XOR node.  params[numParams-2] is the PDA of
 * the failed portion of the access and params[numParams-1] the raidPtr;
 * the remaining params are (pda, buffer) pairs.  The result buffer is
 * assumed to be exactly as large as the failed PDA, so each source is
 * XORed in at (its SU offset - failed SU offset) bytes.
 *
 * Note: retcode keeps only the last rf_bxor return value.
 */
int
rf_RecoveryXorFunc(RF_DagNode_t *node)
{
	RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
	RF_RaidLayout_t *layoutPtr = (RF_RaidLayout_t *) & raidPtr->Layout;
	RF_PhysDiskAddr_t *failedPDA = (RF_PhysDiskAddr_t *) node->params[node->numParams - 2].p;
	int i, retcode = 0;
	RF_PhysDiskAddr_t *pda;
	int suoffset, failedSUOffset = rf_StripeUnitOffset(layoutPtr, failedPDA->startSector);
	char *srcbuf, *destbuf;
#if RF_ACC_TRACE > 0
	RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
	RF_Etimer_t timer;
#endif

	if (node->dagHdr->status == rf_enable) {
#if RF_ACC_TRACE > 0
		RF_ETIMER_START(timer);
#endif
		/* loop stops at numParams - 2: the failed PDA and raidPtr
		 * are not part of the (pda, buffer) pair list */
		for (i = 0; i < node->numParams - 2; i += 2)
			if (node->params[i + 1].p != node->results[0]) {
				pda = (RF_PhysDiskAddr_t *) node->params[i].p;
				srcbuf = (char *) node->params[i + 1].p;
				suoffset = rf_StripeUnitOffset(layoutPtr, pda->startSector);
				/* place this source relative to the failed region */
				destbuf = ((char *) node->results[0]) + rf_RaidAddressToByte(raidPtr, suoffset - failedSUOffset);
				retcode = rf_bxor(srcbuf, destbuf, rf_RaidAddressToByte(raidPtr, pda->numSector));
			}
#if RF_ACC_TRACE > 0
		RF_ETIMER_STOP(timer);
		RF_ETIMER_EVAL(timer);
		tracerec->xor_us += RF_ETIMER_VAL_US(timer);
#endif
	}
	return (rf_GenericWakeupFunc(node, retcode));
}
585 /*****************************************************************************
586 * The next three functions are utilities used by the above
587 * xor-execution functions.
588 ****************************************************************************/
589
590
591 /*
592 * this is just a glorified buffer xor. targbuf points to a buffer
593 * that is one full stripe unit in size. srcbuf points to a buffer
594 * that may be less than 1 SU, but never more. When the access
595 * described by pda is one SU in size (which by implication means it's
596 * SU-aligned), all that happens is (targbuf) <- (srcbuf ^ targbuf).
597 * When the access is less than one SU in size the XOR occurs on only
598 * the portion of targbuf identified in the pda. */
599
600 int
601 rf_XorIntoBuffer(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *pda,
602 char *srcbuf, char *targbuf)
603 {
604 char *targptr;
605 int sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
606 int SUOffset = pda->startSector % sectPerSU;
607 int length, retcode = 0;
608
609 RF_ASSERT(pda->numSector <= sectPerSU);
610
611 targptr = targbuf + rf_RaidAddressToByte(raidPtr, SUOffset);
612 length = rf_RaidAddressToByte(raidPtr, pda->numSector);
613 retcode = rf_bxor(srcbuf, targptr, length);
614 return (retcode);
615 }
616 /* it really should be the case that the buffer pointers (returned by
617 * malloc) are aligned to the natural word size of the machine, so
618 * this is the only case we optimize for. The length should always be
619 * a multiple of the sector size, so there should be no problem with
620 * leftover bytes at the end. */
621 int
622 rf_bxor(char *src, char *dest, int len)
623 {
624 unsigned mask = sizeof(long) - 1, retcode = 0;
625
626 if (!(((unsigned long) src) & mask) &&
627 !(((unsigned long) dest) & mask) && !(len & mask)) {
628 retcode = rf_longword_bxor((unsigned long *) src,
629 (unsigned long *) dest,
630 len >> RF_LONGSHIFT);
631 } else {
632 RF_ASSERT(0);
633 }
634 return (retcode);
635 }
636
637 /* When XORing in kernel mode, we need to map each user page to kernel
638 * space before we can access it. We don't want to assume anything
639 * about which input buffers are in kernel/user space, nor about their
640 * alignment, so in each loop we compute the maximum number of bytes
641 * that we can xor without crossing any page boundaries, and do only
642 * this many bytes before the next remap.
643 *
644 * len - is in longwords
645 */
/*
 * XOR `len` longwords from src into dest (dest ^= src), working through
 * the per-page pointers pg_src/pg_dest so that no inner loop crosses a
 * page boundary.  In this version pg_src/pg_dest are simply aliases of
 * src/dest (no remapping is performed here); the page-chunking structure
 * is kept from the user-space ancestor of this code — see the comment
 * above.  Returns 0, or EFAULT on a NULL pointer.
 *
 * len is in longwords.
 */
int
rf_longword_bxor(unsigned long *src, unsigned long *dest, int len)
{
	unsigned long *end = src + len;
	unsigned long d0, d1, d2, d3, s0, s1, s2, s3;	/* temps */
	unsigned long *pg_src, *pg_dest;	/* per-page source/dest pointers */
	int longs_this_time;	/* # longwords to xor in the current iteration */

	pg_src = src;
	pg_dest = dest;
	if (!pg_src || !pg_dest)
		return (EFAULT);

	while (len >= 4) {
		/* XOR at most to the nearer of the two page ends.
		 * NOTE(review): RF_BLIP(p) presumably yields the bytes
		 * remaining in p's page — confirm in rf_general.h. */
		longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(pg_src), RF_BLIP(pg_dest)) >> RF_LONGSHIFT);	/* note len in longwords */
		/* advance the "logical" pointers past this chunk now; the
		 * pg_ pointers do the actual walking below */
		src += longs_this_time;
		dest += longs_this_time;
		len -= longs_this_time;
		/* unrolled by 4 to expose instruction-level parallelism */
		while (longs_this_time >= 4) {
			d0 = pg_dest[0];
			d1 = pg_dest[1];
			d2 = pg_dest[2];
			d3 = pg_dest[3];
			s0 = pg_src[0];
			s1 = pg_src[1];
			s2 = pg_src[2];
			s3 = pg_src[3];
			pg_dest[0] = d0 ^ s0;
			pg_dest[1] = d1 ^ s1;
			pg_dest[2] = d2 ^ s2;
			pg_dest[3] = d3 ^ s3;
			pg_src += 4;
			pg_dest += 4;
			longs_this_time -= 4;
		}
		while (longs_this_time > 0) {	/* cannot cross any page
						 * boundaries here */
			*pg_dest++ ^= *pg_src++;
			longs_this_time--;
		}

		/* either we're done, or we've reached a page boundary on one
		 * (or possibly both) of the pointers */
		if (len) {
			if (RF_PAGE_ALIGNED(src))
				pg_src = src;
			if (RF_PAGE_ALIGNED(dest))
				pg_dest = dest;
			if (!pg_src || !pg_dest)
				return (EFAULT);
		}
	}
	/* fewer than 4 longwords remain: finish one at a time, re-aliasing
	 * the page pointers at each page boundary */
	while (src < end) {
		*pg_dest++ ^= *pg_src++;
		src++;
		dest++;
		len--;
		if (RF_PAGE_ALIGNED(src))
			pg_src = src;
		if (RF_PAGE_ALIGNED(dest))
			pg_dest = dest;
	}
	RF_ASSERT(len == 0);
	return (0);
}
711
712 #if 0
713 /*
714 dst = a ^ b ^ c;
715 a may equal dst
716 see comment above longword_bxor
717 len is length in longwords
718 */
719 int
720 rf_longword_bxor3(unsigned long *dst, unsigned long *a, unsigned long *b,
721 unsigned long *c, int len, void *bp)
722 {
723 unsigned long a0, a1, a2, a3, b0, b1, b2, b3;
724 unsigned long *pg_a, *pg_b, *pg_c, *pg_dst; /* per-page source/dest
725 * pointers */
726 int longs_this_time;/* # longs to xor in the current iteration */
727 char dst_is_a = 0;
728
729 pg_a = a;
730 pg_b = b;
731 pg_c = c;
732 if (a == dst) {
733 pg_dst = pg_a;
734 dst_is_a = 1;
735 } else {
736 pg_dst = dst;
737 }
738
739 /* align dest to cache line. Can't cross a pg boundary on dst here. */
740 while ((((unsigned long) pg_dst) & 0x1f)) {
741 *pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
742 dst++;
743 a++;
744 b++;
745 c++;
746 if (RF_PAGE_ALIGNED(a)) {
747 pg_a = a;
748 if (!pg_a)
749 return (EFAULT);
750 }
751 if (RF_PAGE_ALIGNED(b)) {
752 pg_b = a;
753 if (!pg_b)
754 return (EFAULT);
755 }
756 if (RF_PAGE_ALIGNED(c)) {
757 pg_c = a;
758 if (!pg_c)
759 return (EFAULT);
760 }
761 len--;
762 }
763
764 while (len > 4) {
765 longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(a), RF_MIN(RF_BLIP(b), RF_MIN(RF_BLIP(c), RF_BLIP(dst)))) >> RF_LONGSHIFT);
766 a += longs_this_time;
767 b += longs_this_time;
768 c += longs_this_time;
769 dst += longs_this_time;
770 len -= longs_this_time;
771 while (longs_this_time >= 4) {
772 a0 = pg_a[0];
773 longs_this_time -= 4;
774
775 a1 = pg_a[1];
776 a2 = pg_a[2];
777
778 a3 = pg_a[3];
779 pg_a += 4;
780
781 b0 = pg_b[0];
782 b1 = pg_b[1];
783
784 b2 = pg_b[2];
785 b3 = pg_b[3];
786 /* start dual issue */
787 a0 ^= b0;
788 b0 = pg_c[0];
789
790 pg_b += 4;
791 a1 ^= b1;
792
793 a2 ^= b2;
794 a3 ^= b3;
795
796 b1 = pg_c[1];
797 a0 ^= b0;
798
799 b2 = pg_c[2];
800 a1 ^= b1;
801
802 b3 = pg_c[3];
803 a2 ^= b2;
804
805 pg_dst[0] = a0;
806 a3 ^= b3;
807 pg_dst[1] = a1;
808 pg_c += 4;
809 pg_dst[2] = a2;
810 pg_dst[3] = a3;
811 pg_dst += 4;
812 }
813 while (longs_this_time > 0) { /* cannot cross any page
814 * boundaries here */
815 *pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
816 longs_this_time--;
817 }
818
819 if (len) {
820 if (RF_PAGE_ALIGNED(a)) {
821 pg_a = a;
822 if (!pg_a)
823 return (EFAULT);
824 if (dst_is_a)
825 pg_dst = pg_a;
826 }
827 if (RF_PAGE_ALIGNED(b)) {
828 pg_b = b;
829 if (!pg_b)
830 return (EFAULT);
831 }
832 if (RF_PAGE_ALIGNED(c)) {
833 pg_c = c;
834 if (!pg_c)
835 return (EFAULT);
836 }
837 if (!dst_is_a)
838 if (RF_PAGE_ALIGNED(dst)) {
839 pg_dst = dst;
840 if (!pg_dst)
841 return (EFAULT);
842 }
843 }
844 }
845 while (len) {
846 *pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
847 dst++;
848 a++;
849 b++;
850 c++;
851 if (RF_PAGE_ALIGNED(a)) {
852 pg_a = a;
853 if (!pg_a)
854 return (EFAULT);
855 if (dst_is_a)
856 pg_dst = pg_a;
857 }
858 if (RF_PAGE_ALIGNED(b)) {
859 pg_b = b;
860 if (!pg_b)
861 return (EFAULT);
862 }
863 if (RF_PAGE_ALIGNED(c)) {
864 pg_c = c;
865 if (!pg_c)
866 return (EFAULT);
867 }
868 if (!dst_is_a)
869 if (RF_PAGE_ALIGNED(dst)) {
870 pg_dst = dst;
871 if (!pg_dst)
872 return (EFAULT);
873 }
874 len--;
875 }
876 return (0);
877 }
878
879 int
880 rf_bxor3(unsigned char *dst, unsigned char *a, unsigned char *b,
881 unsigned char *c, unsigned long len, void *bp)
882 {
883 RF_ASSERT(((RF_UL(dst) | RF_UL(a) | RF_UL(b) | RF_UL(c) | len) & 0x7) == 0);
884
885 return (rf_longword_bxor3((unsigned long *) dst, (unsigned long *) a,
886 (unsigned long *) b, (unsigned long *) c, len >> RF_LONGSHIFT, bp));
887 }
888 #endif
889