/*	$NetBSD: rf_dagfuncs.c,v 1.30.64.1 2020/04/13 08:04:47 martin Exp $	*/
2 /*
3 * Copyright (c) 1995 Carnegie-Mellon University.
4 * All rights reserved.
5 *
6 * Author: Mark Holland, William V. Courtright II
7 *
8 * Permission to use, copy, modify and distribute this software and
9 * its documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
17 *
18 * Carnegie Mellon requests users of this software to return to
19 *
20 * Software Distribution Coordinator or Software.Distribution (at) CS.CMU.EDU
21 * School of Computer Science
22 * Carnegie Mellon University
23 * Pittsburgh PA 15213-3890
24 *
25 * any improvements or extensions that they make and grant Carnegie the
26 * rights to redistribute these changes.
27 */
28
29 /*
30 * dagfuncs.c -- DAG node execution routines
31 *
32 * Rules:
33 * 1. Every DAG execution function must eventually cause node->status to
34 * get set to "good" or "bad", and "FinishNode" to be called. In the
35 * case of nodes that complete immediately (xor, NullNodeFunc, etc),
36 * the node execution function can do these two things directly. In
37 * the case of nodes that have to wait for some event (a disk read to
38 * complete, a lock to be released, etc) to occur before they can
39 * complete, this is typically achieved by having whatever module
40 * is doing the operation call GenericWakeupFunc upon completion.
41 * 2. DAG execution functions should check the status in the DAG header
42 * and NOP out their operations if the status is not "enable". However,
43 * execution functions that release resources must be sure to release
44 * them even when they NOP out the function that would use them.
45 * Functions that acquire resources should go ahead and acquire them
46 * even when they NOP, so that a downstream release node will not have
47 * to check to find out whether or not the acquire was suppressed.
48 */
49
50 #include <sys/cdefs.h>
51 __KERNEL_RCSID(0, "$NetBSD: rf_dagfuncs.c,v 1.30.64.1 2020/04/13 08:04:47 martin Exp $");
52
53 #include <sys/param.h>
54 #include <sys/ioctl.h>
55
56 #include "rf_archs.h"
57 #include "rf_raid.h"
58 #include "rf_dag.h"
59 #include "rf_layout.h"
60 #include "rf_etimer.h"
61 #include "rf_acctrace.h"
62 #include "rf_diskqueue.h"
63 #include "rf_dagfuncs.h"
64 #include "rf_general.h"
65 #include "rf_engine.h"
66 #include "rf_dagutils.h"
67
68 #include "rf_kintf.h"
69
70 #if RF_INCLUDE_PARITYLOGGING > 0
71 #include "rf_paritylog.h"
72 #endif /* RF_INCLUDE_PARITYLOGGING > 0 */
73
74 void (*rf_DiskReadFunc) (RF_DagNode_t *);
75 void (*rf_DiskWriteFunc) (RF_DagNode_t *);
76 void (*rf_DiskReadUndoFunc) (RF_DagNode_t *);
77 void (*rf_DiskWriteUndoFunc) (RF_DagNode_t *);
78 void (*rf_RegularXorUndoFunc) (RF_DagNode_t *);
79 void (*rf_SimpleXorUndoFunc) (RF_DagNode_t *);
80 void (*rf_RecoveryXorUndoFunc) (RF_DagNode_t *);
81
82 /*****************************************************************************
83 * main (only) configuration routine for this module
84 ****************************************************************************/
85 int
86 rf_ConfigureDAGFuncs(RF_ShutdownList_t **listp)
87 {
88 RF_ASSERT(((sizeof(long) == 8) && RF_LONGSHIFT == 3) ||
89 ((sizeof(long) == 4) && RF_LONGSHIFT == 2));
90 rf_DiskReadFunc = rf_DiskReadFuncForThreads;
91 rf_DiskReadUndoFunc = rf_DiskUndoFunc;
92 rf_DiskWriteFunc = rf_DiskWriteFuncForThreads;
93 rf_DiskWriteUndoFunc = rf_DiskUndoFunc;
94 rf_RegularXorUndoFunc = rf_NullNodeUndoFunc;
95 rf_SimpleXorUndoFunc = rf_NullNodeUndoFunc;
96 rf_RecoveryXorUndoFunc = rf_NullNodeUndoFunc;
97 return (0);
98 }
99
100
101
102 /*****************************************************************************
103 * the execution function associated with a terminate node
104 ****************************************************************************/
105 void
106 rf_TerminateFunc(RF_DagNode_t *node)
107 {
108 RF_ASSERT(node->dagHdr->numCommits == node->dagHdr->numCommitNodes);
109 node->status = rf_good;
110 rf_FinishNode(node, RF_THREAD_CONTEXT);
111 }
112
113 void
114 rf_TerminateUndoFunc(RF_DagNode_t *node)
115 {
116 }
117
118
119 /*****************************************************************************
120 * execution functions associated with a mirror node
121 *
122 * parameters:
123 *
124 * 0 - physical disk addres of data
125 * 1 - buffer for holding read data
126 * 2 - parity stripe ID
127 * 3 - flags
128 * 4 - physical disk address of mirror (parity)
129 *
130 ****************************************************************************/
131
132 void
133 rf_DiskReadMirrorIdleFunc(RF_DagNode_t *node)
134 {
135 /* select the mirror copy with the shortest queue and fill in node
136 * parameters with physical disk address */
137
138 rf_SelectMirrorDiskIdle(node);
139 rf_DiskReadFunc(node);
140 }
141
#if (RF_INCLUDE_CHAINDECLUSTER > 0) || (RF_INCLUDE_INTERDECLUSTER > 0) || (RF_DEBUG_VALIDATE_DAG > 0)
void
rf_DiskReadMirrorPartitionFunc(RF_DagNode_t *node)
{
	/*
	 * Choose a mirror copy by the partitioning policy, fill in the
	 * node's physical-disk-address parameters, then issue the read
	 * as an ordinary disk-read node.
	 */
	rf_SelectMirrorDiskPartition(node);
	rf_DiskReadFunc(node);
}
#endif
153
154 void
155 rf_DiskReadMirrorUndoFunc(RF_DagNode_t *node)
156 {
157 }
158
159
160
161 #if RF_INCLUDE_PARITYLOGGING > 0
162 /*****************************************************************************
163 * the execution function associated with a parity log update node
164 ****************************************************************************/
165 void
166 rf_ParityLogUpdateFunc(RF_DagNode_t *node)
167 {
168 RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
169 void *bf = (void *) node->params[1].p;
170 RF_ParityLogData_t *logData;
171 #if RF_ACC_TRACE > 0
172 RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
173 RF_Etimer_t timer;
174 #endif
175
176 if (node->dagHdr->status == rf_enable) {
177 #if RF_ACC_TRACE > 0
178 RF_ETIMER_START(timer);
179 #endif
180 logData = rf_CreateParityLogData(RF_UPDATE, pda, bf,
181 (RF_Raid_t *) (node->dagHdr->raidPtr),
182 node->wakeFunc, node,
183 node->dagHdr->tracerec, timer);
184 if (logData)
185 rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
186 else {
187 #if RF_ACC_TRACE > 0
188 RF_ETIMER_STOP(timer);
189 RF_ETIMER_EVAL(timer);
190 tracerec->plog_us += RF_ETIMER_VAL_US(timer);
191 #endif
192 (node->wakeFunc) (node, ENOMEM);
193 }
194 }
195 }
196
197
198 /*****************************************************************************
199 * the execution function associated with a parity log overwrite node
200 ****************************************************************************/
201 void
202 rf_ParityLogOverwriteFunc(RF_DagNode_t *node)
203 {
204 RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
205 void *bf = (void *) node->params[1].p;
206 RF_ParityLogData_t *logData;
207 #if RF_ACC_TRACE > 0
208 RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
209 RF_Etimer_t timer;
210 #endif
211
212 if (node->dagHdr->status == rf_enable) {
213 #if RF_ACC_TRACE > 0
214 RF_ETIMER_START(timer);
215 #endif
216 logData = rf_CreateParityLogData(RF_OVERWRITE, pda, bf,
217 (RF_Raid_t *) (node->dagHdr->raidPtr),
218 node->wakeFunc, node, node->dagHdr->tracerec, timer);
219 if (logData)
220 rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
221 else {
222 #if RF_ACC_TRACE > 0
223 RF_ETIMER_STOP(timer);
224 RF_ETIMER_EVAL(timer);
225 tracerec->plog_us += RF_ETIMER_VAL_US(timer);
226 #endif
227 (node->wakeFunc) (node, ENOMEM);
228 }
229 }
230 }
231
232 void
233 rf_ParityLogUpdateUndoFunc(RF_DagNode_t *node)
234 {
235 }
236
237 void
238 rf_ParityLogOverwriteUndoFunc(RF_DagNode_t *node)
239 {
240 }
241 #endif /* RF_INCLUDE_PARITYLOGGING > 0 */
242
243 /*****************************************************************************
244 * the execution function associated with a NOP node
245 ****************************************************************************/
246 void
247 rf_NullNodeFunc(RF_DagNode_t *node)
248 {
249 node->status = rf_good;
250 rf_FinishNode(node, RF_THREAD_CONTEXT);
251 }
252
253 void
254 rf_NullNodeUndoFunc(RF_DagNode_t *node)
255 {
256 node->status = rf_undone;
257 rf_FinishNode(node, RF_THREAD_CONTEXT);
258 }
259
260
261 /*****************************************************************************
262 * the execution function associated with a disk-read node
263 ****************************************************************************/
264 void
265 rf_DiskReadFuncForThreads(RF_DagNode_t *node)
266 {
267 RF_DiskQueueData_t *req;
268 RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
269 void *bf = (void *) node->params[1].p;
270 RF_StripeNum_t parityStripeID = (RF_StripeNum_t) node->params[2].v;
271 unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
272 unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
273 RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ? RF_IO_TYPE_READ : RF_IO_TYPE_NOP;
274 RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
275 void *b_proc = NULL;
276
277 if (node->dagHdr->bp)
278 b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;
279
280 req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
281 bf, parityStripeID, which_ru, node->wakeFunc, node,
282 #if RF_ACC_TRACE > 0
283 node->dagHdr->tracerec,
284 #else
285 NULL,
286 #endif
287 (void *) (node->dagHdr->raidPtr), 0, b_proc, PR_NOWAIT);
288 if (!req) {
289 (node->wakeFunc) (node, ENOMEM);
290 } else {
291 node->dagFuncData = (void *) req;
292 rf_DiskIOEnqueue(&(dqs[pda->col]), req, priority);
293 }
294 }
295
296
297 /*****************************************************************************
298 * the execution function associated with a disk-write node
299 ****************************************************************************/
300 void
301 rf_DiskWriteFuncForThreads(RF_DagNode_t *node)
302 {
303 RF_DiskQueueData_t *req;
304 RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
305 void *bf = (void *) node->params[1].p;
306 RF_StripeNum_t parityStripeID = (RF_StripeNum_t) node->params[2].v;
307 unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
308 unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
309 RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ? RF_IO_TYPE_WRITE : RF_IO_TYPE_NOP;
310 RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
311 void *b_proc = NULL;
312
313 if (node->dagHdr->bp)
314 b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;
315
316 /* normal processing (rollaway or forward recovery) begins here */
317 req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
318 bf, parityStripeID, which_ru, node->wakeFunc, node,
319 #if RF_ACC_TRACE > 0
320 node->dagHdr->tracerec,
321 #else
322 NULL,
323 #endif
324 (void *) (node->dagHdr->raidPtr),
325 0, b_proc, PR_NOWAIT);
326
327 if (!req) {
328 (node->wakeFunc) (node, ENOMEM);
329 } else {
330 node->dagFuncData = (void *) req;
331 rf_DiskIOEnqueue(&(dqs[pda->col]), req, priority);
332 }
333 }
334 /*****************************************************************************
335 * the undo function for disk nodes
336 * Note: this is not a proper undo of a write node, only locks are released.
337 * old data is not restored to disk!
338 ****************************************************************************/
339 void
340 rf_DiskUndoFunc(RF_DagNode_t *node)
341 {
342 RF_DiskQueueData_t *req;
343 RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
344 RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
345
346 req = rf_CreateDiskQueueData(RF_IO_TYPE_NOP,
347 0L, 0, NULL, 0L, 0, node->wakeFunc, node,
348 #if RF_ACC_TRACE > 0
349 node->dagHdr->tracerec,
350 #else
351 NULL,
352 #endif
353 (void *) (node->dagHdr->raidPtr),
354 0, NULL, PR_NOWAIT);
355 if (!req)
356 (node->wakeFunc) (node, ENOMEM);
357 else {
358 node->dagFuncData = (void *) req;
359 rf_DiskIOEnqueue(&(dqs[pda->col]), req, RF_IO_NORMAL_PRIORITY);
360 }
361 }
362
363 /*****************************************************************************
364 * Callback routine for DiskRead and DiskWrite nodes. When the disk
365 * op completes, the routine is called to set the node status and
366 * inform the execution engine that the node has fired.
367 ****************************************************************************/
368 void
369 rf_GenericWakeupFunc(void *v, int status)
370 {
371 RF_DagNode_t *node = v;
372
373 switch (node->status) {
374 case rf_fired:
375 if (status)
376 node->status = rf_bad;
377 else
378 node->status = rf_good;
379 break;
380 case rf_recover:
381 /* probably should never reach this case */
382 if (status)
383 node->status = rf_panic;
384 else
385 node->status = rf_undone;
386 break;
387 default:
388 printf("rf_GenericWakeupFunc:");
389 printf("node->status is %d,", node->status);
390 printf("status is %d \n", status);
391 RF_PANIC();
392 break;
393 }
394 if (node->dagFuncData)
395 rf_FreeDiskQueueData((RF_DiskQueueData_t *) node->dagFuncData);
396 rf_FinishNode(node, RF_INTR_CONTEXT);
397 }
398
399
400 /*****************************************************************************
401 * there are three distinct types of xor nodes:
402
403 * A "regular xor" is used in the fault-free case where the access
404 * spans a complete stripe unit. It assumes that the result buffer is
405 * one full stripe unit in size, and uses the stripe-unit-offset
406 * values that it computes from the PDAs to determine where within the
407 * stripe unit to XOR each argument buffer.
408 *
409 * A "simple xor" is used in the fault-free case where the access
410 * touches only a portion of one (or two, in some cases) stripe
411 * unit(s). It assumes that all the argument buffers are of the same
412 * size and have the same stripe unit offset.
413 *
414 * A "recovery xor" is used in the degraded-mode case. It's similar
415 * to the regular xor function except that it takes the failed PDA as
416 * an additional parameter, and uses it to determine what portions of
417 * the argument buffers need to be xor'd into the result buffer, and
418 * where in the result buffer they should go.
419 ****************************************************************************/
420
421 /* xor the params together and store the result in the result field.
422 * assume the result field points to a buffer that is the size of one
423 * SU, and use the pda params to determine where within the buffer to
424 * XOR the input buffers. */
425 void
426 rf_RegularXorFunc(RF_DagNode_t *node)
427 {
428 RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
429 #if RF_ACC_TRACE > 0
430 RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
431 RF_Etimer_t timer;
432 #endif
433 int i, retcode;
434
435 retcode = 0;
436 if (node->dagHdr->status == rf_enable) {
437 /* don't do the XOR if the input is the same as the output */
438 #if RF_ACC_TRACE > 0
439 RF_ETIMER_START(timer);
440 #endif
441 for (i = 0; i < node->numParams - 1; i += 2)
442 if (node->params[i + 1].p != node->results[0]) {
443 retcode = rf_XorIntoBuffer(raidPtr, (RF_PhysDiskAddr_t *) node->params[i].p,
444 (char *) node->params[i + 1].p, (char *) node->results[0]);
445 }
446 #if RF_ACC_TRACE > 0
447 RF_ETIMER_STOP(timer);
448 RF_ETIMER_EVAL(timer);
449 tracerec->xor_us += RF_ETIMER_VAL_US(timer);
450 #endif
451 }
452 rf_GenericWakeupFunc(node, retcode); /* call wake func
453 * explicitly since no
454 * I/O in this node */
455 }
456 /* xor the inputs into the result buffer, ignoring placement issues */
457 void
458 rf_SimpleXorFunc(RF_DagNode_t *node)
459 {
460 RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
461 int i, retcode = 0;
462 #if RF_ACC_TRACE > 0
463 RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
464 RF_Etimer_t timer;
465 #endif
466
467 if (node->dagHdr->status == rf_enable) {
468 #if RF_ACC_TRACE > 0
469 RF_ETIMER_START(timer);
470 #endif
471 /* don't do the XOR if the input is the same as the output */
472 for (i = 0; i < node->numParams - 1; i += 2)
473 if (node->params[i + 1].p != node->results[0]) {
474 retcode = rf_bxor((char *) node->params[i + 1].p, (char *) node->results[0],
475 rf_RaidAddressToByte(raidPtr, ((RF_PhysDiskAddr_t *) node->params[i].p)->numSector));
476 }
477 #if RF_ACC_TRACE > 0
478 RF_ETIMER_STOP(timer);
479 RF_ETIMER_EVAL(timer);
480 tracerec->xor_us += RF_ETIMER_VAL_US(timer);
481 #endif
482 }
483 rf_GenericWakeupFunc(node, retcode); /* call wake func
484 * explicitly since no
485 * I/O in this node */
486 }
487 /* this xor is used by the degraded-mode dag functions to recover lost
488 * data. the second-to-last parameter is the PDA for the failed
489 * portion of the access. the code here looks at this PDA and assumes
490 * that the xor target buffer is equal in size to the number of
491 * sectors in the failed PDA. It then uses the other PDAs in the
492 * parameter list to determine where within the target buffer the
493 * corresponding data should be xored. */
494 void
495 rf_RecoveryXorFunc(RF_DagNode_t *node)
496 {
497 RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
498 RF_RaidLayout_t *layoutPtr = (RF_RaidLayout_t *) & raidPtr->Layout;
499 RF_PhysDiskAddr_t *failedPDA = (RF_PhysDiskAddr_t *) node->params[node->numParams - 2].p;
500 int i, retcode = 0;
501 RF_PhysDiskAddr_t *pda;
502 int suoffset, failedSUOffset = rf_StripeUnitOffset(layoutPtr, failedPDA->startSector);
503 char *srcbuf, *destbuf;
504 #if RF_ACC_TRACE > 0
505 RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
506 RF_Etimer_t timer;
507 #endif
508
509 if (node->dagHdr->status == rf_enable) {
510 #if RF_ACC_TRACE > 0
511 RF_ETIMER_START(timer);
512 #endif
513 for (i = 0; i < node->numParams - 2; i += 2)
514 if (node->params[i + 1].p != node->results[0]) {
515 pda = (RF_PhysDiskAddr_t *) node->params[i].p;
516 srcbuf = (char *) node->params[i + 1].p;
517 suoffset = rf_StripeUnitOffset(layoutPtr, pda->startSector);
518 destbuf = ((char *) node->results[0]) + rf_RaidAddressToByte(raidPtr, suoffset - failedSUOffset);
519 retcode = rf_bxor(srcbuf, destbuf, rf_RaidAddressToByte(raidPtr, pda->numSector));
520 }
521 #if RF_ACC_TRACE > 0
522 RF_ETIMER_STOP(timer);
523 RF_ETIMER_EVAL(timer);
524 tracerec->xor_us += RF_ETIMER_VAL_US(timer);
525 #endif
526 }
527 rf_GenericWakeupFunc(node, retcode);
528 }
529 /*****************************************************************************
530 * The next three functions are utilities used by the above
531 * xor-execution functions.
532 ****************************************************************************/
533
534
535 /*
536 * this is just a glorified buffer xor. targbuf points to a buffer
537 * that is one full stripe unit in size. srcbuf points to a buffer
538 * that may be less than 1 SU, but never more. When the access
539 * described by pda is one SU in size (which by implication means it's
540 * SU-aligned), all that happens is (targbuf) <- (srcbuf ^ targbuf).
541 * When the access is less than one SU in size the XOR occurs on only
542 * the portion of targbuf identified in the pda. */
543
544 int
545 rf_XorIntoBuffer(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *pda,
546 char *srcbuf, char *targbuf)
547 {
548 char *targptr;
549 int sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
550 int SUOffset = pda->startSector % sectPerSU;
551 int length, retcode = 0;
552
553 RF_ASSERT(pda->numSector <= sectPerSU);
554
555 targptr = targbuf + rf_RaidAddressToByte(raidPtr, SUOffset);
556 length = rf_RaidAddressToByte(raidPtr, pda->numSector);
557 retcode = rf_bxor(srcbuf, targptr, length);
558 return (retcode);
559 }
560 /* it really should be the case that the buffer pointers (returned by
561 * malloc) are aligned to the natural word size of the machine, so
562 * this is the only case we optimize for. The length should always be
563 * a multiple of the sector size, so there should be no problem with
564 * leftover bytes at the end. */
565 int
566 rf_bxor(char *src, char *dest, int len)
567 {
568 unsigned mask = sizeof(long) - 1, retcode = 0;
569
570 if (!(((unsigned long) src) & mask) &&
571 !(((unsigned long) dest) & mask) && !(len & mask)) {
572 retcode = rf_longword_bxor((unsigned long *) src,
573 (unsigned long *) dest,
574 len >> RF_LONGSHIFT);
575 } else {
576 RF_ASSERT(0);
577 }
578 return (retcode);
579 }
580
581 /* When XORing in kernel mode, we need to map each user page to kernel
582 * space before we can access it. We don't want to assume anything
583 * about which input buffers are in kernel/user space, nor about their
584 * alignment, so in each loop we compute the maximum number of bytes
585 * that we can xor without crossing any page boundaries, and do only
586 * this many bytes before the next remap.
587 *
588 * len - is in longwords
589 */
590 int
591 rf_longword_bxor(unsigned long *src, unsigned long *dest, int len)
592 {
593 unsigned long *end = src + len;
594 unsigned long d0, d1, d2, d3, s0, s1, s2, s3; /* temps */
595 unsigned long *pg_src, *pg_dest; /* per-page source/dest pointers */
596 int longs_this_time;/* # longwords to xor in the current iteration */
597
598 pg_src = src;
599 pg_dest = dest;
600 if (!pg_src || !pg_dest)
601 return (EFAULT);
602
603 while (len >= 4) {
604 longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(pg_src), RF_BLIP(pg_dest)) >> RF_LONGSHIFT); /* note len in longwords */
605 src += longs_this_time;
606 dest += longs_this_time;
607 len -= longs_this_time;
608 while (longs_this_time >= 4) {
609 d0 = pg_dest[0];
610 d1 = pg_dest[1];
611 d2 = pg_dest[2];
612 d3 = pg_dest[3];
613 s0 = pg_src[0];
614 s1 = pg_src[1];
615 s2 = pg_src[2];
616 s3 = pg_src[3];
617 pg_dest[0] = d0 ^ s0;
618 pg_dest[1] = d1 ^ s1;
619 pg_dest[2] = d2 ^ s2;
620 pg_dest[3] = d3 ^ s3;
621 pg_src += 4;
622 pg_dest += 4;
623 longs_this_time -= 4;
624 }
625 while (longs_this_time > 0) { /* cannot cross any page
626 * boundaries here */
627 *pg_dest++ ^= *pg_src++;
628 longs_this_time--;
629 }
630
631 /* either we're done, or we've reached a page boundary on one
632 * (or possibly both) of the pointers */
633 if (len) {
634 if (RF_PAGE_ALIGNED(src))
635 pg_src = src;
636 if (RF_PAGE_ALIGNED(dest))
637 pg_dest = dest;
638 if (!pg_src || !pg_dest)
639 return (EFAULT);
640 }
641 }
642 while (src < end) {
643 *pg_dest++ ^= *pg_src++;
644 src++;
645 dest++;
646 len--;
647 if (RF_PAGE_ALIGNED(src))
648 pg_src = src;
649 if (RF_PAGE_ALIGNED(dest))
650 pg_dest = dest;
651 }
652 RF_ASSERT(len == 0);
653 return (0);
654 }
655
656 #if 0
657 /*
658 dst = a ^ b ^ c;
659 a may equal dst
660 see comment above longword_bxor
661 len is length in longwords
662 */
663 int
664 rf_longword_bxor3(unsigned long *dst, unsigned long *a, unsigned long *b,
665 unsigned long *c, int len, void *bp)
666 {
667 unsigned long a0, a1, a2, a3, b0, b1, b2, b3;
668 unsigned long *pg_a, *pg_b, *pg_c, *pg_dst; /* per-page source/dest
669 * pointers */
670 int longs_this_time;/* # longs to xor in the current iteration */
671 char dst_is_a = 0;
672
673 pg_a = a;
674 pg_b = b;
675 pg_c = c;
676 if (a == dst) {
677 pg_dst = pg_a;
678 dst_is_a = 1;
679 } else {
680 pg_dst = dst;
681 }
682
683 /* align dest to cache line. Can't cross a pg boundary on dst here. */
684 while ((((unsigned long) pg_dst) & 0x1f)) {
685 *pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
686 dst++;
687 a++;
688 b++;
689 c++;
690 if (RF_PAGE_ALIGNED(a)) {
691 pg_a = a;
692 if (!pg_a)
693 return (EFAULT);
694 }
695 if (RF_PAGE_ALIGNED(b)) {
696 pg_b = a;
697 if (!pg_b)
698 return (EFAULT);
699 }
700 if (RF_PAGE_ALIGNED(c)) {
701 pg_c = a;
702 if (!pg_c)
703 return (EFAULT);
704 }
705 len--;
706 }
707
708 while (len > 4) {
709 longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(a), RF_MIN(RF_BLIP(b), RF_MIN(RF_BLIP(c), RF_BLIP(dst)))) >> RF_LONGSHIFT);
710 a += longs_this_time;
711 b += longs_this_time;
712 c += longs_this_time;
713 dst += longs_this_time;
714 len -= longs_this_time;
715 while (longs_this_time >= 4) {
716 a0 = pg_a[0];
717 longs_this_time -= 4;
718
719 a1 = pg_a[1];
720 a2 = pg_a[2];
721
722 a3 = pg_a[3];
723 pg_a += 4;
724
725 b0 = pg_b[0];
726 b1 = pg_b[1];
727
728 b2 = pg_b[2];
729 b3 = pg_b[3];
730 /* start dual issue */
731 a0 ^= b0;
732 b0 = pg_c[0];
733
734 pg_b += 4;
735 a1 ^= b1;
736
737 a2 ^= b2;
738 a3 ^= b3;
739
740 b1 = pg_c[1];
741 a0 ^= b0;
742
743 b2 = pg_c[2];
744 a1 ^= b1;
745
746 b3 = pg_c[3];
747 a2 ^= b2;
748
749 pg_dst[0] = a0;
750 a3 ^= b3;
751 pg_dst[1] = a1;
752 pg_c += 4;
753 pg_dst[2] = a2;
754 pg_dst[3] = a3;
755 pg_dst += 4;
756 }
757 while (longs_this_time > 0) { /* cannot cross any page
758 * boundaries here */
759 *pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
760 longs_this_time--;
761 }
762
763 if (len) {
764 if (RF_PAGE_ALIGNED(a)) {
765 pg_a = a;
766 if (!pg_a)
767 return (EFAULT);
768 if (dst_is_a)
769 pg_dst = pg_a;
770 }
771 if (RF_PAGE_ALIGNED(b)) {
772 pg_b = b;
773 if (!pg_b)
774 return (EFAULT);
775 }
776 if (RF_PAGE_ALIGNED(c)) {
777 pg_c = c;
778 if (!pg_c)
779 return (EFAULT);
780 }
781 if (!dst_is_a)
782 if (RF_PAGE_ALIGNED(dst)) {
783 pg_dst = dst;
784 if (!pg_dst)
785 return (EFAULT);
786 }
787 }
788 }
789 while (len) {
790 *pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
791 dst++;
792 a++;
793 b++;
794 c++;
795 if (RF_PAGE_ALIGNED(a)) {
796 pg_a = a;
797 if (!pg_a)
798 return (EFAULT);
799 if (dst_is_a)
800 pg_dst = pg_a;
801 }
802 if (RF_PAGE_ALIGNED(b)) {
803 pg_b = b;
804 if (!pg_b)
805 return (EFAULT);
806 }
807 if (RF_PAGE_ALIGNED(c)) {
808 pg_c = c;
809 if (!pg_c)
810 return (EFAULT);
811 }
812 if (!dst_is_a)
813 if (RF_PAGE_ALIGNED(dst)) {
814 pg_dst = dst;
815 if (!pg_dst)
816 return (EFAULT);
817 }
818 len--;
819 }
820 return (0);
821 }
822
823 int
824 rf_bxor3(unsigned char *dst, unsigned char *a, unsigned char *b,
825 unsigned char *c, unsigned long len, void *bp)
826 {
827 RF_ASSERT(((RF_UL(dst) | RF_UL(a) | RF_UL(b) | RF_UL(c) | len) & 0x7) == 0);
828
829 return (rf_longword_bxor3((unsigned long *) dst, (unsigned long *) a,
830 (unsigned long *) b, (unsigned long *) c, len >> RF_LONGSHIFT, bp));
831 }
832 #endif
833