      1  1.56.2.9     skrll /*	$NetBSD: rf_reconstruct.c,v 1.56.2.9 2005/02/15 21:33:29 skrll Exp $	*/
      2       1.1     oster /*
      3       1.1     oster  * Copyright (c) 1995 Carnegie-Mellon University.
      4       1.1     oster  * All rights reserved.
      5       1.1     oster  *
      6       1.1     oster  * Author: Mark Holland
      7       1.1     oster  *
      8       1.1     oster  * Permission to use, copy, modify and distribute this software and
      9       1.1     oster  * its documentation is hereby granted, provided that both the copyright
     10       1.1     oster  * notice and this permission notice appear in all copies of the
     11       1.1     oster  * software, derivative works or modified versions, and any portions
     12       1.1     oster  * thereof, and that both notices appear in supporting documentation.
     13       1.1     oster  *
     14       1.1     oster  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     15       1.1     oster  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     16       1.1     oster  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     17       1.1     oster  *
     18       1.1     oster  * Carnegie Mellon requests users of this software to return to
     19       1.1     oster  *
     20       1.1     oster  *  Software Distribution Coordinator  or  Software.Distribution (at) CS.CMU.EDU
     21       1.1     oster  *  School of Computer Science
     22       1.1     oster  *  Carnegie Mellon University
     23       1.1     oster  *  Pittsburgh PA 15213-3890
     24       1.1     oster  *
     25       1.1     oster  * any improvements or extensions that they make and grant Carnegie the
     26       1.1     oster  * rights to redistribute these changes.
     27       1.1     oster  */
     28       1.1     oster 
     29       1.1     oster /************************************************************
     30       1.1     oster  *
     31       1.1     oster  * rf_reconstruct.c -- code to perform on-line reconstruction
     32       1.1     oster  *
     33       1.1     oster  ************************************************************/
     34      1.31     lukem 
     35      1.31     lukem #include <sys/cdefs.h>
     36  1.56.2.9     skrll __KERNEL_RCSID(0, "$NetBSD: rf_reconstruct.c,v 1.56.2.9 2005/02/15 21:33:29 skrll Exp $");
     37       1.1     oster 
     38       1.1     oster #include <sys/time.h>
     39       1.1     oster #include <sys/buf.h>
     40       1.1     oster #include <sys/errno.h>
     41       1.5     oster 
     42       1.5     oster #include <sys/param.h>
     43       1.5     oster #include <sys/systm.h>
     44       1.5     oster #include <sys/proc.h>
     45       1.5     oster #include <sys/ioctl.h>
     46       1.5     oster #include <sys/fcntl.h>
     47       1.5     oster #include <sys/vnode.h>
     48      1.30     oster #include <dev/raidframe/raidframevar.h>
     49       1.5     oster 
     50       1.1     oster #include "rf_raid.h"
     51       1.1     oster #include "rf_reconutil.h"
     52       1.1     oster #include "rf_revent.h"
     53       1.1     oster #include "rf_reconbuffer.h"
     54       1.1     oster #include "rf_acctrace.h"
     55       1.1     oster #include "rf_etimer.h"
     56       1.1     oster #include "rf_dag.h"
     57       1.1     oster #include "rf_desc.h"
     58      1.36     oster #include "rf_debugprint.h"
     59       1.1     oster #include "rf_general.h"
     60       1.1     oster #include "rf_driver.h"
     61       1.1     oster #include "rf_utils.h"
     62       1.1     oster #include "rf_shutdown.h"
     63       1.1     oster 
     64       1.1     oster #include "rf_kintf.h"
     65       1.1     oster 
     66       1.1     oster /* setting these to -1 causes them to be set to their default values if not set by debug options */
     67       1.1     oster 
     68      1.41     oster #if RF_DEBUG_RECON
     69       1.1     oster #define Dprintf(s)         if (rf_reconDebug) rf_debug_printf(s,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
     70       1.1     oster #define Dprintf1(s,a)         if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
     71       1.1     oster #define Dprintf2(s,a,b)       if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
     72       1.1     oster #define Dprintf3(s,a,b,c)     if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),NULL,NULL,NULL,NULL,NULL)
     73       1.1     oster #define Dprintf4(s,a,b,c,d)   if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),NULL,NULL,NULL,NULL)
     74       1.1     oster #define Dprintf5(s,a,b,c,d,e) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),NULL,NULL,NULL)
     75       1.1     oster #define Dprintf6(s,a,b,c,d,e,f) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),(void *)((unsigned long)f),NULL,NULL)
     76       1.1     oster #define Dprintf7(s,a,b,c,d,e,f,g) if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),(void *)((unsigned long)c),(void *)((unsigned long)d),(void *)((unsigned long)e),(void *)((unsigned long)f),(void *)((unsigned long)g),NULL)
     77       1.1     oster 
     78       1.1     oster #define DDprintf1(s,a)         if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),NULL,NULL,NULL,NULL,NULL,NULL,NULL)
     79       1.1     oster #define DDprintf2(s,a,b)       if (rf_reconDebug) rf_debug_printf(s,(void *)((unsigned long)a),(void *)((unsigned long)b),NULL,NULL,NULL,NULL,NULL,NULL)
     80      1.33     oster 
     81      1.41     oster #else /* RF_DEBUG_RECON */
     82      1.33     oster 
     83      1.33     oster #define Dprintf(s) {}
     84      1.33     oster #define Dprintf1(s,a) {}
     85      1.33     oster #define Dprintf2(s,a,b) {}
     86      1.33     oster #define Dprintf3(s,a,b,c) {}
     87      1.33     oster #define Dprintf4(s,a,b,c,d) {}
     88      1.33     oster #define Dprintf5(s,a,b,c,d,e) {}
     89      1.33     oster #define Dprintf6(s,a,b,c,d,e,f) {}
     90      1.33     oster #define Dprintf7(s,a,b,c,d,e,f,g) {}
     91      1.33     oster 
     92      1.33     oster #define DDprintf1(s,a) {}
     93      1.33     oster #define DDprintf2(s,a,b) {}
     94      1.33     oster 
     95      1.41     oster #endif /* RF_DEBUG_RECON */
     96      1.33     oster 
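/* return codes used by ProcessReconEvent() to tell the main
   reconstruction loop what happened to the event it just processed */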
     97  1.56.2.8     skrll #define RF_RECON_DONE_READS   1
     98  1.56.2.8     skrll #define RF_RECON_READ_ERROR   2
     99  1.56.2.8     skrll #define RF_RECON_WRITE_ERROR  3
    100  1.56.2.8     skrll #define RF_RECON_READ_STOPPED 4
    101  1.56.2.8     skrll 
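/* minimum and maximum number of free RF_ReconBuffer_t structures kept in
   the rf_pools.reconbuffer pool set up in rf_ConfigureReconstruction() */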
    102  1.56.2.2     skrll #define RF_MAX_FREE_RECONBUFFER 32
    103  1.56.2.2     skrll #define RF_MIN_FREE_RECONBUFFER 16
    104       1.1     oster 
    105  1.56.2.2     skrll static RF_RaidReconDesc_t *AllocRaidReconDesc(RF_Raid_t *, RF_RowCol_t,
    106  1.56.2.2     skrll 					      RF_RaidDisk_t *, int, RF_RowCol_t);
    107  1.56.2.2     skrll static void FreeReconDesc(RF_RaidReconDesc_t *);
    108  1.56.2.2     skrll static int ProcessReconEvent(RF_Raid_t *, RF_ReconEvent_t *);
    109  1.56.2.2     skrll static int IssueNextReadRequest(RF_Raid_t *, RF_RowCol_t);
    110  1.56.2.2     skrll static int TryToRead(RF_Raid_t *, RF_RowCol_t);
    111  1.56.2.2     skrll static int ComputePSDiskOffsets(RF_Raid_t *, RF_StripeNum_t, RF_RowCol_t,
    112  1.56.2.2     skrll 				RF_SectorNum_t *, RF_SectorNum_t *, RF_RowCol_t *,
    113  1.56.2.2     skrll 				RF_SectorNum_t *);
    114  1.56.2.2     skrll static int IssueNextWriteRequest(RF_Raid_t *);
    115  1.56.2.2     skrll static int ReconReadDoneProc(void *, int);
    116  1.56.2.2     skrll static int ReconWriteDoneProc(void *, int);
    117  1.56.2.2     skrll static void CheckForNewMinHeadSep(RF_Raid_t *, RF_HeadSepLimit_t);
    118  1.56.2.2     skrll static int CheckHeadSeparation(RF_Raid_t *, RF_PerDiskReconCtrl_t *,
    119  1.56.2.2     skrll 			       RF_RowCol_t, RF_HeadSepLimit_t,
    120  1.56.2.2     skrll 			       RF_ReconUnitNum_t);
    121  1.56.2.2     skrll static int CheckForcedOrBlockedReconstruction(RF_Raid_t *,
    122  1.56.2.2     skrll 					      RF_ReconParityStripeStatus_t *,
    123  1.56.2.2     skrll 					      RF_PerDiskReconCtrl_t *,
    124  1.56.2.2     skrll 					      RF_RowCol_t, RF_StripeNum_t,
    125  1.56.2.2     skrll 					      RF_ReconUnitNum_t);
    126  1.56.2.2     skrll static void ForceReconReadDoneProc(void *, int);
    127       1.1     oster static void rf_ShutdownReconstruction(void *);
    128       1.1     oster 
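/* linked-list node recording a (callback, argument) pair to be invoked
   when reconstruction completes */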
    129       1.1     oster struct RF_ReconDoneProc_s {
    130       1.4     oster 	void    (*proc) (RF_Raid_t *, void *);
    131       1.4     oster 	void   *arg;
    132       1.4     oster 	RF_ReconDoneProc_t *next;
    133       1.1     oster };
    134       1.1     oster 
    135      1.13     oster /**************************************************************************
    136       1.1     oster  *
      137       1.1     oster  * Sets up the parameters that will be used by the reconstruction process.
      138       1.1     oster  * Currently there are none, except for those that the layout-specific
      139       1.1     oster  * configuration routine (e.g. rf_ConfigureDeclustered) sets up.
      140       1.1     oster  *
      141       1.1     oster  * In the kernel, we fire off the recon thread.
    142       1.1     oster  *
    143      1.13     oster  **************************************************************************/
    144       1.4     oster static void
    145  1.56.2.2     skrll rf_ShutdownReconstruction(void *ignored)
    146       1.4     oster {
    147  1.56.2.2     skrll 	pool_destroy(&rf_pools.reconbuffer);
    148       1.4     oster }
    149       1.4     oster 
    150       1.4     oster int
    151  1.56.2.2     skrll rf_ConfigureReconstruction(RF_ShutdownList_t **listp)
    152       1.4     oster {
    153       1.4     oster 
    154  1.56.2.2     skrll 	rf_pool_init(&rf_pools.reconbuffer, sizeof(RF_ReconBuffer_t),
    155  1.56.2.2     skrll 		     "rf_reconbuffer_pl", RF_MIN_FREE_RECONBUFFER, RF_MAX_FREE_RECONBUFFER);
    156  1.56.2.2     skrll 	rf_ShutdownCreate(listp, rf_ShutdownReconstruction, NULL);
    157  1.56.2.2     skrll 
    158       1.4     oster 	return (0);
    159       1.4     oster }
    160       1.4     oster 
    161       1.4     oster static RF_RaidReconDesc_t *
    162  1.56.2.2     skrll AllocRaidReconDesc(RF_Raid_t *raidPtr, RF_RowCol_t col,
    163  1.56.2.2     skrll 		   RF_RaidDisk_t *spareDiskPtr, int numDisksDone,
    164  1.56.2.2     skrll 		   RF_RowCol_t scol)
    165       1.1     oster {
    166       1.1     oster 
    167       1.4     oster 	RF_RaidReconDesc_t *reconDesc;
    168       1.4     oster 
    169  1.56.2.7     skrll 	RF_Malloc(reconDesc, sizeof(RF_RaidReconDesc_t),
    170  1.56.2.7     skrll 		  (RF_RaidReconDesc_t *));
    171       1.4     oster 	reconDesc->raidPtr = raidPtr;
    172       1.4     oster 	reconDesc->col = col;
    173       1.4     oster 	reconDesc->spareDiskPtr = spareDiskPtr;
    174       1.4     oster 	reconDesc->numDisksDone = numDisksDone;
    175       1.4     oster 	reconDesc->scol = scol;
    176       1.4     oster 	reconDesc->next = NULL;
    177       1.1     oster 
    178       1.4     oster 	return (reconDesc);
    179       1.1     oster }
    180       1.1     oster 
    181       1.4     oster static void
    182  1.56.2.2     skrll FreeReconDesc(RF_RaidReconDesc_t *reconDesc)
    183       1.1     oster {
    184       1.1     oster #if RF_RECON_STATS > 0
    185      1.50     oster 	printf("raid%d: %lu recon event waits, %lu recon delays\n",
    186      1.50     oster 	       reconDesc->raidPtr->raidid,
    187      1.50     oster 	       (long) reconDesc->numReconEventWaits,
    188      1.50     oster 	       (long) reconDesc->numReconExecDelays);
    189       1.4     oster #endif				/* RF_RECON_STATS > 0 */
    190      1.50     oster 	printf("raid%d: %lu max exec ticks\n",
    191      1.50     oster 	       reconDesc->raidPtr->raidid,
    192      1.50     oster 	       (long) reconDesc->maxReconExecTicks);
    193       1.1     oster #if (RF_RECON_STATS > 0) || defined(KERNEL)
    194       1.4     oster 	printf("\n");
    195       1.4     oster #endif				/* (RF_RECON_STATS > 0) || KERNEL */
    196  1.56.2.7     skrll 	RF_Free(reconDesc, sizeof(RF_RaidReconDesc_t));
    197       1.1     oster }
    198       1.1     oster 
    199       1.1     oster 
    200      1.13     oster /*****************************************************************************
    201       1.1     oster  *
    202       1.1     oster  * primary routine to reconstruct a failed disk.  This should be called from
    203       1.1     oster  * within its own thread.  It won't return until reconstruction completes,
    204       1.1     oster  * fails, or is aborted.
    205      1.13     oster  *****************************************************************************/
    206       1.4     oster int
    207  1.56.2.2     skrll rf_ReconstructFailedDisk(RF_Raid_t *raidPtr, RF_RowCol_t col)
    208       1.4     oster {
    209      1.52  jdolecek 	const RF_LayoutSW_t *lp;
    210       1.4     oster 	int     rc;
    211       1.4     oster 
    212       1.4     oster 	lp = raidPtr->Layout.map;
    213       1.4     oster 	if (lp->SubmitReconBuffer) {
    214       1.4     oster 		/*
    215       1.4     oster 	         * The current infrastructure only supports reconstructing one
    216       1.4     oster 	         * disk at a time for each array.
    217       1.4     oster 	         */
    218       1.4     oster 		RF_LOCK_MUTEX(raidPtr->mutex);
    219       1.4     oster 		while (raidPtr->reconInProgress) {
    220       1.4     oster 			RF_WAIT_COND(raidPtr->waitForReconCond, raidPtr->mutex);
    221       1.4     oster 		}
    222       1.4     oster 		raidPtr->reconInProgress++;
    223       1.4     oster 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    224  1.56.2.2     skrll 		rc = rf_ReconstructFailedDiskBasic(raidPtr, col);
    225       1.6     oster 		RF_LOCK_MUTEX(raidPtr->mutex);
    226       1.6     oster 		raidPtr->reconInProgress--;
    227       1.6     oster 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    228       1.4     oster 	} else {
    229       1.4     oster 		RF_ERRORMSG1("RECON: no way to reconstruct failed disk for arch %c\n",
    230       1.4     oster 		    lp->parityConfig);
    231       1.4     oster 		rc = EIO;
    232       1.4     oster 	}
    233       1.4     oster 	RF_SIGNAL_COND(raidPtr->waitForReconCond);
    234       1.4     oster 	return (rc);
    235       1.4     oster }
    236       1.4     oster 
    237       1.4     oster int
    238  1.56.2.2     skrll rf_ReconstructFailedDiskBasic(RF_Raid_t *raidPtr, RF_RowCol_t col)
    239       1.4     oster {
    240       1.5     oster 	RF_ComponentLabel_t c_label;
    241       1.4     oster 	RF_RaidDisk_t *spareDiskPtr = NULL;
    242       1.4     oster 	RF_RaidReconDesc_t *reconDesc;
    243  1.56.2.2     skrll 	RF_RowCol_t scol;
    244       1.4     oster 	int     numDisksDone = 0, rc;
    245       1.4     oster 
    246       1.4     oster 	/* first look for a spare drive onto which to reconstruct the data */
    247       1.4     oster 	/* spare disk descriptors are stored in row 0.  This may have to
    248       1.4     oster 	 * change eventually */
    249       1.4     oster 
    250       1.4     oster 	RF_LOCK_MUTEX(raidPtr->mutex);
    251  1.56.2.2     skrll 	RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed);
    252  1.56.2.2     skrll #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
    253       1.4     oster 	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
    254  1.56.2.2     skrll 		if (raidPtr->status != rf_rs_degraded) {
    255  1.56.2.2     skrll 			RF_ERRORMSG1("Unable to reconstruct disk at col %d because status not degraded\n", col);
    256       1.4     oster 			RF_UNLOCK_MUTEX(raidPtr->mutex);
    257       1.4     oster 			return (EINVAL);
    258       1.4     oster 		}
    259       1.4     oster 		scol = (-1);
    260       1.4     oster 	} else {
    261  1.56.2.2     skrll #endif
    262       1.4     oster 		for (scol = raidPtr->numCol; scol < raidPtr->numCol + raidPtr->numSpare; scol++) {
    263  1.56.2.2     skrll 			if (raidPtr->Disks[scol].status == rf_ds_spare) {
    264  1.56.2.2     skrll 				spareDiskPtr = &raidPtr->Disks[scol];
    265       1.4     oster 				spareDiskPtr->status = rf_ds_used_spare;
    266       1.4     oster 				break;
    267       1.4     oster 			}
    268       1.4     oster 		}
    269       1.4     oster 		if (!spareDiskPtr) {
    270  1.56.2.2     skrll 			RF_ERRORMSG1("Unable to reconstruct disk at col %d because no spares are available\n", col);
    271       1.4     oster 			RF_UNLOCK_MUTEX(raidPtr->mutex);
    272       1.4     oster 			return (ENOSPC);
    273       1.4     oster 		}
    274  1.56.2.2     skrll 		printf("RECON: initiating reconstruction on col %d -> spare at col %d\n", col, scol);
    275  1.56.2.2     skrll #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
    276       1.4     oster 	}
    277  1.56.2.2     skrll #endif
    278       1.4     oster 	RF_UNLOCK_MUTEX(raidPtr->mutex);
    279       1.1     oster 
    280  1.56.2.2     skrll 	reconDesc = AllocRaidReconDesc((void *) raidPtr, col, spareDiskPtr, numDisksDone, scol);
    281       1.4     oster 	raidPtr->reconDesc = (void *) reconDesc;
    282       1.1     oster #if RF_RECON_STATS > 0
    283       1.4     oster 	reconDesc->hsStallCount = 0;
    284       1.4     oster 	reconDesc->numReconExecDelays = 0;
    285       1.4     oster 	reconDesc->numReconEventWaits = 0;
    286       1.4     oster #endif				/* RF_RECON_STATS > 0 */
    287       1.4     oster 	reconDesc->reconExecTimerRunning = 0;
    288       1.4     oster 	reconDesc->reconExecTicks = 0;
    289       1.4     oster 	reconDesc->maxReconExecTicks = 0;
    290       1.4     oster 	rc = rf_ContinueReconstructFailedDisk(reconDesc);
    291       1.5     oster 
    292       1.5     oster 	if (!rc) {
    293       1.5     oster 		/* fix up the component label */
    294       1.5     oster 		/* Don't actually need the read here.. */
    295       1.5     oster 		raidread_component_label(
    296  1.56.2.2     skrll                         raidPtr->raid_cinfo[scol].ci_dev,
    297  1.56.2.2     skrll 			raidPtr->raid_cinfo[scol].ci_vp,
    298       1.5     oster 			&c_label);
    299       1.5     oster 
    300      1.15     oster 		raid_init_component_label( raidPtr, &c_label);
    301  1.56.2.2     skrll 		c_label.row = 0;
    302       1.5     oster 		c_label.column = col;
    303       1.5     oster 		c_label.clean = RF_RAID_DIRTY;
    304       1.5     oster 		c_label.status = rf_ds_optimal;
    305  1.56.2.2     skrll 		c_label.partitionSize = raidPtr->Disks[scol].partitionSize;
    306      1.15     oster 
    307      1.28     oster 		/* We've just done a rebuild based on all the other
    308      1.28     oster 		   disks, so at this point the parity is known to be
    309      1.28     oster 		   clean, even if it wasn't before. */
    310      1.28     oster 
    311      1.28     oster 		/* XXX doesn't hold for RAID 6!!*/
    312      1.28     oster 
    313      1.48     oster 		RF_LOCK_MUTEX(raidPtr->mutex);
    314      1.28     oster 		raidPtr->parity_good = RF_RAID_CLEAN;
    315      1.48     oster 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    316      1.28     oster 
    317      1.15     oster 		/* XXXX MORE NEEDED HERE */
    318       1.5     oster 
    319       1.5     oster 		raidwrite_component_label(
    320  1.56.2.2     skrll                         raidPtr->raid_cinfo[scol].ci_dev,
    321  1.56.2.2     skrll 			raidPtr->raid_cinfo[scol].ci_vp,
    322       1.5     oster 			&c_label);
    323      1.49     oster 
    324  1.56.2.8     skrll 	} else {
    325  1.56.2.8     skrll 		/* Reconstruct failed. */
    326  1.56.2.8     skrll 
    327  1.56.2.8     skrll 		RF_LOCK_MUTEX(raidPtr->mutex);
    328  1.56.2.8     skrll 		/* Failed disk goes back to "failed" status */
    329  1.56.2.8     skrll 		raidPtr->Disks[col].status = rf_ds_failed;
    330  1.56.2.8     skrll 
    331  1.56.2.8     skrll 		/* Spare disk goes back to "spare" status. */
    332  1.56.2.8     skrll 		spareDiskPtr->status = rf_ds_spare;
    333  1.56.2.8     skrll 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    334      1.49     oster 
    335       1.5     oster 	}
    336  1.56.2.8     skrll 	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
    337       1.5     oster 	return (rc);
    338       1.5     oster }
    339       1.5     oster 
    340       1.5     oster /*
    341       1.5     oster 
    342       1.5     oster    Allow reconstructing a disk in-place -- i.e. component /dev/sd2e goes AWOL,
    343       1.5     oster    and you don't get a spare until the next Monday.  With this function
    344       1.5     oster    (and hot-swappable drives) you can now put your new disk containing
    345       1.5     oster    /dev/sd2e on the bus, scsictl it alive, and then use raidctl(8) to
    346       1.5     oster    rebuild the data "on the spot".
    347       1.5     oster 
    348       1.5     oster */
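/*
   For example, assuming the component is /dev/sd2e on raid0 as above, the
   in-place rebuild is typically kicked off from userland with something
   like (see raidctl(8)):

	raidctl -R /dev/sd2e raid0

   which fails the component (if it is not already marked failed) and then
   reconstructs its contents back onto the same device.
*/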
    349       1.5     oster 
    350       1.5     oster int
    351  1.56.2.2     skrll rf_ReconstructInPlace(RF_Raid_t *raidPtr, RF_RowCol_t col)
    352       1.5     oster {
    353       1.5     oster 	RF_RaidDisk_t *spareDiskPtr = NULL;
    354       1.5     oster 	RF_RaidReconDesc_t *reconDesc;
    355      1.52  jdolecek 	const RF_LayoutSW_t *lp;
    356       1.5     oster 	RF_ComponentLabel_t c_label;
    357       1.5     oster 	int     numDisksDone = 0, rc;
    358       1.5     oster 	struct partinfo dpart;
    359       1.5     oster 	struct vnode *vp;
    360       1.5     oster 	struct vattr va;
    361  1.56.2.4     skrll 	struct lwp *lwp;
    362       1.5     oster 	int retcode;
    363      1.21     oster 	int ac;
    364       1.5     oster 
    365       1.5     oster 	lp = raidPtr->Layout.map;
    366  1.56.2.2     skrll 	if (!lp->SubmitReconBuffer) {
    367  1.56.2.2     skrll 		RF_ERRORMSG1("RECON: no way to reconstruct failed disk for arch %c\n",
    368  1.56.2.2     skrll 			     lp->parityConfig);
    369  1.56.2.2     skrll 		/* wakeup anyone who might be waiting to do a reconstruct */
    370  1.56.2.2     skrll 		RF_SIGNAL_COND(raidPtr->waitForReconCond);
    371  1.56.2.2     skrll 		return(EIO);
    372  1.56.2.2     skrll 	}
    373       1.5     oster 
    374  1.56.2.2     skrll 	/*
    375  1.56.2.2     skrll 	 * The current infrastructure only supports reconstructing one
    376  1.56.2.2     skrll 	 * disk at a time for each array.
    377  1.56.2.2     skrll 	 */
    378  1.56.2.2     skrll 	RF_LOCK_MUTEX(raidPtr->mutex);
    379       1.5     oster 
    380  1.56.2.2     skrll 	if (raidPtr->Disks[col].status != rf_ds_failed) {
    381  1.56.2.2     skrll 		/* "It's gone..." */
    382  1.56.2.2     skrll 		raidPtr->numFailures++;
    383  1.56.2.2     skrll 		raidPtr->Disks[col].status = rf_ds_failed;
    384  1.56.2.2     skrll 		raidPtr->status = rf_rs_degraded;
    385  1.56.2.2     skrll 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    386  1.56.2.2     skrll 		rf_update_component_labels(raidPtr,
    387  1.56.2.2     skrll 					   RF_NORMAL_COMPONENT_UPDATE);
    388  1.56.2.2     skrll 		RF_LOCK_MUTEX(raidPtr->mutex);
    389  1.56.2.2     skrll 	}
    390  1.56.2.2     skrll 
    391  1.56.2.2     skrll 	while (raidPtr->reconInProgress) {
    392  1.56.2.2     skrll 		RF_WAIT_COND(raidPtr->waitForReconCond, raidPtr->mutex);
    393  1.56.2.2     skrll 	}
    394  1.56.2.2     skrll 
    395  1.56.2.2     skrll 	raidPtr->reconInProgress++;
    396  1.56.2.2     skrll 
    397  1.56.2.2     skrll 	/* first look for a spare drive onto which to reconstruct the
    398  1.56.2.2     skrll 	   data.  spare disk descriptors are stored in row 0.  This
    399  1.56.2.2     skrll 	   may have to change eventually */
    400  1.56.2.2     skrll 
    401  1.56.2.2     skrll 	/* Actually, we don't care if it's failed or not...  On a RAID
    402  1.56.2.2     skrll 	   set with correct parity, this function should be callable
     403  1.56.2.2     skrll 	   on any component without ill effects. */
    404  1.56.2.2     skrll 	/* RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed); */
    405  1.56.2.2     skrll 
    406  1.56.2.2     skrll #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
    407  1.56.2.2     skrll 	if (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE) {
    408  1.56.2.2     skrll 		RF_ERRORMSG1("Unable to reconstruct to disk at col %d: operation not supported for RF_DISTRIBUTE_SPARE\n", col);
    409  1.56.2.2     skrll 
    410  1.56.2.2     skrll 		raidPtr->reconInProgress--;
    411  1.56.2.2     skrll 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    412  1.56.2.2     skrll 		RF_SIGNAL_COND(raidPtr->waitForReconCond);
    413  1.56.2.2     skrll 		return (EINVAL);
    414  1.56.2.2     skrll 	}
    415  1.56.2.2     skrll #endif
    416  1.56.2.4     skrll 	lwp = LIST_FIRST(&raidPtr->engine_thread->p_lwps);
    417  1.56.2.2     skrll 
    418  1.56.2.2     skrll 	/* This device may have been opened successfully the
    419  1.56.2.2     skrll 	   first time. Close it before trying to open it again.. */
    420  1.56.2.2     skrll 
    421  1.56.2.2     skrll 	if (raidPtr->raid_cinfo[col].ci_vp != NULL) {
    422      1.37     oster #if 0
    423  1.56.2.2     skrll 		printf("Closed the open device: %s\n",
    424  1.56.2.2     skrll 		       raidPtr->Disks[col].devname);
    425      1.37     oster #endif
    426  1.56.2.2     skrll 		vp = raidPtr->raid_cinfo[col].ci_vp;
    427  1.56.2.2     skrll 		ac = raidPtr->Disks[col].auto_configured;
    428  1.56.2.2     skrll 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    429  1.56.2.2     skrll 		rf_close_component(raidPtr, vp, ac);
    430  1.56.2.2     skrll 		RF_LOCK_MUTEX(raidPtr->mutex);
    431  1.56.2.2     skrll 		raidPtr->raid_cinfo[col].ci_vp = NULL;
    432  1.56.2.2     skrll 	}
    433  1.56.2.2     skrll 	/* note that this disk was *not* auto_configured (any longer)*/
    434  1.56.2.2     skrll 	raidPtr->Disks[col].auto_configured = 0;
    435  1.56.2.2     skrll 
    436      1.37     oster #if 0
    437  1.56.2.2     skrll 	printf("About to (re-)open the device for rebuilding: %s\n",
    438  1.56.2.2     skrll 	       raidPtr->Disks[col].devname);
    439      1.37     oster #endif
    440  1.56.2.2     skrll 	RF_UNLOCK_MUTEX(raidPtr->mutex);
    441  1.56.2.4     skrll 	retcode = raidlookup(raidPtr->Disks[col].devname, lwp, &vp);
    442  1.56.2.2     skrll 
    443  1.56.2.2     skrll 	if (retcode) {
    444  1.56.2.2     skrll 		printf("raid%d: rebuilding: raidlookup on device: %s failed: %d!\n",raidPtr->raidid,
    445  1.56.2.2     skrll 		       raidPtr->Disks[col].devname, retcode);
    446  1.56.2.2     skrll 
    447  1.56.2.2     skrll 		/* the component isn't responding properly...
    448  1.56.2.2     skrll 		   must be still dead :-( */
    449  1.56.2.2     skrll 		RF_LOCK_MUTEX(raidPtr->mutex);
    450  1.56.2.2     skrll 		raidPtr->reconInProgress--;
    451      1.48     oster 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    452  1.56.2.2     skrll 		RF_SIGNAL_COND(raidPtr->waitForReconCond);
    453  1.56.2.2     skrll 		return(retcode);
    454  1.56.2.2     skrll 	}
    455       1.6     oster 
    456  1.56.2.2     skrll 	/* Ok, so we can at least do a lookup...
    457  1.56.2.2     skrll 	   How about actually getting a vp for it? */
    458  1.56.2.2     skrll 
    459  1.56.2.4     skrll 	if ((retcode = VOP_GETATTR(vp, &va, lwp->l_proc->p_ucred, lwp)) != 0) {
    460       1.6     oster 		RF_LOCK_MUTEX(raidPtr->mutex);
    461       1.6     oster 		raidPtr->reconInProgress--;
    462       1.6     oster 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    463  1.56.2.2     skrll 		RF_SIGNAL_COND(raidPtr->waitForReconCond);
    464  1.56.2.2     skrll 		return(retcode);
    465  1.56.2.2     skrll 	}
    466       1.6     oster 
    467  1.56.2.4     skrll 	retcode = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, lwp->l_proc->p_ucred, lwp);
    468  1.56.2.2     skrll 	if (retcode) {
    469  1.56.2.2     skrll 		RF_LOCK_MUTEX(raidPtr->mutex);
    470  1.56.2.2     skrll 		raidPtr->reconInProgress--;
    471  1.56.2.2     skrll 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    472  1.56.2.2     skrll 		RF_SIGNAL_COND(raidPtr->waitForReconCond);
    473  1.56.2.2     skrll 		return(retcode);
    474       1.5     oster 	}
    475  1.56.2.2     skrll 	RF_LOCK_MUTEX(raidPtr->mutex);
    476  1.56.2.2     skrll 	raidPtr->Disks[col].blockSize =	dpart.disklab->d_secsize;
    477  1.56.2.2     skrll 
    478  1.56.2.2     skrll 	raidPtr->Disks[col].numBlocks = dpart.part->p_size -
    479  1.56.2.2     skrll 		rf_protectedSectors;
    480  1.56.2.2     skrll 
    481  1.56.2.2     skrll 	raidPtr->raid_cinfo[col].ci_vp = vp;
    482  1.56.2.2     skrll 	raidPtr->raid_cinfo[col].ci_dev = va.va_rdev;
    483  1.56.2.2     skrll 
    484  1.56.2.2     skrll 	raidPtr->Disks[col].dev = va.va_rdev;
    485  1.56.2.2     skrll 
    486  1.56.2.2     skrll 	/* we allow the user to specify that only a fraction
     487  1.56.2.2     skrll 	   of the disks should be used.  This is just for debug:
     488  1.56.2.2     skrll 	   it speeds up the parity scan. */
    489  1.56.2.2     skrll 	raidPtr->Disks[col].numBlocks = raidPtr->Disks[col].numBlocks *
    490  1.56.2.2     skrll 		rf_sizePercentage / 100;
    491  1.56.2.2     skrll 	RF_UNLOCK_MUTEX(raidPtr->mutex);
    492  1.56.2.2     skrll 
    493  1.56.2.2     skrll 	spareDiskPtr = &raidPtr->Disks[col];
    494  1.56.2.2     skrll 	spareDiskPtr->status = rf_ds_used_spare;
    495  1.56.2.2     skrll 
    496  1.56.2.2     skrll 	printf("raid%d: initiating in-place reconstruction on column %d\n",
    497  1.56.2.2     skrll 	       raidPtr->raidid, col);
    498  1.56.2.2     skrll 
    499  1.56.2.2     skrll 	reconDesc = AllocRaidReconDesc((void *) raidPtr, col, spareDiskPtr,
    500  1.56.2.2     skrll 				       numDisksDone, col);
    501  1.56.2.2     skrll 	raidPtr->reconDesc = (void *) reconDesc;
    502  1.56.2.2     skrll #if RF_RECON_STATS > 0
    503  1.56.2.2     skrll 	reconDesc->hsStallCount = 0;
    504  1.56.2.2     skrll 	reconDesc->numReconExecDelays = 0;
    505  1.56.2.2     skrll 	reconDesc->numReconEventWaits = 0;
    506  1.56.2.2     skrll #endif				/* RF_RECON_STATS > 0 */
    507  1.56.2.2     skrll 	reconDesc->reconExecTimerRunning = 0;
    508  1.56.2.2     skrll 	reconDesc->reconExecTicks = 0;
    509  1.56.2.2     skrll 	reconDesc->maxReconExecTicks = 0;
    510  1.56.2.2     skrll 	rc = rf_ContinueReconstructFailedDisk(reconDesc);
    511  1.56.2.2     skrll 
    512       1.5     oster 	if (!rc) {
    513      1.48     oster 		RF_LOCK_MUTEX(raidPtr->mutex);
    514       1.5     oster 		/* Need to set these here, as at this point it'll be claiming
    515       1.5     oster 		   that the disk is in rf_ds_spared!  But we know better :-) */
    516       1.5     oster 
    517  1.56.2.2     skrll 		raidPtr->Disks[col].status = rf_ds_optimal;
    518  1.56.2.2     skrll 		raidPtr->status = rf_rs_optimal;
    519      1.48     oster 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    520       1.5     oster 
    521       1.5     oster 		/* fix up the component label */
    522       1.5     oster 		/* Don't actually need the read here.. */
    523  1.56.2.2     skrll 		raidread_component_label(raidPtr->raid_cinfo[col].ci_dev,
    524  1.56.2.2     skrll 					 raidPtr->raid_cinfo[col].ci_vp,
    525       1.5     oster 					 &c_label);
    526      1.16     oster 
    527      1.48     oster 		RF_LOCK_MUTEX(raidPtr->mutex);
    528      1.16     oster 		raid_init_component_label(raidPtr, &c_label);
    529      1.16     oster 
    530  1.56.2.2     skrll 		c_label.row = 0;
    531       1.5     oster 		c_label.column = col;
    532      1.28     oster 
    533      1.28     oster 		/* We've just done a rebuild based on all the other
    534      1.28     oster 		   disks, so at this point the parity is known to be
    535      1.28     oster 		   clean, even if it wasn't before. */
    536      1.28     oster 
    537      1.28     oster 		/* XXX doesn't hold for RAID 6!!*/
    538      1.28     oster 
    539      1.28     oster 		raidPtr->parity_good = RF_RAID_CLEAN;
    540      1.48     oster 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    541      1.28     oster 
    542  1.56.2.2     skrll 		raidwrite_component_label(raidPtr->raid_cinfo[col].ci_dev,
    543  1.56.2.2     skrll 					  raidPtr->raid_cinfo[col].ci_vp,
    544       1.5     oster 					  &c_label);
    545      1.49     oster 
    546  1.56.2.8     skrll 	} else {
    547  1.56.2.8     skrll 		/* Reconstruct-in-place failed.  Disk goes back to
    548  1.56.2.8     skrll 		   "failed" status, regardless of what it was before.  */
    549  1.56.2.8     skrll 		RF_LOCK_MUTEX(raidPtr->mutex);
    550  1.56.2.8     skrll 		raidPtr->Disks[col].status = rf_ds_failed;
    551  1.56.2.8     skrll 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    552       1.5     oster 	}
    553  1.56.2.8     skrll 
    554  1.56.2.8     skrll 	rf_update_component_labels(raidPtr, RF_NORMAL_COMPONENT_UPDATE);
    555  1.56.2.8     skrll 
    556  1.56.2.8     skrll 	RF_LOCK_MUTEX(raidPtr->mutex);
    557  1.56.2.8     skrll 	raidPtr->reconInProgress--;
    558  1.56.2.8     skrll 	RF_UNLOCK_MUTEX(raidPtr->mutex);
    559  1.56.2.8     skrll 
    560       1.5     oster 	RF_SIGNAL_COND(raidPtr->waitForReconCond);
    561       1.4     oster 	return (rc);
    562       1.4     oster }
    563       1.4     oster 
    564       1.4     oster 
    565       1.4     oster int
    566  1.56.2.2     skrll rf_ContinueReconstructFailedDisk(RF_RaidReconDesc_t *reconDesc)
    567       1.4     oster {
    568       1.4     oster 	RF_Raid_t *raidPtr = reconDesc->raidPtr;
    569       1.4     oster 	RF_RowCol_t col = reconDesc->col;
    570       1.4     oster 	RF_RowCol_t scol = reconDesc->scol;
    571       1.4     oster 	RF_ReconMap_t *mapPtr;
    572      1.46     oster 	RF_ReconCtrl_t *tmp_reconctrl;
    573       1.4     oster 	RF_ReconEvent_t *event;
    574  1.56.2.8     skrll 	RF_CallbackDesc_t *p;
    575       1.4     oster 	struct timeval etime, elpsd;
    576       1.4     oster 	unsigned long xor_s, xor_resid_us;
    577      1.54    simonb 	int     i, ds;
    578  1.56.2.8     skrll 	int status;
    579  1.56.2.8     skrll 	int recon_error, write_error;
    580       1.4     oster 
    581  1.56.2.6     skrll 	raidPtr->accumXorTimeUs = 0;
    582  1.56.2.2     skrll #if RF_ACC_TRACE > 0
    583  1.56.2.6     skrll 	/* create one trace record per physical disk */
    584  1.56.2.6     skrll 	RF_Malloc(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t), (RF_AccTraceEntry_t *));
    585  1.56.2.2     skrll #endif
    586  1.56.2.6     skrll 
    587  1.56.2.6     skrll 	/* quiesce the array prior to starting recon.  this is needed
    588  1.56.2.6     skrll 	 * to assure no nasty interactions with pending user writes.
    589  1.56.2.6     skrll 	 * We need to do this before we change the disk or row status. */
    590  1.56.2.6     skrll 
    591  1.56.2.6     skrll 	Dprintf("RECON: begin request suspend\n");
    592  1.56.2.6     skrll 	rf_SuspendNewRequestsAndWait(raidPtr);
    593  1.56.2.6     skrll 	Dprintf("RECON: end request suspend\n");
    594  1.56.2.6     skrll 
    595  1.56.2.6     skrll 	/* allocate our RF_ReconCTRL_t before we protect raidPtr->reconControl[row] */
    596  1.56.2.6     skrll 	tmp_reconctrl = rf_MakeReconControl(reconDesc, col, scol);
    597  1.56.2.6     skrll 
    598  1.56.2.6     skrll 	RF_LOCK_MUTEX(raidPtr->mutex);
    599  1.56.2.6     skrll 
    600  1.56.2.6     skrll 	/* create the reconstruction control pointer and install it in
    601  1.56.2.6     skrll 	 * the right slot */
    602  1.56.2.6     skrll 	raidPtr->reconControl = tmp_reconctrl;
    603  1.56.2.6     skrll 	mapPtr = raidPtr->reconControl->reconMap;
    604  1.56.2.6     skrll 	raidPtr->status = rf_rs_reconstructing;
    605  1.56.2.6     skrll 	raidPtr->Disks[col].status = rf_ds_reconstructing;
    606  1.56.2.6     skrll 	raidPtr->Disks[col].spareCol = scol;
    607  1.56.2.6     skrll 
    608  1.56.2.6     skrll 	RF_UNLOCK_MUTEX(raidPtr->mutex);
    609  1.56.2.6     skrll 
    610  1.56.2.6     skrll 	RF_GETTIME(raidPtr->reconControl->starttime);
    611  1.56.2.6     skrll 
    612  1.56.2.6     skrll 	/* now start up the actual reconstruction: issue a read for
    613  1.56.2.6     skrll 	 * each surviving disk */
    614  1.56.2.6     skrll 
    615  1.56.2.6     skrll 	reconDesc->numDisksDone = 0;
    616  1.56.2.6     skrll 	for (i = 0; i < raidPtr->numCol; i++) {
    617  1.56.2.6     skrll 		if (i != col) {
    618  1.56.2.6     skrll 			/* find and issue the next I/O on the
    619  1.56.2.6     skrll 			 * indicated disk */
    620  1.56.2.6     skrll 			if (IssueNextReadRequest(raidPtr, i)) {
    621  1.56.2.6     skrll 				Dprintf1("RECON: done issuing for c%d\n", i);
    622       1.4     oster 				reconDesc->numDisksDone++;
    623       1.4     oster 			}
    624       1.4     oster 		}
    625  1.56.2.6     skrll 	}
    626       1.4     oster 
    627  1.56.2.6     skrll 	Dprintf("RECON: resume requests\n");
    628  1.56.2.6     skrll 	rf_ResumeNewRequests(raidPtr);
    629  1.56.2.6     skrll 
    630  1.56.2.6     skrll 	/* process reconstruction events until all disks report that
    631  1.56.2.6     skrll 	 * they've completed all work */
    632       1.4     oster 
    633  1.56.2.6     skrll 	mapPtr = raidPtr->reconControl->reconMap;
    634  1.56.2.8     skrll 	recon_error = 0;
    635  1.56.2.8     skrll 	write_error = 0;
    636  1.56.2.8     skrll 
    637  1.56.2.6     skrll 	while (reconDesc->numDisksDone < raidPtr->numCol - 1) {
    638  1.56.2.6     skrll 
    639  1.56.2.6     skrll 		event = rf_GetNextReconEvent(reconDesc);
    640  1.56.2.8     skrll 		status = ProcessReconEvent(raidPtr, event);
    641  1.56.2.8     skrll 
    642  1.56.2.8     skrll 		/* the normal case is that a read completes, and all is well. */
    643  1.56.2.8     skrll 		if (status == RF_RECON_DONE_READS) {
    644  1.56.2.6     skrll 			reconDesc->numDisksDone++;
    645  1.56.2.8     skrll 		} else if ((status == RF_RECON_READ_ERROR) ||
    646  1.56.2.8     skrll 			   (status == RF_RECON_WRITE_ERROR)) {
    647  1.56.2.8     skrll 			/* an error was encountered while reconstructing...
    648  1.56.2.8     skrll 			   Pretend we've finished this disk.
    649  1.56.2.8     skrll 			*/
    650  1.56.2.8     skrll 			recon_error = 1;
    651  1.56.2.8     skrll 			raidPtr->reconControl->error = 1;
    652  1.56.2.8     skrll 
    653  1.56.2.8     skrll 			/* bump the numDisksDone count for reads,
    654  1.56.2.8     skrll 			   but not for writes */
    655  1.56.2.8     skrll 			if (status == RF_RECON_READ_ERROR)
    656  1.56.2.8     skrll 				reconDesc->numDisksDone++;
    657  1.56.2.8     skrll 
    658  1.56.2.8     skrll 			/* write errors are special -- when we are
    659  1.56.2.8     skrll 			   done dealing with the reads that are
    660  1.56.2.8     skrll 			   finished, we don't want to wait for any
    661  1.56.2.8     skrll 			   writes */
    662  1.56.2.8     skrll 			if (status == RF_RECON_WRITE_ERROR)
    663  1.56.2.8     skrll 				write_error = 1;
    664  1.56.2.8     skrll 
    665  1.56.2.8     skrll 		} else if (status == RF_RECON_READ_STOPPED) {
    666  1.56.2.8     skrll 			/* count this component as being "done" */
    667  1.56.2.8     skrll 			reconDesc->numDisksDone++;
    668  1.56.2.8     skrll 		}
    669  1.56.2.8     skrll 
    670  1.56.2.8     skrll 		if (recon_error) {
    671  1.56.2.8     skrll 
    672  1.56.2.8     skrll 			/* make sure any stragglers are woken up so that
     673  1.56.2.8     skrll 		   their threads will complete, and we can get out
    674  1.56.2.8     skrll 			   of here with all IO processed */
    675  1.56.2.8     skrll 
    676  1.56.2.8     skrll 			while (raidPtr->reconControl->headSepCBList) {
    677  1.56.2.8     skrll 				p = raidPtr->reconControl->headSepCBList;
    678  1.56.2.8     skrll 				raidPtr->reconControl->headSepCBList = p->next;
    679  1.56.2.8     skrll 				p->next = NULL;
    680  1.56.2.8     skrll 				rf_CauseReconEvent(raidPtr, p->col, NULL, RF_REVENT_HEADSEPCLEAR);
    681  1.56.2.8     skrll 				rf_FreeCallbackDesc(p);
    682  1.56.2.8     skrll 			}
    683  1.56.2.8     skrll 		}
    684  1.56.2.8     skrll 
    685  1.56.2.6     skrll 		raidPtr->reconControl->numRUsTotal =
    686  1.56.2.6     skrll 			mapPtr->totalRUs;
    687  1.56.2.6     skrll 		raidPtr->reconControl->numRUsComplete =
    688  1.56.2.6     skrll 			mapPtr->totalRUs -
    689  1.56.2.6     skrll 			rf_UnitsLeftToReconstruct(mapPtr);
    690  1.56.2.8     skrll 
    691      1.41     oster #if RF_DEBUG_RECON
    692  1.56.2.6     skrll 		raidPtr->reconControl->percentComplete =
    693  1.56.2.6     skrll 			(raidPtr->reconControl->numRUsComplete * 100 / raidPtr->reconControl->numRUsTotal);
    694  1.56.2.6     skrll 		if (rf_prReconSched) {
    695  1.56.2.6     skrll 			rf_PrintReconSchedule(raidPtr->reconControl->reconMap, &(raidPtr->reconControl->starttime));
    696  1.56.2.6     skrll 		}
    697      1.41     oster #endif
    698  1.56.2.6     skrll 	}
    699  1.56.2.6     skrll 
    700  1.56.2.6     skrll 	mapPtr = raidPtr->reconControl->reconMap;
    701  1.56.2.6     skrll 	if (rf_reconDebug) {
    702  1.56.2.6     skrll 		printf("RECON: all reads completed\n");
    703  1.56.2.6     skrll 	}
    704  1.56.2.6     skrll 	/* at this point all the reads have completed.  We now wait
    705  1.56.2.6     skrll 	 * for any pending writes to complete, and then we're done */
    706  1.56.2.8     skrll 
    707  1.56.2.8     skrll 	while (!recon_error && rf_UnitsLeftToReconstruct(raidPtr->reconControl->reconMap) > 0) {
    708  1.56.2.6     skrll 
    709  1.56.2.6     skrll 		event = rf_GetNextReconEvent(reconDesc);
    710  1.56.2.8     skrll 		status = ProcessReconEvent(raidPtr, event);
    711  1.56.2.8     skrll 
    712  1.56.2.8     skrll 		if (status == RF_RECON_WRITE_ERROR) {
    713  1.56.2.8     skrll 			recon_error = 1;
    714  1.56.2.8     skrll 			raidPtr->reconControl->error = 1;
    715  1.56.2.8     skrll 			/* an error was encountered at the very end... bail */
    716  1.56.2.8     skrll 		} else {
    717  1.56.2.6     skrll #if RF_DEBUG_RECON
    718  1.56.2.8     skrll 			raidPtr->reconControl->percentComplete = 100 - (rf_UnitsLeftToReconstruct(mapPtr) * 100 / mapPtr->totalRUs);
    719  1.56.2.8     skrll 			if (rf_prReconSched) {
    720  1.56.2.8     skrll 				rf_PrintReconSchedule(raidPtr->reconControl->reconMap, &(raidPtr->reconControl->starttime));
    721  1.56.2.8     skrll 			}
    722  1.56.2.8     skrll #endif
    723       1.4     oster 		}
    724  1.56.2.8     skrll 	}
    725  1.56.2.8     skrll 
    726  1.56.2.8     skrll 	if (recon_error) {
    727  1.56.2.8     skrll 		/* we've encountered an error in reconstructing. */
    728  1.56.2.8     skrll 		printf("raid%d: reconstruction failed.\n", raidPtr->raidid);
    729  1.56.2.8     skrll 
    730  1.56.2.8     skrll 		/* we start by blocking IO to the RAID set. */
    731  1.56.2.8     skrll 		rf_SuspendNewRequestsAndWait(raidPtr);
    732  1.56.2.8     skrll 
    733  1.56.2.8     skrll 		RF_LOCK_MUTEX(raidPtr->mutex);
    734  1.56.2.8     skrll 		/* mark set as being degraded, rather than
    735  1.56.2.8     skrll 		   rf_rs_reconstructing as we were before the problem.
    736  1.56.2.8     skrll 		   After this is done we can update status of the
    737  1.56.2.8     skrll 		   component disks without worrying about someone
    738  1.56.2.8     skrll 		   trying to read from a failed component.
    739  1.56.2.8     skrll 		*/
    740  1.56.2.8     skrll 		raidPtr->status = rf_rs_degraded;
    741  1.56.2.8     skrll 		RF_UNLOCK_MUTEX(raidPtr->mutex);
    742  1.56.2.8     skrll 
    743  1.56.2.8     skrll 		/* resume IO */
    744  1.56.2.8     skrll 		rf_ResumeNewRequests(raidPtr);
    745  1.56.2.8     skrll 
    746  1.56.2.8     skrll 		/* At this point there are two cases:
    747  1.56.2.8     skrll 		   1) If we've experienced a read error, then we've
    748  1.56.2.8     skrll 		   already waited for all the reads we're going to get,
    749  1.56.2.8     skrll 		   and we just need to wait for the writes.
    750  1.56.2.8     skrll 
    751  1.56.2.8     skrll 		   2) If we've experienced a write error, we've also
    752  1.56.2.8     skrll 		   already waited for all the reads to complete,
    753  1.56.2.8     skrll 		   but there is little point in waiting for the writes --
    754  1.56.2.8     skrll 		   when they do complete, they will just be ignored.
    755  1.56.2.8     skrll 
    756  1.56.2.8     skrll 		   So we just wait for writes to complete if we didn't have a
    757  1.56.2.8     skrll 		   write error.
    758  1.56.2.8     skrll 		*/
    759  1.56.2.8     skrll 
    760  1.56.2.8     skrll 		if (!write_error) {
    761  1.56.2.8     skrll 			/* wait for writes to complete */
    762  1.56.2.8     skrll 			while (raidPtr->reconControl->pending_writes > 0) {
    763  1.56.2.8     skrll 
    764  1.56.2.8     skrll 				event = rf_GetNextReconEvent(reconDesc);
    765  1.56.2.8     skrll 				status = ProcessReconEvent(raidPtr, event);
    766  1.56.2.8     skrll 
    767  1.56.2.8     skrll 				if (status == RF_RECON_WRITE_ERROR) {
    768  1.56.2.8     skrll 					raidPtr->reconControl->error = 1;
    769  1.56.2.8     skrll 					/* an error was encountered at the very end... bail.
    770  1.56.2.8     skrll 					   This will be very bad news for the user, since
    771  1.56.2.8     skrll 					   at this point there will have been a read error
    772  1.56.2.8     skrll 					   on one component, and a write error on another!
    773  1.56.2.8     skrll 					*/
    774  1.56.2.8     skrll 					break;
    775  1.56.2.8     skrll 				}
    776  1.56.2.8     skrll 			}
    777  1.56.2.8     skrll 		}
    778  1.56.2.8     skrll 
    779  1.56.2.8     skrll 
    780  1.56.2.8     skrll 		/* cleanup */
    781  1.56.2.8     skrll 
    782  1.56.2.8     skrll 		/* drain the event queue - after waiting for the writes above,
    783  1.56.2.8     skrll 		   there shouldn't be much (if anything!) left in the queue. */
    784  1.56.2.8     skrll 
    785  1.56.2.8     skrll 		rf_DrainReconEventQueue(reconDesc);
    786  1.56.2.8     skrll 
    787  1.56.2.8     skrll 		/* XXX  As much as we'd like to free the recon control structure
    788  1.56.2.8     skrll 		   and the reconDesc, we have no way of knowing if/when those will
    789  1.56.2.8     skrll 		   be touched by IO that has yet to occur.  It is rather poor to be
    790  1.56.2.8     skrll 		   basically causing a 'memory leak' here, but there doesn't seem to be
    791  1.56.2.8     skrll 		   a cleaner alternative at this time.  Perhaps when the reconstruct code
    792  1.56.2.8     skrll 		   gets a makeover this problem will go away.
    793  1.56.2.8     skrll 		*/
    794  1.56.2.8     skrll #if 0
    795  1.56.2.8     skrll 		rf_FreeReconControl(raidPtr);
    796  1.56.2.8     skrll #endif
    797  1.56.2.8     skrll 
    798  1.56.2.8     skrll #if RF_ACC_TRACE > 0
    799  1.56.2.8     skrll 		RF_Free(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t));
    800  1.56.2.8     skrll #endif
    801  1.56.2.8     skrll 		/* XXX see comment above */
    802  1.56.2.8     skrll #if 0
    803  1.56.2.8     skrll 		FreeReconDesc(reconDesc);
    804  1.56.2.6     skrll #endif
    805  1.56.2.8     skrll 
    806  1.56.2.8     skrll 		return (1);
    807  1.56.2.6     skrll 	}
    808      1.14     oster 
    809  1.56.2.6     skrll 	/* Success:  mark the dead disk as reconstructed.  We quiesce
    810  1.56.2.6     skrll 	 * the array here to assure no nasty interactions with pending
    811  1.56.2.6     skrll 	 * user accesses when we free up the psstatus structure as
    812  1.56.2.6     skrll 	 * part of FreeReconControl() */
    813  1.56.2.6     skrll 
    814  1.56.2.6     skrll 	rf_SuspendNewRequestsAndWait(raidPtr);
    815  1.56.2.6     skrll 
    816  1.56.2.6     skrll 	RF_LOCK_MUTEX(raidPtr->mutex);
    817  1.56.2.6     skrll 	raidPtr->numFailures--;
    818  1.56.2.6     skrll 	ds = (raidPtr->Layout.map->flags & RF_DISTRIBUTE_SPARE);
    819  1.56.2.6     skrll 	raidPtr->Disks[col].status = (ds) ? rf_ds_dist_spared : rf_ds_spared;
    820  1.56.2.6     skrll 	raidPtr->status = (ds) ? rf_rs_reconfigured : rf_rs_optimal;
    821  1.56.2.6     skrll 	RF_UNLOCK_MUTEX(raidPtr->mutex);
    822  1.56.2.6     skrll 	RF_GETTIME(etime);
    823  1.56.2.6     skrll 	RF_TIMEVAL_DIFF(&(raidPtr->reconControl->starttime), &etime, &elpsd);
    824  1.56.2.6     skrll 
    825  1.56.2.6     skrll 	rf_ResumeNewRequests(raidPtr);
    826  1.56.2.6     skrll 
    827  1.56.2.6     skrll 	printf("raid%d: Reconstruction of disk at col %d completed\n",
    828  1.56.2.6     skrll 	       raidPtr->raidid, col);
    829  1.56.2.6     skrll 	xor_s = raidPtr->accumXorTimeUs / 1000000;
    830  1.56.2.6     skrll 	xor_resid_us = raidPtr->accumXorTimeUs % 1000000;
    831  1.56.2.6     skrll 	printf("raid%d: Recon time was %d.%06d seconds, accumulated XOR time was %ld us (%ld.%06ld)\n",
    832  1.56.2.6     skrll 	       raidPtr->raidid,
    833  1.56.2.6     skrll 	       (int) elpsd.tv_sec, (int) elpsd.tv_usec,
    834  1.56.2.6     skrll 	       raidPtr->accumXorTimeUs, xor_s, xor_resid_us);
    835  1.56.2.6     skrll 	printf("raid%d:  (start time %d sec %d usec, end time %d sec %d usec)\n",
    836  1.56.2.6     skrll 	       raidPtr->raidid,
    837  1.56.2.6     skrll 	       (int) raidPtr->reconControl->starttime.tv_sec,
    838  1.56.2.6     skrll 	       (int) raidPtr->reconControl->starttime.tv_usec,
    839  1.56.2.6     skrll 	       (int) etime.tv_sec, (int) etime.tv_usec);
    840       1.1     oster #if RF_RECON_STATS > 0
    841  1.56.2.6     skrll 	printf("raid%d: Total head-sep stall count was %d\n",
    842  1.56.2.6     skrll 	       raidPtr->raidid, (int) reconDesc->hsStallCount);
    843       1.4     oster #endif				/* RF_RECON_STATS > 0 */
    844  1.56.2.6     skrll 	rf_FreeReconControl(raidPtr);
    845  1.56.2.2     skrll #if RF_ACC_TRACE > 0
    846  1.56.2.6     skrll 	RF_Free(raidPtr->recon_tracerecs, raidPtr->numCol * sizeof(RF_AccTraceEntry_t));
    847  1.56.2.2     skrll #endif
    848  1.56.2.6     skrll 	FreeReconDesc(reconDesc);
    849  1.56.2.6     skrll 
    850       1.4     oster 	return (0);
    851  1.56.2.8     skrll 
    852       1.1     oster }
    853      1.13     oster /*****************************************************************************
    854       1.1     oster  * do the right thing upon each reconstruction event.
    855      1.13     oster  *****************************************************************************/
    856       1.4     oster static int
    857  1.56.2.2     skrll ProcessReconEvent(RF_Raid_t *raidPtr, RF_ReconEvent_t *event)
    858       1.4     oster {
    859       1.4     oster 	int     retcode = 0, submitblocked;
    860       1.4     oster 	RF_ReconBuffer_t *rbuf;
    861       1.4     oster 	RF_SectorCount_t sectorsPerRU;
    862       1.4     oster 
    863  1.56.2.8     skrll 	retcode = RF_RECON_READ_STOPPED;
    864  1.56.2.8     skrll 
    865       1.4     oster 	Dprintf1("RECON: ProcessReconEvent type %d\n", event->type);
    866       1.4     oster 	switch (event->type) {
    867       1.4     oster 
    868       1.4     oster 		/* a read I/O has completed */
    869       1.4     oster 	case RF_REVENT_READDONE:
    870  1.56.2.2     skrll 		rbuf = raidPtr->reconControl->perDiskInfo[event->col].rbuf;
    871  1.56.2.2     skrll 		Dprintf2("RECON: READDONE EVENT: col %d psid %ld\n",
    872  1.56.2.2     skrll 		    event->col, rbuf->parityStripeID);
    873       1.4     oster 		Dprintf7("RECON: done read  psid %ld buf %lx  %02x %02x %02x %02x %02x\n",
    874       1.4     oster 		    rbuf->parityStripeID, rbuf->buffer, rbuf->buffer[0] & 0xff, rbuf->buffer[1] & 0xff,
    875       1.4     oster 		    rbuf->buffer[2] & 0xff, rbuf->buffer[3] & 0xff, rbuf->buffer[4] & 0xff);
    876       1.4     oster 		rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
    877  1.56.2.8     skrll 		if (!raidPtr->reconControl->error) {
    878  1.56.2.8     skrll 			submitblocked = rf_SubmitReconBuffer(rbuf, 0, 0);
    879  1.56.2.8     skrll 			Dprintf1("RECON: submitblocked=%d\n", submitblocked);
    880  1.56.2.8     skrll 			if (!submitblocked)
    881  1.56.2.8     skrll 				retcode = IssueNextReadRequest(raidPtr, event->col);
    882  1.56.2.8     skrll 		}
    883       1.4     oster 		break;
    884       1.4     oster 
    885       1.4     oster 		/* a write I/O has completed */
    886       1.4     oster 	case RF_REVENT_WRITEDONE:
    887      1.40     oster #if RF_DEBUG_RECON
    888       1.4     oster 		if (rf_floatingRbufDebug) {
    889       1.4     oster 			rf_CheckFloatingRbufCount(raidPtr, 1);
    890       1.4     oster 		}
    891      1.38     oster #endif
    892       1.4     oster 		sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU;
    893       1.4     oster 		rbuf = (RF_ReconBuffer_t *) event->arg;
    894       1.4     oster 		rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
    895       1.4     oster 		Dprintf3("RECON: WRITEDONE EVENT: psid %d ru %d (%d %% complete)\n",
    896  1.56.2.2     skrll 		    rbuf->parityStripeID, rbuf->which_ru, raidPtr->reconControl->percentComplete);
    897  1.56.2.2     skrll 		rf_ReconMapUpdate(raidPtr, raidPtr->reconControl->reconMap,
    898       1.4     oster 		    rbuf->failedDiskSectorOffset, rbuf->failedDiskSectorOffset + sectorsPerRU - 1);
    899  1.56.2.2     skrll 		rf_RemoveFromActiveReconTable(raidPtr, rbuf->parityStripeID, rbuf->which_ru);
    900       1.4     oster 
    901  1.56.2.8     skrll 		RF_LOCK_MUTEX(raidPtr->reconControl->rb_mutex);
    902  1.56.2.8     skrll 		raidPtr->reconControl->pending_writes--;
    903  1.56.2.8     skrll 		RF_UNLOCK_MUTEX(raidPtr->reconControl->rb_mutex);
    904  1.56.2.8     skrll 
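		/* floating reconbuffers are handed back to the recon control
		   structure; rb_lock serializes access to its buffer lists */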
    905       1.4     oster 		if (rbuf->type == RF_RBUF_TYPE_FLOATING) {
    906  1.56.2.2     skrll 			RF_LOCK_MUTEX(raidPtr->reconControl->rb_mutex);
    907  1.56.2.2     skrll 			while(raidPtr->reconControl->rb_lock) {
    908  1.56.2.2     skrll 				ltsleep(&raidPtr->reconControl->rb_lock, PRIBIO, "reconctrlpre1", 0,
    909  1.56.2.2     skrll 					&raidPtr->reconControl->rb_mutex);
    910  1.56.2.2     skrll 			}
    911  1.56.2.2     skrll 			raidPtr->reconControl->rb_lock = 1;
    912  1.56.2.2     skrll 			RF_UNLOCK_MUTEX(raidPtr->reconControl->rb_mutex);
    913  1.56.2.2     skrll 
    914       1.4     oster 			raidPtr->numFullReconBuffers--;
    915  1.56.2.2     skrll 			rf_ReleaseFloatingReconBuffer(raidPtr, rbuf);
    916  1.56.2.2     skrll 
    917  1.56.2.2     skrll 			RF_LOCK_MUTEX(raidPtr->reconControl->rb_mutex);
    918  1.56.2.2     skrll 			raidPtr->reconControl->rb_lock = 0;
    919  1.56.2.2     skrll 			wakeup(&raidPtr->reconControl->rb_lock);
    920  1.56.2.2     skrll 			RF_UNLOCK_MUTEX(raidPtr->reconControl->rb_mutex);
    921       1.4     oster 		} else
    922       1.4     oster 			if (rbuf->type == RF_RBUF_TYPE_FORCED)
    923       1.4     oster 				rf_FreeReconBuffer(rbuf);
    924       1.4     oster 			else
    925       1.4     oster 				RF_ASSERT(0);
    926  1.56.2.8     skrll 		retcode = 0;
    927       1.4     oster 		break;
    928       1.4     oster 
    929       1.4     oster 	case RF_REVENT_BUFCLEAR:	/* A buffer-stall condition has been
    930       1.4     oster 					 * cleared */
    931  1.56.2.2     skrll 		Dprintf1("RECON: BUFCLEAR EVENT: col %d\n", event->col);
    932  1.56.2.8     skrll 		if (!raidPtr->reconControl->error) {
    933  1.56.2.8     skrll 			submitblocked = rf_SubmitReconBuffer(raidPtr->reconControl->perDiskInfo[event->col].rbuf,
    934  1.56.2.8     skrll 							     0, (int) (long) event->arg);
    935  1.56.2.8     skrll 			RF_ASSERT(!submitblocked);	/* we wouldn't have gotten the
    936  1.56.2.8     skrll 							 * BUFCLEAR event if we
    937  1.56.2.8     skrll 							 * couldn't submit */
    938  1.56.2.8     skrll 			retcode = IssueNextReadRequest(raidPtr, event->col);
    939  1.56.2.8     skrll 		}
    940       1.4     oster 		break;
    941       1.4     oster 
    942       1.4     oster 	case RF_REVENT_BLOCKCLEAR:	/* A user-write reconstruction
    943       1.4     oster 					 * blockage has been cleared */
    944  1.56.2.2     skrll 		DDprintf1("RECON: BLOCKCLEAR EVENT: col %d\n", event->col);
    945  1.56.2.8     skrll 		if (!raidPtr->reconControl->error) {
    946  1.56.2.8     skrll 			retcode = TryToRead(raidPtr, event->col);
    947  1.56.2.8     skrll 		}
    948       1.4     oster 		break;
    949       1.4     oster 
    950       1.4     oster 	case RF_REVENT_HEADSEPCLEAR:	/* A max-head-separation
    951       1.4     oster 					 * reconstruction blockage has been
    952       1.4     oster 					 * cleared */
    953  1.56.2.2     skrll 		Dprintf1("RECON: HEADSEPCLEAR EVENT: col %d\n", event->col);
    954  1.56.2.8     skrll 		if (!raidPtr->reconControl->error) {
    955  1.56.2.8     skrll 			retcode = TryToRead(raidPtr, event->col);
    956  1.56.2.8     skrll 		}
    957       1.4     oster 		break;
    958       1.4     oster 
    959       1.4     oster 		/* a buffer has become ready to write */
    960       1.4     oster 	case RF_REVENT_BUFREADY:
    961  1.56.2.2     skrll 		Dprintf1("RECON: BUFREADY EVENT: col %d\n", event->col);
    962  1.56.2.8     skrll 		if (!raidPtr->reconControl->error) {
    963  1.56.2.8     skrll 			retcode = IssueNextWriteRequest(raidPtr);
    964      1.40     oster #if RF_DEBUG_RECON
    965  1.56.2.8     skrll 			if (rf_floatingRbufDebug) {
    966  1.56.2.8     skrll 				rf_CheckFloatingRbufCount(raidPtr, 1);
    967  1.56.2.8     skrll 			}
    968      1.38     oster #endif
    969  1.56.2.8     skrll 		}
    970       1.4     oster 		break;
    971       1.4     oster 
    972       1.4     oster 		/* we need to skip the current RU entirely because it got
    973       1.4     oster 		 * recon'd while we were waiting for something else to happen */
    974       1.4     oster 	case RF_REVENT_SKIP:
    975  1.56.2.2     skrll 		DDprintf1("RECON: SKIP EVENT: col %d\n", event->col);
    976  1.56.2.8     skrll 		if (!raidPtr->reconControl->error) {
    977  1.56.2.8     skrll 			retcode = IssueNextReadRequest(raidPtr, event->col);
    978  1.56.2.8     skrll 		}
    979       1.4     oster 		break;
    980       1.4     oster 
    981       1.4     oster 		/* a forced-reconstruction read access has completed.  Just
    982       1.4     oster 		 * submit the buffer */
    983       1.4     oster 	case RF_REVENT_FORCEDREADDONE:
    984       1.4     oster 		rbuf = (RF_ReconBuffer_t *) event->arg;
    985       1.4     oster 		rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
    986  1.56.2.2     skrll 		DDprintf1("RECON: FORCEDREADDONE EVENT: col %d\n", event->col);
    987  1.56.2.8     skrll 		if (!raidPtr->reconControl->error) {
    988  1.56.2.8     skrll 			submitblocked = rf_SubmitReconBuffer(rbuf, 1, 0);
    989  1.56.2.8     skrll 			RF_ASSERT(!submitblocked);
    990  1.56.2.8     skrll 		}
    991       1.4     oster 		break;
    992       1.4     oster 
    993  1.56.2.2     skrll 		/* A read I/O failed to complete */
    994  1.56.2.2     skrll 	case RF_REVENT_READ_FAILED:
    995  1.56.2.8     skrll 		retcode = RF_RECON_READ_ERROR;
    996  1.56.2.8     skrll 		break;
    997  1.56.2.2     skrll 
    998  1.56.2.2     skrll 		/* A write I/O failed to complete */
    999  1.56.2.2     skrll 	case RF_REVENT_WRITE_FAILED:
   1000  1.56.2.8     skrll 		retcode = RF_RECON_WRITE_ERROR;
   1001  1.56.2.8     skrll 
   1002  1.56.2.8     skrll 		rbuf = (RF_ReconBuffer_t *) event->arg;
   1003  1.56.2.8     skrll 
    1004  1.56.2.8     skrll 		/* clean up the disk queue data */
   1005  1.56.2.8     skrll 		rf_FreeDiskQueueData((RF_DiskQueueData_t *) rbuf->arg);
   1006  1.56.2.8     skrll 
   1007  1.56.2.8     skrll 		/* At this point we're erroring out, badly, and floatingRbufs
   1008  1.56.2.8     skrll 		   may not even be valid.  Rather than putting this back onto
   1009  1.56.2.8     skrll 		   the floatingRbufs list, just arrange for its immediate
   1010  1.56.2.8     skrll 		   destruction.
   1011  1.56.2.8     skrll 		*/
   1012  1.56.2.8     skrll 		rf_FreeReconBuffer(rbuf);
   1013  1.56.2.8     skrll 		break;
   1014  1.56.2.2     skrll 
   1015  1.56.2.2     skrll 		/* a forced read I/O failed to complete */
   1016  1.56.2.2     skrll 	case RF_REVENT_FORCEDREAD_FAILED:
   1017  1.56.2.8     skrll 		retcode = RF_RECON_READ_ERROR;
   1018  1.56.2.8     skrll 		break;
   1019  1.56.2.2     skrll 
   1020       1.4     oster 	default:
   1021       1.4     oster 		RF_PANIC();
   1022       1.4     oster 	}
   1023       1.4     oster 	rf_FreeReconEventDesc(event);
   1024       1.4     oster 	return (retcode);
   1025       1.1     oster }
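                             /*
                              * Illustrative sketch only (not code from this file): how a driving
                              * loop might interpret the return codes produced by the event switch
                              * above.  The name ProcessReconEvent() for the enclosing function and
                              * the flags done_issuing_reads/abort_recon are assumptions made for
                              * the example.  0 means "keep going"; RF_RECON_DONE_READS means every
                              * read for the failed disk has been issued; the *_ERROR codes mean
                              * reconstruction must be torn down.
                              */
                             #if 0
                             	rc = ProcessReconEvent(raidPtr, event);
                             	if (rc == RF_RECON_DONE_READS)
                             		done_issuing_reads = 1;	/* stop issuing new reads */
                             	else if (rc == RF_RECON_READ_ERROR || rc == RF_RECON_WRITE_ERROR)
                             		abort_recon = 1;	/* error path: abort reconstruction */
                             	/* rc == 0: nothing special, keep processing events */
                             #endif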
   1026      1.13     oster /*****************************************************************************
   1027       1.1     oster  *
   1028      1.13     oster  * find the next thing that's needed on the indicated disk, and issue
   1029      1.13     oster  * a read request for it.  We assume that the reconstruction buffer
   1030      1.13     oster  * associated with this process is free to receive the data.  If
   1031      1.13     oster  * reconstruction is blocked on the indicated RU, we issue a
   1032      1.13     oster  * blockage-release request instead of a physical disk read request.
   1033      1.13     oster  * If the current disk gets too far ahead of the others, we issue a
   1034      1.13     oster  * head-separation wait request and return.
   1035      1.13     oster  *
   1036      1.13     oster  * ctrl->{ru_count, curPSID, diskOffset} and
   1037      1.22     soren  * rbuf->failedDiskSectorOffset are maintained to point to the unit
   1038      1.13     oster  * we're currently accessing.  Note that this deviates from the
   1039      1.13     oster  * standard C idiom of having counters point to the next thing to be
   1040      1.13     oster  * accessed.  This allows us to easily retry when we're blocked by
   1041      1.13     oster  * head separation or reconstruction-blockage events.
   1042       1.1     oster  *
   1043      1.13     oster  *****************************************************************************/
   1044       1.4     oster static int
   1045  1.56.2.2     skrll IssueNextReadRequest(RF_Raid_t *raidPtr, RF_RowCol_t col)
   1046       1.4     oster {
   1047  1.56.2.2     skrll 	RF_PerDiskReconCtrl_t *ctrl = &raidPtr->reconControl->perDiskInfo[col];
   1048       1.4     oster 	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
   1049       1.4     oster 	RF_ReconBuffer_t *rbuf = ctrl->rbuf;
   1050       1.4     oster 	RF_ReconUnitCount_t RUsPerPU = layoutPtr->SUsPerPU / layoutPtr->SUsPerRU;
   1051       1.4     oster 	RF_SectorCount_t sectorsPerRU = layoutPtr->sectorsPerStripeUnit * layoutPtr->SUsPerRU;
   1052       1.4     oster 	int     do_new_check = 0, retcode = 0, status;
   1053       1.4     oster 
   1054       1.4     oster 	/* if we are currently the slowest disk, mark that we have to do a new
   1055       1.4     oster 	 * check */
   1056  1.56.2.2     skrll 	if (ctrl->headSepCounter <= raidPtr->reconControl->minHeadSepCounter)
   1057       1.4     oster 		do_new_check = 1;
   1058       1.4     oster 
   1059       1.4     oster 	while (1) {
   1060       1.4     oster 
   1061       1.4     oster 		ctrl->ru_count++;
   1062       1.4     oster 		if (ctrl->ru_count < RUsPerPU) {
   1063       1.4     oster 			ctrl->diskOffset += sectorsPerRU;
   1064       1.4     oster 			rbuf->failedDiskSectorOffset += sectorsPerRU;
   1065       1.4     oster 		} else {
   1066       1.4     oster 			ctrl->curPSID++;
   1067       1.4     oster 			ctrl->ru_count = 0;
   1068       1.4     oster 			/* code left over from when head-sep was based on
   1069       1.4     oster 			 * parity stripe id */
   1070  1.56.2.2     skrll 			if (ctrl->curPSID >= raidPtr->reconControl->lastPSID) {
   1071  1.56.2.2     skrll 				CheckForNewMinHeadSep(raidPtr, ++(ctrl->headSepCounter));
   1072  1.56.2.8     skrll 				return (RF_RECON_DONE_READS);	/* finito! */
   1073       1.4     oster 			}
   1074       1.4     oster 			/* find the disk offsets of the start of the parity
   1075       1.4     oster 			 * stripe on both the current disk and the failed
   1076       1.4     oster 			 * disk. skip this entire parity stripe if either disk
   1077       1.4     oster 			 * does not appear in the indicated PS */
   1078  1.56.2.2     skrll 			status = ComputePSDiskOffsets(raidPtr, ctrl->curPSID, col, &ctrl->diskOffset, &rbuf->failedDiskSectorOffset,
   1079  1.56.2.2     skrll 			    &rbuf->spCol, &rbuf->spOffset);
   1080       1.4     oster 			if (status) {
   1081       1.4     oster 				ctrl->ru_count = RUsPerPU - 1;
   1082       1.4     oster 				continue;
   1083       1.4     oster 			}
   1084       1.4     oster 		}
   1085       1.4     oster 		rbuf->which_ru = ctrl->ru_count;
   1086       1.4     oster 
   1087       1.4     oster 		/* skip this RU if it's already been reconstructed */
   1088  1.56.2.2     skrll 		if (rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, rbuf->failedDiskSectorOffset)) {
   1089       1.4     oster 			Dprintf2("Skipping psid %ld ru %d: already reconstructed\n", ctrl->curPSID, ctrl->ru_count);
   1090       1.4     oster 			continue;
   1091       1.4     oster 		}
   1092       1.4     oster 		break;
   1093       1.4     oster 	}
   1094       1.4     oster 	ctrl->headSepCounter++;
   1095       1.4     oster 	if (do_new_check)
   1096  1.56.2.2     skrll 		CheckForNewMinHeadSep(raidPtr, ctrl->headSepCounter);	/* update min if needed */
   1097       1.4     oster 
   1098       1.4     oster 
   1099       1.4     oster 	/* at this point, we have definitely decided what to do, and we have
   1100       1.4     oster 	 * only to see if we can actually do it now */
   1101       1.4     oster 	rbuf->parityStripeID = ctrl->curPSID;
   1102       1.4     oster 	rbuf->which_ru = ctrl->ru_count;
   1103  1.56.2.2     skrll #if RF_ACC_TRACE > 0
   1104      1.29   thorpej 	memset((char *) &raidPtr->recon_tracerecs[col], 0,
   1105      1.29   thorpej 	    sizeof(raidPtr->recon_tracerecs[col]));
   1106       1.4     oster 	raidPtr->recon_tracerecs[col].reconacc = 1;
   1107       1.4     oster 	RF_ETIMER_START(raidPtr->recon_tracerecs[col].recon_timer);
   1108  1.56.2.2     skrll #endif
   1109  1.56.2.2     skrll 	retcode = TryToRead(raidPtr, col);
   1110       1.4     oster 	return (retcode);
   1111       1.1     oster }
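                             /*
                              * Sketch of the "counters name the unit in flight" idiom described in
                              * the block comment above IssueNextReadRequest().  Because ru_count,
                              * curPSID and diskOffset are advanced *before* the read is attempted,
                              * a read deferred by head separation or a recon blockage can later be
                              * retried from exactly the same per-disk state.  advance_to_next_unit()
                              * is a hypothetical name for the loop at the top of the function.
                              */
                             #if 0
                             	advance_to_next_unit(ctrl);	/* hypothetical: the ru_count/curPSID walk */
                             	TryToRead(raidPtr, col);	/* either issues the I/O or parks us on a
                             					 * wait list; a later BLOCKCLEAR or
                             					 * HEADSEPCLEAR event just calls TryToRead()
                             					 * again -- no state has to be rewound */
                             #endif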
   1112      1.13     oster 
   1113      1.13     oster /*
   1114      1.13     oster  * tries to issue the next read on the indicated disk.  We may be
   1115      1.13     oster  * blocked by (a) the heads being too far apart, or (b) recon on the
   1116      1.13     oster  * indicated RU being blocked due to a write by a user thread.  In
   1117      1.13     oster  * this case, we issue a head-sep or blockage wait request, which will
   1118      1.13     oster  * cause this same routine to be invoked again later when the blockage
   1119      1.13     oster  * has cleared.
   1120       1.1     oster  */
   1121      1.13     oster 
   1122       1.4     oster static int
   1123  1.56.2.2     skrll TryToRead(RF_Raid_t *raidPtr, RF_RowCol_t col)
   1124       1.4     oster {
   1125  1.56.2.2     skrll 	RF_PerDiskReconCtrl_t *ctrl = &raidPtr->reconControl->perDiskInfo[col];
   1126       1.4     oster 	RF_SectorCount_t sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU;
   1127       1.4     oster 	RF_StripeNum_t psid = ctrl->curPSID;
   1128       1.4     oster 	RF_ReconUnitNum_t which_ru = ctrl->ru_count;
   1129       1.4     oster 	RF_DiskQueueData_t *req;
   1130  1.56.2.2     skrll 	int     status;
   1131  1.56.2.2     skrll 	RF_ReconParityStripeStatus_t *pssPtr, *newpssPtr;
   1132       1.4     oster 
   1133       1.4     oster 	/* if the current disk is too far ahead of the others, issue a
   1134       1.4     oster 	 * head-separation wait and return */
   1135  1.56.2.2     skrll 	if (CheckHeadSeparation(raidPtr, ctrl, col, ctrl->headSepCounter, which_ru))
   1136       1.4     oster 		return (0);
   1137  1.56.2.2     skrll 
   1138  1.56.2.2     skrll 	/* allocate a new PSS in case we need it */
   1139  1.56.2.2     skrll 	newpssPtr = rf_AllocPSStatus(raidPtr);
   1140  1.56.2.2     skrll 
   1141  1.56.2.2     skrll 	RF_LOCK_PSS_MUTEX(raidPtr, psid);
   1142  1.56.2.2     skrll 	pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_CREATE, newpssPtr);
   1143  1.56.2.2     skrll 
   1144  1.56.2.2     skrll 	if (pssPtr != newpssPtr) {
   1145  1.56.2.2     skrll 		rf_FreePSStatus(raidPtr, newpssPtr);
   1146  1.56.2.2     skrll 	}
   1147       1.4     oster 
   1148       1.4     oster 	/* if recon is blocked on the indicated parity stripe, issue a
   1149       1.4     oster 	 * block-wait request and return. this also must mark the indicated RU
   1150       1.4     oster 	 * in the stripe as under reconstruction if not blocked. */
   1151  1.56.2.2     skrll 	status = CheckForcedOrBlockedReconstruction(raidPtr, pssPtr, ctrl, col, psid, which_ru);
   1152       1.4     oster 	if (status == RF_PSS_RECON_BLOCKED) {
   1153       1.4     oster 		Dprintf2("RECON: Stalling psid %ld ru %d: recon blocked\n", psid, which_ru);
   1154       1.4     oster 		goto out;
   1155       1.4     oster 	} else
   1156       1.4     oster 		if (status == RF_PSS_FORCED_ON_WRITE) {
   1157  1.56.2.2     skrll 			rf_CauseReconEvent(raidPtr, col, NULL, RF_REVENT_SKIP);
   1158       1.4     oster 			goto out;
   1159       1.4     oster 		}
   1160       1.4     oster 	/* make one last check to be sure that the indicated RU didn't get
   1161       1.4     oster 	 * reconstructed while we were waiting for something else to happen.
   1162       1.4     oster 	 * This is unfortunate in that it causes us to make this check twice
   1163       1.4     oster 	 * in the normal case.  Might want to make some attempt to re-work
   1164       1.4     oster 	 * this so that we only do this check if we've definitely blocked on
   1165       1.4     oster 	 * one of the above checks.  When this condition is detected, we may
   1166       1.4     oster 	 * have just created a bogus status entry, which we need to delete. */
   1167  1.56.2.2     skrll 	if (rf_CheckRUReconstructed(raidPtr->reconControl->reconMap, ctrl->rbuf->failedDiskSectorOffset)) {
   1168       1.4     oster 		Dprintf2("RECON: Skipping psid %ld ru %d: prior recon after stall\n", psid, which_ru);
   1169  1.56.2.2     skrll 		if (pssPtr == newpssPtr)
   1170  1.56.2.2     skrll 			rf_PSStatusDelete(raidPtr, raidPtr->reconControl->pssTable, pssPtr);
   1171  1.56.2.2     skrll 		rf_CauseReconEvent(raidPtr, col, NULL, RF_REVENT_SKIP);
   1172       1.4     oster 		goto out;
   1173       1.4     oster 	}
   1174       1.4     oster 	/* found something to read.  issue the I/O */
   1175  1.56.2.2     skrll 	Dprintf4("RECON: Read for psid %ld on col %d offset %ld buf %lx\n",
   1176  1.56.2.2     skrll 	    psid, col, ctrl->diskOffset, ctrl->rbuf->buffer);
   1177  1.56.2.2     skrll #if RF_ACC_TRACE > 0
   1178       1.4     oster 	RF_ETIMER_STOP(raidPtr->recon_tracerecs[col].recon_timer);
   1179       1.4     oster 	RF_ETIMER_EVAL(raidPtr->recon_tracerecs[col].recon_timer);
   1180       1.4     oster 	raidPtr->recon_tracerecs[col].specific.recon.recon_start_to_fetch_us =
   1181       1.4     oster 	    RF_ETIMER_VAL_US(raidPtr->recon_tracerecs[col].recon_timer);
   1182       1.4     oster 	RF_ETIMER_START(raidPtr->recon_tracerecs[col].recon_timer);
   1183  1.56.2.2     skrll #endif
    1184       1.4     oster 	/* should be ok to use a NULL proc pointer here, since all the bufs
    1185       1.4     oster 	 * we use should be in kernel space */
   1186       1.4     oster 	req = rf_CreateDiskQueueData(RF_IO_TYPE_READ, ctrl->diskOffset, sectorsPerRU, ctrl->rbuf->buffer, psid, which_ru,
   1187  1.56.2.9     skrll 	    ReconReadDoneProc, (void *) ctrl,
   1188  1.56.2.2     skrll #if RF_ACC_TRACE > 0
   1189  1.56.2.2     skrll 				     &raidPtr->recon_tracerecs[col],
   1190  1.56.2.2     skrll #else
   1191  1.56.2.2     skrll 				     NULL,
   1192  1.56.2.2     skrll #endif
   1193  1.56.2.9     skrll 				     (void *) raidPtr, 0, NULL, PR_WAITOK);
   1194       1.4     oster 
   1195       1.4     oster 	ctrl->rbuf->arg = (void *) req;
   1196  1.56.2.2     skrll 	rf_DiskIOEnqueue(&raidPtr->Queues[col], req, RF_IO_RECON_PRIORITY);
   1197       1.4     oster 	pssPtr->issued[col] = 1;
   1198       1.1     oster 
   1199       1.1     oster out:
   1200  1.56.2.2     skrll 	RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
   1201       1.4     oster 	return (0);
   1202       1.1     oster }
   1203       1.1     oster 
   1204       1.1     oster 
   1205      1.13     oster /*
   1206      1.13     oster  * given a parity stripe ID, we want to find out whether both the
   1207      1.13     oster  * current disk and the failed disk exist in that parity stripe.  If
   1208      1.13     oster  * not, we want to skip this whole PS.  If so, we want to find the
   1209      1.13     oster  * disk offset of the start of the PS on both the current disk and the
   1210      1.13     oster  * failed disk.
   1211      1.13     oster  *
   1212      1.13     oster  * this works by getting a list of disks comprising the indicated
   1213      1.13     oster  * parity stripe, and searching the list for the current and failed
   1214      1.13     oster  * disks.  Once we've decided they both exist in the parity stripe, we
   1215      1.13     oster  * need to decide whether each is data or parity, so that we'll know
   1216      1.13     oster  * which mapping function to call to get the corresponding disk
   1217       1.1     oster  * offsets.
   1218       1.1     oster  *
   1219      1.13     oster  * this is kind of unpleasant, but doing it this way allows the
   1220      1.13     oster  * reconstruction code to use parity stripe IDs rather than physical
    1221      1.13     oster  * disk addresses to march through the failed disk, which greatly
   1222      1.13     oster  * simplifies a lot of code, as well as eliminating the need for a
   1223      1.13     oster  * reverse-mapping function.  I also think it will execute faster,
   1224      1.13     oster  * since the calls to the mapping module are kept to a minimum.
   1225       1.1     oster  *
   1226      1.13     oster  * ASSUMES THAT THE STRIPE IDENTIFIER IDENTIFIES THE DISKS COMPRISING
   1227  1.56.2.2     skrll  * THE STRIPE IN THE CORRECT ORDER
   1228  1.56.2.2     skrll  *
   1229  1.56.2.2     skrll  * raidPtr          - raid descriptor
   1230  1.56.2.2     skrll  * psid             - parity stripe identifier
   1231  1.56.2.2     skrll  * col              - column of disk to find the offsets for
   1232  1.56.2.2     skrll  * spCol            - out: col of spare unit for failed unit
   1233  1.56.2.2     skrll  * spOffset         - out: offset into disk containing spare unit
   1234  1.56.2.2     skrll  *
   1235  1.56.2.2     skrll  */
   1236      1.13     oster 
   1237      1.13     oster 
   1238       1.4     oster static int
   1239  1.56.2.2     skrll ComputePSDiskOffsets(RF_Raid_t *raidPtr, RF_StripeNum_t psid,
   1240  1.56.2.2     skrll 		     RF_RowCol_t col, RF_SectorNum_t *outDiskOffset,
   1241  1.56.2.2     skrll 		     RF_SectorNum_t *outFailedDiskSectorOffset,
   1242  1.56.2.2     skrll 		     RF_RowCol_t *spCol, RF_SectorNum_t *spOffset)
   1243  1.56.2.2     skrll {
   1244       1.4     oster 	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
   1245  1.56.2.2     skrll 	RF_RowCol_t fcol = raidPtr->reconControl->fcol;
   1246       1.4     oster 	RF_RaidAddr_t sosRaidAddress;	/* start-of-stripe */
   1247       1.4     oster 	RF_RowCol_t *diskids;
   1248       1.4     oster 	u_int   i, j, k, i_offset, j_offset;
   1249  1.56.2.2     skrll 	RF_RowCol_t pcol;
   1250  1.56.2.2     skrll 	int     testcol;
   1251       1.4     oster 	RF_SectorNum_t poffset;
   1252       1.4     oster 	char    i_is_parity = 0, j_is_parity = 0;
   1253       1.4     oster 	RF_RowCol_t stripeWidth = layoutPtr->numDataCol + layoutPtr->numParityCol;
   1254       1.4     oster 
   1255       1.4     oster 	/* get a listing of the disks comprising that stripe */
   1256       1.4     oster 	sosRaidAddress = rf_ParityStripeIDToRaidAddress(layoutPtr, psid);
   1257  1.56.2.2     skrll 	(layoutPtr->map->IdentifyStripe) (raidPtr, sosRaidAddress, &diskids);
   1258       1.4     oster 	RF_ASSERT(diskids);
   1259       1.4     oster 
   1260       1.4     oster 	/* reject this entire parity stripe if it does not contain the
   1261       1.4     oster 	 * indicated disk or it does not contain the failed disk */
   1262  1.56.2.2     skrll 
   1263       1.4     oster 	for (i = 0; i < stripeWidth; i++) {
   1264       1.4     oster 		if (col == diskids[i])
   1265       1.4     oster 			break;
   1266       1.4     oster 	}
   1267       1.4     oster 	if (i == stripeWidth)
   1268       1.4     oster 		goto skipit;
   1269       1.4     oster 	for (j = 0; j < stripeWidth; j++) {
   1270       1.4     oster 		if (fcol == diskids[j])
   1271       1.4     oster 			break;
   1272       1.4     oster 	}
   1273       1.4     oster 	if (j == stripeWidth) {
   1274       1.4     oster 		goto skipit;
   1275       1.4     oster 	}
   1276       1.4     oster 	/* find out which disk the parity is on */
   1277  1.56.2.2     skrll 	(layoutPtr->map->MapParity) (raidPtr, sosRaidAddress, &pcol, &poffset, RF_DONT_REMAP);
   1278       1.4     oster 
   1279       1.4     oster 	/* find out if either the current RU or the failed RU is parity */
   1280       1.4     oster 	/* also, if the parity occurs in this stripe prior to the data and/or
   1281       1.4     oster 	 * failed col, we need to decrement i and/or j */
   1282       1.4     oster 	for (k = 0; k < stripeWidth; k++)
   1283       1.4     oster 		if (diskids[k] == pcol)
   1284       1.4     oster 			break;
   1285       1.4     oster 	RF_ASSERT(k < stripeWidth);
   1286       1.4     oster 	i_offset = i;
   1287       1.4     oster 	j_offset = j;
   1288       1.4     oster 	if (k < i)
   1289       1.4     oster 		i_offset--;
   1290       1.4     oster 	else
   1291       1.4     oster 		if (k == i) {
   1292       1.4     oster 			i_is_parity = 1;
   1293       1.4     oster 			i_offset = 0;
   1294       1.4     oster 		}		/* set offsets to zero to disable multiply
   1295       1.4     oster 				 * below */
   1296       1.4     oster 	if (k < j)
   1297       1.4     oster 		j_offset--;
   1298       1.4     oster 	else
   1299       1.4     oster 		if (k == j) {
   1300       1.4     oster 			j_is_parity = 1;
   1301       1.4     oster 			j_offset = 0;
   1302       1.4     oster 		}
   1303       1.4     oster 	/* at this point, [ij]_is_parity tells us whether the [current,failed]
   1304       1.4     oster 	 * disk is parity at the start of this RU, and, if data, "[ij]_offset"
   1305       1.4     oster 	 * tells us how far into the stripe the [current,failed] disk is. */
   1306       1.4     oster 
   1307       1.4     oster 	/* call the mapping routine to get the offset into the current disk,
   1308       1.4     oster 	 * repeat for failed disk. */
   1309       1.4     oster 	if (i_is_parity)
   1310  1.56.2.2     skrll 		layoutPtr->map->MapParity(raidPtr, sosRaidAddress + i_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outDiskOffset, RF_DONT_REMAP);
   1311       1.4     oster 	else
   1312  1.56.2.2     skrll 		layoutPtr->map->MapSector(raidPtr, sosRaidAddress + i_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outDiskOffset, RF_DONT_REMAP);
   1313       1.4     oster 
   1314  1.56.2.2     skrll 	RF_ASSERT(col == testcol);
   1315       1.4     oster 
   1316       1.4     oster 	if (j_is_parity)
   1317  1.56.2.2     skrll 		layoutPtr->map->MapParity(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outFailedDiskSectorOffset, RF_DONT_REMAP);
   1318       1.4     oster 	else
   1319  1.56.2.2     skrll 		layoutPtr->map->MapSector(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, &testcol, outFailedDiskSectorOffset, RF_DONT_REMAP);
   1320  1.56.2.2     skrll 	RF_ASSERT(fcol == testcol);
   1321       1.4     oster 
   1322       1.4     oster 	/* now locate the spare unit for the failed unit */
   1323  1.56.2.2     skrll #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
   1324       1.4     oster 	if (layoutPtr->map->flags & RF_DISTRIBUTE_SPARE) {
   1325       1.4     oster 		if (j_is_parity)
   1326  1.56.2.2     skrll 			layoutPtr->map->MapParity(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, spCol, spOffset, RF_REMAP);
   1327       1.4     oster 		else
   1328  1.56.2.2     skrll 			layoutPtr->map->MapSector(raidPtr, sosRaidAddress + j_offset * layoutPtr->sectorsPerStripeUnit, spCol, spOffset, RF_REMAP);
   1329       1.4     oster 	} else {
   1330  1.56.2.2     skrll #endif
   1331  1.56.2.2     skrll 		*spCol = raidPtr->reconControl->spareCol;
   1332       1.4     oster 		*spOffset = *outFailedDiskSectorOffset;
   1333  1.56.2.2     skrll #if RF_INCLUDE_PARITY_DECLUSTERING_DS > 0
   1334       1.4     oster 	}
   1335  1.56.2.2     skrll #endif
   1336       1.4     oster 	return (0);
   1337       1.1     oster 
   1338       1.1     oster skipit:
    1339  1.56.2.2     skrll 	Dprintf2("RECON: Skipping psid %ld: nothing needed from c%d\n",
   1340  1.56.2.2     skrll 	    psid, col);
   1341       1.4     oster 	return (1);
   1342       1.1     oster }
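                             /*
                              * Worked example of the parity adjustment above (values are made up for
                              * illustration): suppose IdentifyStripe returns diskids = {2, 4, 0, 3, 1}
                              * and MapParity says the parity for this stripe lives on column 0, so
                              * k == 2.  For a current disk on column 3, i == 3 and, since k < i,
                              * i_offset becomes 2: that disk holds the third *data* unit, two stripe
                              * units past sosRaidAddress.  For a failed disk on column 4, j == 1 and
                              * k > j, so j_offset stays 1.  Had the failed disk been column 0 we
                              * would have j == k, j_is_parity set, j_offset forced to 0, and
                              * MapParity used instead of MapSector to find its offset.
                              */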
   1343       1.4     oster /* this is called when a buffer has become ready to write to the replacement disk */
   1344       1.4     oster static int
   1345  1.56.2.2     skrll IssueNextWriteRequest(RF_Raid_t *raidPtr)
   1346       1.4     oster {
   1347       1.4     oster 	RF_RaidLayout_t *layoutPtr = &raidPtr->Layout;
   1348       1.4     oster 	RF_SectorCount_t sectorsPerRU = layoutPtr->sectorsPerStripeUnit * layoutPtr->SUsPerRU;
   1349  1.56.2.2     skrll #if RF_ACC_TRACE > 0
   1350  1.56.2.2     skrll 	RF_RowCol_t fcol = raidPtr->reconControl->fcol;
   1351  1.56.2.2     skrll #endif
   1352       1.4     oster 	RF_ReconBuffer_t *rbuf;
   1353       1.4     oster 	RF_DiskQueueData_t *req;
   1354       1.4     oster 
   1355  1.56.2.2     skrll 	rbuf = rf_GetFullReconBuffer(raidPtr->reconControl);
   1356       1.4     oster 	RF_ASSERT(rbuf);	/* there must be one available, or we wouldn't
   1357       1.4     oster 				 * have gotten the event that sent us here */
   1358       1.4     oster 	RF_ASSERT(rbuf->pssPtr);
   1359       1.4     oster 
   1360       1.4     oster 	rbuf->pssPtr->writeRbuf = rbuf;
   1361       1.4     oster 	rbuf->pssPtr = NULL;
   1362       1.4     oster 
   1363  1.56.2.2     skrll 	Dprintf6("RECON: New write (c %d offs %d) for psid %ld ru %d (failed disk offset %ld) buf %lx\n",
   1364  1.56.2.2     skrll 	    rbuf->spCol, rbuf->spOffset, rbuf->parityStripeID,
   1365       1.4     oster 	    rbuf->which_ru, rbuf->failedDiskSectorOffset, rbuf->buffer);
   1366       1.4     oster 	Dprintf6("RECON: new write psid %ld   %02x %02x %02x %02x %02x\n",
   1367       1.4     oster 	    rbuf->parityStripeID, rbuf->buffer[0] & 0xff, rbuf->buffer[1] & 0xff,
   1368       1.4     oster 	    rbuf->buffer[2] & 0xff, rbuf->buffer[3] & 0xff, rbuf->buffer[4] & 0xff);
   1369       1.4     oster 
   1370       1.4     oster 	/* should be ok to use a NULL b_proc here b/c all addrs should be in
   1371       1.4     oster 	 * kernel space */
   1372       1.4     oster 	req = rf_CreateDiskQueueData(RF_IO_TYPE_WRITE, rbuf->spOffset,
   1373       1.4     oster 	    sectorsPerRU, rbuf->buffer,
   1374       1.4     oster 	    rbuf->parityStripeID, rbuf->which_ru,
   1375  1.56.2.9     skrll 	    ReconWriteDoneProc, (void *) rbuf,
   1376  1.56.2.2     skrll #if RF_ACC_TRACE > 0
   1377       1.4     oster 	    &raidPtr->recon_tracerecs[fcol],
   1378  1.56.2.2     skrll #else
   1379  1.56.2.2     skrll 				     NULL,
   1380  1.56.2.2     skrll #endif
   1381  1.56.2.9     skrll 	    (void *) raidPtr, 0, NULL, PR_WAITOK);
   1382       1.1     oster 
   1383       1.4     oster 	rbuf->arg = (void *) req;
   1384  1.56.2.8     skrll 	RF_LOCK_MUTEX(raidPtr->reconControl->rb_mutex);
   1385  1.56.2.8     skrll 	raidPtr->reconControl->pending_writes++;
   1386  1.56.2.8     skrll 	RF_UNLOCK_MUTEX(raidPtr->reconControl->rb_mutex);
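                             	/* accounting note: the matching pending_writes decrement happens in
                             	 * the RF_REVENT_WRITEDONE handler once this write completes */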
   1387  1.56.2.2     skrll 	rf_DiskIOEnqueue(&raidPtr->Queues[rbuf->spCol], req, RF_IO_RECON_PRIORITY);
   1388       1.1     oster 
   1389       1.4     oster 	return (0);
   1390       1.1     oster }
   1391      1.13     oster 
   1392      1.13     oster /*
   1393      1.13     oster  * this gets called upon the completion of a reconstruction read
   1394      1.13     oster  * operation the arg is a pointer to the per-disk reconstruction
   1395      1.13     oster  * control structure for the process that just finished a read.
   1396       1.1     oster  *
   1397      1.13     oster  * called at interrupt context in the kernel, so don't do anything
   1398      1.13     oster  * illegal here.
   1399       1.1     oster  */
   1400       1.4     oster static int
   1401  1.56.2.2     skrll ReconReadDoneProc(void *arg, int status)
   1402       1.4     oster {
   1403       1.4     oster 	RF_PerDiskReconCtrl_t *ctrl = (RF_PerDiskReconCtrl_t *) arg;
   1404  1.56.2.8     skrll 	RF_Raid_t *raidPtr;
   1405  1.56.2.8     skrll 
   1406  1.56.2.8     skrll 	/* Detect that reconCtrl is no longer valid, and if that
   1407  1.56.2.8     skrll 	   is the case, bail without calling rf_CauseReconEvent().
   1408  1.56.2.8     skrll 	   There won't be anyone listening for this event anyway */
   1409  1.56.2.8     skrll 
   1410  1.56.2.8     skrll 	if (ctrl->reconCtrl == NULL)
   1411  1.56.2.8     skrll 		return(0);
   1412  1.56.2.8     skrll 
   1413  1.56.2.8     skrll 	raidPtr = ctrl->reconCtrl->reconDesc->raidPtr;
   1414       1.4     oster 
   1415       1.4     oster 	if (status) {
   1416  1.56.2.2     skrll 		printf("raid%d: Recon read failed!\n", raidPtr->raidid);
   1417  1.56.2.2     skrll 		rf_CauseReconEvent(raidPtr, ctrl->col, NULL, RF_REVENT_READ_FAILED);
   1418  1.56.2.2     skrll 		return(0);
   1419       1.4     oster 	}
   1420  1.56.2.2     skrll #if RF_ACC_TRACE > 0
   1421       1.4     oster 	RF_ETIMER_STOP(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
   1422       1.4     oster 	RF_ETIMER_EVAL(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
   1423       1.4     oster 	raidPtr->recon_tracerecs[ctrl->col].specific.recon.recon_fetch_to_return_us =
   1424       1.4     oster 	    RF_ETIMER_VAL_US(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
   1425       1.4     oster 	RF_ETIMER_START(raidPtr->recon_tracerecs[ctrl->col].recon_timer);
   1426  1.56.2.2     skrll #endif
   1427  1.56.2.2     skrll 	rf_CauseReconEvent(raidPtr, ctrl->col, NULL, RF_REVENT_READDONE);
   1428       1.4     oster 	return (0);
   1429       1.1     oster }
   1430       1.1     oster /* this gets called upon the completion of a reconstruction write operation.
   1431       1.1     oster  * the arg is a pointer to the rbuf that was just written
   1432       1.1     oster  *
   1433       1.1     oster  * called at interrupt context in the kernel, so don't do anything illegal here.
   1434       1.1     oster  */
   1435       1.4     oster static int
   1436  1.56.2.2     skrll ReconWriteDoneProc(void *arg, int status)
   1437       1.4     oster {
   1438       1.4     oster 	RF_ReconBuffer_t *rbuf = (RF_ReconBuffer_t *) arg;
   1439       1.4     oster 
   1440  1.56.2.8     skrll 	/* Detect that reconControl is no longer valid, and if that
   1441  1.56.2.8     skrll 	   is the case, bail without calling rf_CauseReconEvent().
   1442  1.56.2.8     skrll 	   There won't be anyone listening for this event anyway */
   1443  1.56.2.8     skrll 
   1444  1.56.2.8     skrll 	if (rbuf->raidPtr->reconControl == NULL)
   1445  1.56.2.8     skrll 		return(0);
   1446  1.56.2.8     skrll 
   1447       1.4     oster 	Dprintf2("Reconstruction completed on psid %ld ru %d\n", rbuf->parityStripeID, rbuf->which_ru);
   1448       1.4     oster 	if (status) {
   1449  1.56.2.2     skrll 		printf("raid%d: Recon write failed!\n", rbuf->raidPtr->raidid);
   1450  1.56.2.2     skrll 		rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, arg, RF_REVENT_WRITE_FAILED);
   1451  1.56.2.2     skrll 		return(0);
   1452       1.4     oster 	}
   1453  1.56.2.2     skrll 	rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, arg, RF_REVENT_WRITEDONE);
   1454       1.4     oster 	return (0);
   1455       1.1     oster }
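                             /*
                              * Condensed sketch of the interrupt-context pattern shared by the two
                              * completion callbacks above: check that the reconstruction state still
                              * exists, then convert the completion (or failure) into a recon event
                              * and return, deferring all real work to the thread that processes
                              * those events.  The names used in the sketch are placeholders.
                              */
                             #if 0
                             	if (recon_state_already_torn_down)		/* placeholder check   */
                             		return (0);				/* nobody is listening */
                             	rf_CauseReconEvent(raidPtr, col, event_arg,	/* event_arg and the   */
                             	    status ? failed_evt : done_evt);		/* events: placeholders */
                             	return (0);
                             #endif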
   1456       1.1     oster 
   1457       1.1     oster 
   1458      1.13     oster /*
   1459      1.13     oster  * computes a new minimum head sep, and wakes up anyone who needs to
   1460      1.13     oster  * be woken as a result
   1461      1.13     oster  */
   1462       1.4     oster static void
   1463  1.56.2.2     skrll CheckForNewMinHeadSep(RF_Raid_t *raidPtr, RF_HeadSepLimit_t hsCtr)
   1464       1.4     oster {
   1465  1.56.2.2     skrll 	RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl;
   1466       1.4     oster 	RF_HeadSepLimit_t new_min;
   1467       1.4     oster 	RF_RowCol_t i;
   1468       1.4     oster 	RF_CallbackDesc_t *p;
   1469       1.4     oster 	RF_ASSERT(hsCtr >= reconCtrlPtr->minHeadSepCounter);	/* from the definition
   1470       1.4     oster 								 * of a minimum */
   1471       1.4     oster 
   1472       1.4     oster 
   1473       1.4     oster 	RF_LOCK_MUTEX(reconCtrlPtr->rb_mutex);
   1474  1.56.2.2     skrll 	while(reconCtrlPtr->rb_lock) {
   1475  1.56.2.2     skrll 		ltsleep(&reconCtrlPtr->rb_lock, PRIBIO, "reconctlcnmhs", 0, &reconCtrlPtr->rb_mutex);
   1476  1.56.2.2     skrll 	}
   1477  1.56.2.2     skrll 	reconCtrlPtr->rb_lock = 1;
   1478  1.56.2.2     skrll 	RF_UNLOCK_MUTEX(reconCtrlPtr->rb_mutex);
   1479       1.4     oster 
   1480       1.4     oster 	new_min = ~(1L << (8 * sizeof(long) - 1));	/* 0x7FFF....FFF */
   1481       1.4     oster 	for (i = 0; i < raidPtr->numCol; i++)
   1482       1.4     oster 		if (i != reconCtrlPtr->fcol) {
   1483       1.4     oster 			if (reconCtrlPtr->perDiskInfo[i].headSepCounter < new_min)
   1484       1.4     oster 				new_min = reconCtrlPtr->perDiskInfo[i].headSepCounter;
   1485       1.4     oster 		}
   1486       1.4     oster 	/* set the new minimum and wake up anyone who can now run again */
   1487       1.4     oster 	if (new_min != reconCtrlPtr->minHeadSepCounter) {
   1488       1.4     oster 		reconCtrlPtr->minHeadSepCounter = new_min;
   1489       1.4     oster 		Dprintf1("RECON:  new min head pos counter val is %ld\n", new_min);
   1490       1.4     oster 		while (reconCtrlPtr->headSepCBList) {
   1491       1.4     oster 			if (reconCtrlPtr->headSepCBList->callbackArg.v > new_min)
   1492       1.4     oster 				break;
   1493       1.4     oster 			p = reconCtrlPtr->headSepCBList;
   1494       1.4     oster 			reconCtrlPtr->headSepCBList = p->next;
   1495       1.4     oster 			p->next = NULL;
   1496  1.56.2.2     skrll 			rf_CauseReconEvent(raidPtr, p->col, NULL, RF_REVENT_HEADSEPCLEAR);
   1497       1.4     oster 			rf_FreeCallbackDesc(p);
   1498       1.4     oster 		}
   1499       1.1     oster 
   1500       1.4     oster 	}
   1501  1.56.2.2     skrll 	RF_LOCK_MUTEX(reconCtrlPtr->rb_mutex);
   1502  1.56.2.2     skrll 	reconCtrlPtr->rb_lock = 0;
   1503  1.56.2.2     skrll 	wakeup(&reconCtrlPtr->rb_lock);
   1504       1.4     oster 	RF_UNLOCK_MUTEX(reconCtrlPtr->rb_mutex);
   1505       1.1     oster }
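                             /*
                              * Worked note on the sentinel above: e.g. with a 64-bit long, 1L << 63
                              * is 0x8000000000000000 (only the sign bit set), so ~(1L << 63) is
                              * 0x7FFFFFFFFFFFFFFF, i.e. LONG_MAX.  Every real headSepCounter value
                              * therefore compares below it, so the first per-disk comparison in the
                              * loop always establishes a genuine minimum.
                              */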
   1506      1.13     oster 
   1507      1.13     oster /*
   1508      1.13     oster  * checks to see that the maximum head separation will not be violated
   1509      1.13     oster  * if we initiate a reconstruction I/O on the indicated disk.
   1510      1.13     oster  * Limiting the maximum head separation between two disks eliminates
   1511      1.13     oster  * the nasty buffer-stall conditions that occur when one disk races
   1512      1.13     oster  * ahead of the others and consumes all of the floating recon buffers.
   1513      1.13     oster  * This code is complex and unpleasant but it's necessary to avoid
   1514      1.13     oster  * some very nasty, albeit fairly rare, reconstruction behavior.
   1515       1.1     oster  *
   1516      1.13     oster  * returns non-zero if and only if we have to stop working on the
   1517      1.13     oster  * indicated disk due to a head-separation delay.
   1518       1.1     oster  */
   1519       1.4     oster static int
   1520  1.56.2.2     skrll CheckHeadSeparation(RF_Raid_t *raidPtr, RF_PerDiskReconCtrl_t *ctrl,
   1521  1.56.2.2     skrll 		    RF_RowCol_t col, RF_HeadSepLimit_t hsCtr,
   1522  1.56.2.2     skrll 		    RF_ReconUnitNum_t which_ru)
   1523       1.4     oster {
   1524  1.56.2.2     skrll 	RF_ReconCtrl_t *reconCtrlPtr = raidPtr->reconControl;
   1525       1.4     oster 	RF_CallbackDesc_t *cb, *p, *pt;
   1526      1.10     oster 	int     retval = 0;
   1527       1.4     oster 
   1528       1.4     oster 	/* if we're too far ahead of the slowest disk, stop working on this
   1529       1.4     oster 	 * disk until the slower ones catch up.  We do this by scheduling a
   1530       1.4     oster 	 * wakeup callback for the time when the slowest disk has caught up.
   1531       1.4     oster 	 * We define "caught up" with 20% hysteresis, i.e. the head separation
   1532       1.4     oster 	 * must have fallen to at most 80% of the max allowable head
   1533       1.4     oster 	 * separation before we'll wake up.
   1534       1.4     oster 	 *
   1535       1.4     oster 	 */
   1536       1.4     oster 	RF_LOCK_MUTEX(reconCtrlPtr->rb_mutex);
   1537  1.56.2.2     skrll 	while(reconCtrlPtr->rb_lock) {
   1538  1.56.2.2     skrll 		ltsleep(&reconCtrlPtr->rb_lock, PRIBIO, "reconctlchs", 0, &reconCtrlPtr->rb_mutex);
   1539  1.56.2.2     skrll 	}
   1540  1.56.2.2     skrll 	reconCtrlPtr->rb_lock = 1;
   1541  1.56.2.2     skrll 	RF_UNLOCK_MUTEX(reconCtrlPtr->rb_mutex);
   1542       1.4     oster 	if ((raidPtr->headSepLimit >= 0) &&
   1543       1.4     oster 	    ((ctrl->headSepCounter - reconCtrlPtr->minHeadSepCounter) > raidPtr->headSepLimit)) {
   1544  1.56.2.2     skrll 		Dprintf5("raid%d: RECON: head sep stall: col %d hsCtr %ld minHSCtr %ld limit %ld\n",
   1545  1.56.2.2     skrll 			 raidPtr->raidid, col, ctrl->headSepCounter,
   1546      1.10     oster 			 reconCtrlPtr->minHeadSepCounter,
   1547      1.10     oster 			 raidPtr->headSepLimit);
   1548       1.4     oster 		cb = rf_AllocCallbackDesc();
   1549       1.4     oster 		/* the minHeadSepCounter value we have to get to before we'll
   1550       1.4     oster 		 * wake up.  build in 20% hysteresis. */
   1551       1.4     oster 		cb->callbackArg.v = (ctrl->headSepCounter - raidPtr->headSepLimit + raidPtr->headSepLimit / 5);
   1552       1.4     oster 		cb->col = col;
   1553       1.4     oster 		cb->next = NULL;
   1554       1.4     oster 
   1555       1.4     oster 		/* insert this callback descriptor into the sorted list of
   1556       1.4     oster 		 * pending head-sep callbacks */
   1557       1.4     oster 		p = reconCtrlPtr->headSepCBList;
   1558       1.4     oster 		if (!p)
   1559       1.4     oster 			reconCtrlPtr->headSepCBList = cb;
   1560       1.4     oster 		else
   1561       1.4     oster 			if (cb->callbackArg.v < p->callbackArg.v) {
   1562       1.4     oster 				cb->next = reconCtrlPtr->headSepCBList;
   1563       1.4     oster 				reconCtrlPtr->headSepCBList = cb;
   1564       1.4     oster 			} else {
   1565       1.4     oster 				for (pt = p, p = p->next; p && (p->callbackArg.v < cb->callbackArg.v); pt = p, p = p->next);
   1566       1.4     oster 				cb->next = p;
   1567       1.4     oster 				pt->next = cb;
   1568       1.4     oster 			}
   1569       1.4     oster 		retval = 1;
   1570       1.1     oster #if RF_RECON_STATS > 0
   1571       1.4     oster 		ctrl->reconCtrl->reconDesc->hsStallCount++;
   1572       1.4     oster #endif				/* RF_RECON_STATS > 0 */
   1573       1.4     oster 	}
   1574  1.56.2.2     skrll 	RF_LOCK_MUTEX(reconCtrlPtr->rb_mutex);
   1575  1.56.2.2     skrll 	reconCtrlPtr->rb_lock = 0;
   1576  1.56.2.2     skrll 	wakeup(&reconCtrlPtr->rb_lock);
   1577       1.4     oster 	RF_UNLOCK_MUTEX(reconCtrlPtr->rb_mutex);
   1578       1.1     oster 
   1579       1.4     oster 	return (retval);
   1580       1.1     oster }
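                             /*
                              * Worked example of the stall/wakeup arithmetic above, with made-up
                              * numbers: if raidPtr->headSepLimit == 100 and ctrl->headSepCounter
                              * == 250, this disk stalls whenever minHeadSepCounter < 150 (its lead
                              * exceeds 100).  The callback value is 250 - 100 + 100/5 == 170, so
                              * CheckForNewMinHeadSep() wakes this disk only once the slowest disk
                              * reaches 170, at which point the lead has shrunk to 80, i.e. to 80%
                              * of the limit -- the 20% hysteresis mentioned in the comment.
                              */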
   1581      1.13     oster /*
   1582      1.13     oster  * checks to see if reconstruction has been either forced or blocked
   1583      1.13     oster  * by a user operation.  if forced, we skip this RU entirely.  else if
   1584      1.13     oster  * blocked, put ourselves on the wait list.  else return 0.
   1585       1.1     oster  *
   1586      1.13     oster  * ASSUMES THE PSS MUTEX IS LOCKED UPON ENTRY
   1587       1.1     oster  */
   1588       1.4     oster static int
   1589  1.56.2.2     skrll CheckForcedOrBlockedReconstruction(RF_Raid_t *raidPtr,
   1590  1.56.2.2     skrll 				   RF_ReconParityStripeStatus_t *pssPtr,
   1591  1.56.2.2     skrll 				   RF_PerDiskReconCtrl_t *ctrl,
   1592  1.56.2.2     skrll 				   RF_RowCol_t col, RF_StripeNum_t psid,
   1593  1.56.2.2     skrll 				   RF_ReconUnitNum_t which_ru)
   1594       1.4     oster {
   1595       1.4     oster 	RF_CallbackDesc_t *cb;
   1596       1.4     oster 	int     retcode = 0;
   1597       1.4     oster 
   1598       1.4     oster 	if ((pssPtr->flags & RF_PSS_FORCED_ON_READ) || (pssPtr->flags & RF_PSS_FORCED_ON_WRITE))
   1599       1.4     oster 		retcode = RF_PSS_FORCED_ON_WRITE;
   1600       1.4     oster 	else
   1601       1.4     oster 		if (pssPtr->flags & RF_PSS_RECON_BLOCKED) {
   1602  1.56.2.2     skrll 			Dprintf3("RECON: col %d blocked at psid %ld ru %d\n", col, psid, which_ru);
    1603       1.4     oster 			cb = rf_AllocCallbackDesc();	/* add ourselves to
   1604       1.4     oster 							 * the blockage-wait
   1605       1.4     oster 							 * list */
   1606       1.4     oster 			cb->col = col;
   1607       1.4     oster 			cb->next = pssPtr->blockWaitList;
   1608       1.4     oster 			pssPtr->blockWaitList = cb;
   1609       1.4     oster 			retcode = RF_PSS_RECON_BLOCKED;
   1610       1.4     oster 		}
   1611       1.4     oster 	if (!retcode)
   1612       1.4     oster 		pssPtr->flags |= RF_PSS_UNDER_RECON;	/* mark this RU as under
   1613       1.4     oster 							 * reconstruction */
   1614       1.4     oster 
   1615       1.4     oster 	return (retcode);
   1616       1.1     oster }
   1617      1.13     oster /*
   1618      1.13     oster  * if reconstruction is currently ongoing for the indicated stripeID,
   1619      1.13     oster  * reconstruction is forced to completion and we return non-zero to
   1620      1.13     oster  * indicate that the caller must wait.  If not, then reconstruction is
   1621      1.13     oster  * blocked on the indicated stripe and the routine returns zero.  If
   1622      1.13     oster  * and only if we return non-zero, we'll cause the cbFunc to get
   1623      1.13     oster  * invoked with the cbArg when the reconstruction has completed.
   1624       1.1     oster  */
   1625       1.4     oster int
   1626  1.56.2.2     skrll rf_ForceOrBlockRecon(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap,
   1627  1.56.2.2     skrll 		     void (*cbFunc)(RF_Raid_t *, void *), void *cbArg)
   1628       1.4     oster {
   1629       1.4     oster 	RF_StripeNum_t stripeID = asmap->stripeID;	/* the stripe ID we're
   1630       1.4     oster 							 * forcing recon on */
   1631       1.4     oster 	RF_SectorCount_t sectorsPerRU = raidPtr->Layout.sectorsPerStripeUnit * raidPtr->Layout.SUsPerRU;	/* num sects in one RU */
   1632  1.56.2.2     skrll 	RF_ReconParityStripeStatus_t *pssPtr, *newpssPtr;	/* a pointer to the parity
   1633       1.4     oster 						 * stripe status structure */
   1634       1.4     oster 	RF_StripeNum_t psid;	/* parity stripe id */
   1635       1.4     oster 	RF_SectorNum_t offset, fd_offset;	/* disk offset, failed-disk
   1636       1.4     oster 						 * offset */
   1637       1.4     oster 	RF_RowCol_t *diskids;
   1638       1.4     oster 	RF_ReconUnitNum_t which_ru;	/* RU within parity stripe */
   1639       1.4     oster 	RF_RowCol_t fcol, diskno, i;
   1640       1.4     oster 	RF_ReconBuffer_t *new_rbuf;	/* ptr to newly allocated rbufs */
   1641       1.4     oster 	RF_DiskQueueData_t *req;/* disk I/O req to be enqueued */
   1642       1.4     oster 	RF_CallbackDesc_t *cb;
   1643  1.56.2.2     skrll 	int     nPromoted;
   1644       1.4     oster 
   1645       1.4     oster 	psid = rf_MapStripeIDToParityStripeID(&raidPtr->Layout, stripeID, &which_ru);
   1646       1.4     oster 
   1647  1.56.2.2     skrll 	/* allocate a new PSS in case we need it */
   1648  1.56.2.2     skrll         newpssPtr = rf_AllocPSStatus(raidPtr);
   1649       1.4     oster 
   1650  1.56.2.2     skrll 	RF_LOCK_PSS_MUTEX(raidPtr, psid);
   1651  1.56.2.2     skrll 
   1652  1.56.2.2     skrll 	pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_CREATE | RF_PSS_RECON_BLOCKED, newpssPtr);
   1653  1.56.2.2     skrll 
   1654  1.56.2.2     skrll         if (pssPtr != newpssPtr) {
   1655  1.56.2.2     skrll                 rf_FreePSStatus(raidPtr, newpssPtr);
   1656  1.56.2.2     skrll         }
   1657       1.4     oster 
   1658       1.4     oster 	/* if recon is not ongoing on this PS, just return */
   1659       1.4     oster 	if (!(pssPtr->flags & RF_PSS_UNDER_RECON)) {
   1660  1.56.2.2     skrll 		RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
   1661       1.4     oster 		return (0);
   1662       1.4     oster 	}
   1663       1.4     oster 	/* otherwise, we have to wait for reconstruction to complete on this
   1664       1.4     oster 	 * RU. */
   1665       1.4     oster 	/* In order to avoid waiting for a potentially large number of
   1666       1.4     oster 	 * low-priority accesses to complete, we force a normal-priority (i.e.
   1667       1.4     oster 	 * not low-priority) reconstruction on this RU. */
   1668       1.4     oster 	if (!(pssPtr->flags & RF_PSS_FORCED_ON_WRITE) && !(pssPtr->flags & RF_PSS_FORCED_ON_READ)) {
   1669       1.4     oster 		DDprintf1("Forcing recon on psid %ld\n", psid);
   1670       1.4     oster 		pssPtr->flags |= RF_PSS_FORCED_ON_WRITE;	/* mark this RU as under
   1671       1.4     oster 								 * forced recon */
   1672       1.4     oster 		pssPtr->flags &= ~RF_PSS_RECON_BLOCKED;	/* clear the blockage
   1673       1.4     oster 							 * that we just set */
   1674  1.56.2.2     skrll 		fcol = raidPtr->reconControl->fcol;
   1675       1.4     oster 
   1676       1.4     oster 		/* get a listing of the disks comprising the indicated stripe */
   1677  1.56.2.2     skrll 		(raidPtr->Layout.map->IdentifyStripe) (raidPtr, asmap->raidAddress, &diskids);
   1678       1.4     oster 
   1679       1.4     oster 		/* For previously issued reads, elevate them to normal
   1680       1.4     oster 		 * priority.  If the I/O has already completed, it won't be
   1681       1.4     oster 		 * found in the queue, and hence this will be a no-op. For
   1682       1.4     oster 		 * unissued reads, allocate buffers and issue new reads.  The
   1683       1.4     oster 		 * fact that we've set the FORCED bit means that the regular
   1684       1.4     oster 		 * recon procs will not re-issue these reqs */
   1685       1.4     oster 		for (i = 0; i < raidPtr->Layout.numDataCol + raidPtr->Layout.numParityCol; i++)
   1686       1.4     oster 			if ((diskno = diskids[i]) != fcol) {
   1687       1.4     oster 				if (pssPtr->issued[diskno]) {
   1688  1.56.2.2     skrll 					nPromoted = rf_DiskIOPromote(&raidPtr->Queues[diskno], psid, which_ru);
   1689       1.4     oster 					if (rf_reconDebug && nPromoted)
   1690  1.56.2.2     skrll 						printf("raid%d: promoted read from col %d\n", raidPtr->raidid, diskno);
   1691       1.4     oster 				} else {
   1692  1.56.2.2     skrll 					new_rbuf = rf_MakeReconBuffer(raidPtr, diskno, RF_RBUF_TYPE_FORCED);	/* create new buf */
   1693  1.56.2.2     skrll 					ComputePSDiskOffsets(raidPtr, psid, diskno, &offset, &fd_offset,
   1694  1.56.2.2     skrll 					    &new_rbuf->spCol, &new_rbuf->spOffset);	/* find offsets & spare
   1695       1.4     oster 													 * location */
   1696       1.4     oster 					new_rbuf->parityStripeID = psid;	/* fill in the buffer */
   1697       1.4     oster 					new_rbuf->which_ru = which_ru;
   1698       1.4     oster 					new_rbuf->failedDiskSectorOffset = fd_offset;
   1699       1.4     oster 					new_rbuf->priority = RF_IO_NORMAL_PRIORITY;
   1700       1.4     oster 
   1701       1.4     oster 					/* use NULL b_proc b/c all addrs
   1702       1.4     oster 					 * should be in kernel space */
   1703       1.4     oster 					req = rf_CreateDiskQueueData(RF_IO_TYPE_READ, offset + which_ru * sectorsPerRU, sectorsPerRU, new_rbuf->buffer,
   1704  1.56.2.9     skrll 					    psid, which_ru, (int (*) (void *, int)) ForceReconReadDoneProc, (void *) new_rbuf,
   1705  1.56.2.9     skrll 					    NULL, (void *) raidPtr, 0, NULL, PR_WAITOK);
   1706       1.4     oster 
   1707       1.4     oster 					new_rbuf->arg = req;
   1708  1.56.2.2     skrll 					rf_DiskIOEnqueue(&raidPtr->Queues[diskno], req, RF_IO_NORMAL_PRIORITY);	/* enqueue the I/O */
   1709  1.56.2.2     skrll 					Dprintf2("raid%d: Issued new read req on col %d\n", raidPtr->raidid, diskno);
   1710       1.4     oster 				}
   1711       1.4     oster 			}
   1712       1.4     oster 		/* if the write is sitting in the disk queue, elevate its
   1713       1.4     oster 		 * priority */
   1714  1.56.2.2     skrll 		if (rf_DiskIOPromote(&raidPtr->Queues[fcol], psid, which_ru))
   1715  1.56.2.2     skrll 			printf("raid%d: promoted write to col %d\n",
   1716  1.56.2.2     skrll 			       raidPtr->raidid, fcol);
   1717       1.4     oster 	}
   1718       1.4     oster 	/* install a callback descriptor to be invoked when recon completes on
   1719       1.4     oster 	 * this parity stripe. */
   1720       1.4     oster 	cb = rf_AllocCallbackDesc();
   1721       1.4     oster 	/* XXX the following is bogus.. These functions don't really match!!
   1722       1.4     oster 	 * GO */
   1723       1.4     oster 	cb->callbackFunc = (void (*) (RF_CBParam_t)) cbFunc;
   1724       1.4     oster 	cb->callbackArg.p = (void *) cbArg;
   1725       1.4     oster 	cb->next = pssPtr->procWaitList;
   1726       1.4     oster 	pssPtr->procWaitList = cb;
   1727      1.10     oster 	DDprintf2("raid%d: Waiting for forced recon on psid %ld\n",
   1728      1.10     oster 		  raidPtr->raidid, psid);
   1729       1.4     oster 
   1730  1.56.2.2     skrll 	RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
   1731       1.4     oster 	return (1);
   1732       1.1     oster }
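                             /*
                              * Hedged usage sketch (not code from this file): how a user-write path
                              * might drive the force/block protocol implemented above.  The callback
                              * my_recon_done_cb(), the wait channel wchan and the sleep/wakeup
                              * plumbing are placeholders; only rf_ForceOrBlockRecon() and
                              * rf_UnblockRecon() are real entry points here.
                              */
                             #if 0
                             	if (rf_ForceOrBlockRecon(raidPtr, asmap, my_recon_done_cb, wchan)) {
                             		/* recon was under way on this stripe: it has been promoted
                             		 * to normal priority, and my_recon_done_cb() fires when it
                             		 * finishes -- sleep on wchan until then */
                             	} else {
                             		/* recon was not under way: it is now blocked on this
                             		 * stripe, so the write may proceed immediately */
                             	}
                             	/* ... perform the user write ... */
                             	rf_UnblockRecon(raidPtr, asmap);	/* release the blockage */
                             #endif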
   1733       1.1     oster /* called upon the completion of a forced reconstruction read.
    1734       1.1     oster  * all we do is schedule the FORCEDREADDONE event.
   1735       1.1     oster  * called at interrupt context in the kernel, so don't do anything illegal here.
   1736       1.1     oster  */
   1737       1.4     oster static void
   1738  1.56.2.2     skrll ForceReconReadDoneProc(void *arg, int status)
   1739       1.4     oster {
   1740       1.4     oster 	RF_ReconBuffer_t *rbuf = arg;
   1741       1.4     oster 
   1742  1.56.2.8     skrll 	/* Detect that reconControl is no longer valid, and if that
   1743  1.56.2.8     skrll 	   is the case, bail without calling rf_CauseReconEvent().
   1744  1.56.2.8     skrll 	   There won't be anyone listening for this event anyway */
   1745  1.56.2.8     skrll 
   1746  1.56.2.8     skrll 	if (rbuf->raidPtr->reconControl == NULL)
   1747  1.56.2.8     skrll 		return;
   1748  1.56.2.8     skrll 
   1749       1.4     oster 	if (status) {
   1750  1.56.2.2     skrll 		printf("raid%d: Forced recon read failed!\n", rbuf->raidPtr->raidid);
   1751  1.56.2.2     skrll 		rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, (void *) rbuf, RF_REVENT_FORCEDREAD_FAILED);
   1752  1.56.2.7     skrll 		return;
   1753       1.4     oster 	}
   1754  1.56.2.2     skrll 	rf_CauseReconEvent(rbuf->raidPtr, rbuf->col, (void *) rbuf, RF_REVENT_FORCEDREADDONE);
   1755       1.1     oster }
   1756       1.1     oster /* releases a block on the reconstruction of the indicated stripe */
   1757       1.4     oster int
   1758  1.56.2.2     skrll rf_UnblockRecon(RF_Raid_t *raidPtr, RF_AccessStripeMap_t *asmap)
   1759       1.4     oster {
   1760       1.4     oster 	RF_StripeNum_t stripeID = asmap->stripeID;
   1761       1.4     oster 	RF_ReconParityStripeStatus_t *pssPtr;
   1762       1.4     oster 	RF_ReconUnitNum_t which_ru;
   1763       1.4     oster 	RF_StripeNum_t psid;
   1764       1.4     oster 	RF_CallbackDesc_t *cb;
   1765       1.4     oster 
   1766       1.4     oster 	psid = rf_MapStripeIDToParityStripeID(&raidPtr->Layout, stripeID, &which_ru);
   1767  1.56.2.2     skrll 	RF_LOCK_PSS_MUTEX(raidPtr, psid);
   1768  1.56.2.2     skrll 	pssPtr = rf_LookupRUStatus(raidPtr, raidPtr->reconControl->pssTable, psid, which_ru, RF_PSS_NONE, NULL);
   1769       1.4     oster 
   1770       1.4     oster 	/* When recon is forced, the pss desc can get deleted before we get
   1771       1.4     oster 	 * back to unblock recon. But, this can _only_ happen when recon is
   1772       1.4     oster 	 * forced. It would be good to put some kind of sanity check here, but
   1773       1.4     oster 	 * how to decide if recon was just forced or not? */
   1774       1.4     oster 	if (!pssPtr) {
   1775       1.4     oster 		/* printf("Warning: no pss descriptor upon unblock on psid %ld
   1776       1.4     oster 		 * RU %d\n",psid,which_ru); */
   1777      1.43     oster #if (RF_DEBUG_RECON > 0) || (RF_DEBUG_PSS > 0)
   1778       1.4     oster 		if (rf_reconDebug || rf_pssDebug)
   1779       1.4     oster 			printf("Warning: no pss descriptor upon unblock on psid %ld RU %d\n", (long) psid, which_ru);
   1780      1.43     oster #endif
   1781       1.4     oster 		goto out;
   1782       1.4     oster 	}
   1783       1.4     oster 	pssPtr->blockCount--;
   1784      1.10     oster 	Dprintf3("raid%d: unblocking recon on psid %ld: blockcount is %d\n",
   1785      1.10     oster 		 raidPtr->raidid, psid, pssPtr->blockCount);
   1786       1.4     oster 	if (pssPtr->blockCount == 0) {	/* if recon blockage has been released */
   1787       1.4     oster 
   1788       1.4     oster 		/* unblock recon before calling CauseReconEvent in case
   1789       1.4     oster 		 * CauseReconEvent causes us to try to issue a new read before
   1790       1.4     oster 		 * returning here. */
   1791       1.4     oster 		pssPtr->flags &= ~RF_PSS_RECON_BLOCKED;
   1792       1.4     oster 
   1793       1.4     oster 
   1794      1.13     oster 		while (pssPtr->blockWaitList) {
   1795      1.13     oster 			/* spin through the block-wait list and
   1796      1.13     oster 			   release all the waiters */
   1797       1.4     oster 			cb = pssPtr->blockWaitList;
   1798       1.4     oster 			pssPtr->blockWaitList = cb->next;
   1799       1.4     oster 			cb->next = NULL;
   1800  1.56.2.2     skrll 			rf_CauseReconEvent(raidPtr, cb->col, NULL, RF_REVENT_BLOCKCLEAR);
   1801       1.4     oster 			rf_FreeCallbackDesc(cb);
   1802       1.4     oster 		}
   1803      1.13     oster 		if (!(pssPtr->flags & RF_PSS_UNDER_RECON)) {
   1804      1.13     oster 			/* if no recon was requested while recon was blocked */
   1805  1.56.2.2     skrll 			rf_PSStatusDelete(raidPtr, raidPtr->reconControl->pssTable, pssPtr);
   1806       1.4     oster 		}
   1807       1.4     oster 	}
   1808       1.1     oster out:
   1809  1.56.2.2     skrll 	RF_UNLOCK_PSS_MUTEX(raidPtr, psid);
   1810       1.4     oster 	return (0);
   1811       1.1     oster }
   1812