/*	$NetBSD: rf_revent.c,v 1.22.22.2 2006/12/10 07:18:13 yamt Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author:
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * rf_revent.c -- reconstruction event handling code
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_revent.c,v 1.22.22.2 2006/12/10 07:18:13 yamt Exp $");

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_revent.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_shutdown.h"

#define RF_MAX_FREE_REVENT 128
#define RF_MIN_FREE_REVENT  32
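/*
 * By all appearances these are the low and high water marks for the
 * event descriptor pool: rf_pool_init() below is asked to keep at least
 * RF_MIN_FREE_REVENT descriptors on hand and to cap cached free items
 * at RF_MAX_FREE_REVENT.  (An assumption from the names and the
 * rf_pool_init() call, not verified against the pool glue.)
 */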

#include <sys/proc.h>
#include <sys/kernel.h>

static void rf_ShutdownReconEvent(void *);

static RF_ReconEvent_t *
GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type);

static void
rf_ShutdownReconEvent(void *ignored)
{
	pool_destroy(&rf_pools.revent);
}

int
rf_ConfigureReconEvent(RF_ShutdownList_t **listp)
{
	rf_pool_init(&rf_pools.revent, sizeof(RF_ReconEvent_t),
		     "rf_revent_pl", RF_MIN_FREE_REVENT, RF_MAX_FREE_REVENT);
	rf_ShutdownCreate(listp, rf_ShutdownReconEvent, NULL);

	return (0);
}
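
/*
 * Note that rf_ShutdownCreate() above registers rf_ShutdownReconEvent()
 * on the shutdown list, so the descriptor pool is destroyed again when
 * that list is run (i.e. when the RAID set is shut down/unconfigured).
 */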

/* returns the next reconstruction event, blocking the calling thread
 * until one becomes available */

RF_ReconEvent_t *
rf_GetNextReconEvent(RF_RaidReconDesc_t *reconDesc)
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
	RF_ReconEvent_t *event;

	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));

	/* ltsleep timeout value: secs = timo_val/hz.  The "ticks"
	   accumulated in reconExecTicks below are microseconds of
	   measured execution time (RF_ETIMER_VAL_US), not softclock
	   ticks. */

#define MAX_RECON_EXEC_USECS (100 * 1000)  /* 100 ms */
#define RECON_DELAY_MS 25
#define RECON_TIMO     ((RECON_DELAY_MS * hz) / 1000)
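
/*
 * For example, with the traditional hz = 100, RECON_TIMO works out to
 * (25 * 100) / 1000 = 2 softclock ticks, i.e. a back-off of roughly
 * 20 ms each time the thread delays.
 */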

	/* we are not pre-emptible in the kernel, but we don't want to run
	 * forever.  If we run w/o blocking for more than
	 * MAX_RECON_EXEC_USECS microseconds of accumulated execution time,
	 * delay for RECON_DELAY_MS before continuing.  this may murder us
	 * with context switches, so we may need to increase both
	 * MAX_RECON_EXEC_USECS and RECON_DELAY_MS. */
	if (reconDesc->reconExecTimerRunning) {
		int     status;

		RF_ETIMER_STOP(reconDesc->recon_exec_timer);
		RF_ETIMER_EVAL(reconDesc->recon_exec_timer);
		reconDesc->reconExecTicks +=
			RF_ETIMER_VAL_US(reconDesc->recon_exec_timer);
		if (reconDesc->reconExecTicks > reconDesc->maxReconExecTicks)
			reconDesc->maxReconExecTicks =
				reconDesc->reconExecTicks;
		if (reconDesc->reconExecTicks >= MAX_RECON_EXEC_USECS) {
			/* we've been running too long.  delay for
			 * RECON_DELAY_MS */
#if RF_RECON_STATS > 0
			reconDesc->numReconExecDelays++;
#endif				/* RF_RECON_STATS > 0 */

			status = ltsleep(&reconDesc->reconExecTicks, PRIBIO,
					 "recon delay", RECON_TIMO,
					 &rctrl->eq_mutex);
			RF_ASSERT(status == EWOULDBLOCK);
			reconDesc->reconExecTicks = 0;
		}
	}
	while (!rctrl->eventQueue) {
#if RF_RECON_STATS > 0
		reconDesc->numReconEventWaits++;
#endif				/* RF_RECON_STATS > 0 */

		ltsleep(&rctrl->eventQueue, PRIBIO, "raidframe eventq",
			0, &rctrl->eq_mutex);

		reconDesc->reconExecTicks = 0;	/* we've just waited */
	}

	reconDesc->reconExecTimerRunning = 1;
	if (RF_ETIMER_VAL_US(reconDesc->recon_exec_timer) != 0) {
		/* the timer has accumulated time; restart it */
		RF_ETIMER_START(reconDesc->recon_exec_timer);
	}
	event = rctrl->eventQueue;
	rctrl->eventQueue = event->next;
	event->next = NULL;
	rctrl->eq_count--;

	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);
	return (event);
}
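
/*
 * A sketch of the typical consumer (assumed shape, not code from this
 * file): the reconstruction thread loops pulling events and dispatching
 * on event->type, freeing each descriptor when done, e.g.
 *
 *	for (;;) {
 *		RF_ReconEvent_t *ev = rf_GetNextReconEvent(reconDesc);
 *		... act on ev->type / ev->col / ev->arg ...
 *		rf_FreeReconEventDesc(ev);
 *	}
 *
 * The actual dispatch loop lives in rf_reconstruct.c.
 */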

/* enqueues a reconstruction event on the indicated queue */
void
rf_CauseReconEvent(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg,
		   RF_Revent_t type)
{
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
	RF_ReconEvent_t *event = GetReconEventDesc(col, arg, type);

	if (type == RF_REVENT_BUFCLEAR) {
		RF_ASSERT(col != rctrl->fcol);
	}
	RF_ASSERT(col >= 0 && col <= raidPtr->numCol);
	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	event->next = rctrl->eventQueue;
	rctrl->eventQueue = event;
	rctrl->eq_count++;
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);

	wakeup(&rctrl->eventQueue);
}
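
/*
 * Note that events are both inserted and removed at the head of
 * eventQueue, so the "queue" is really a LIFO stack; the consumers
 * evidently do not depend on FIFO ordering of recon events.
 */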

/* allocates and initializes a recon event descriptor */
static RF_ReconEvent_t *
GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type)
{
	RF_ReconEvent_t *t;

	t = pool_get(&rf_pools.revent, PR_WAITOK);
	t->col = col;
	t->arg = arg;
	t->type = type;
	t->next = NULL;
	return (t);
}
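
/*
 * PR_WAITOK means pool_get() may sleep waiting for memory, which is
 * presumably why rf_CauseReconEvent() above allocates the descriptor
 * before taking eq_mutex.
 */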

/*
 * rf_DrainReconEventQueue() -- used in the event of a reconstruction
 * problem, this function simply drains all pending events from the
 * reconstruction event queue.
 */

void
rf_DrainReconEventQueue(RF_RaidReconDesc_t *reconDesc)
{
	RF_ReconCtrl_t *rctrl = reconDesc->raidPtr->reconControl;
	RF_ReconEvent_t *event;

	RF_LOCK_MUTEX(rctrl->eq_mutex);
	while (rctrl->eventQueue != NULL) {
		event = rctrl->eventQueue;
		rctrl->eventQueue = event->next;
		event->next = NULL;
		rctrl->eq_count--;
		/* dump it */
		rf_FreeReconEventDesc(event);
	}
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);
}

void
rf_FreeReconEventDesc(RF_ReconEvent_t *event)
{
	pool_put(&rf_pools.revent, event);
}