/*	$NetBSD: rf_revent.c,v 1.19.4.1 2005/04/29 11:29:15 kent Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author:
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * revent.c -- reconstruction event handling code
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_revent.c,v 1.19.4.1 2005/04/29 11:29:15 kent Exp $");

#include <sys/errno.h>

#include "rf_raid.h"
#include "rf_revent.h"
#include "rf_etimer.h"
#include "rf_general.h"
#include "rf_desc.h"
#include "rf_shutdown.h"

#define RF_MAX_FREE_REVENT 128
#define RF_MIN_FREE_REVENT  32
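/*
 * RF_MIN_FREE_REVENT and RF_MAX_FREE_REVENT are the low- and high-water
 * marks handed to rf_pool_init() for the rf_pools.revent descriptor pool
 * (see rf_ConfigureReconEvent() below).
 */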

#include <sys/proc.h>
#include <sys/kernel.h>

static void rf_ShutdownReconEvent(void *);

static RF_ReconEvent_t *
GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type);

static void rf_ShutdownReconEvent(void *ignored)
{
	pool_destroy(&rf_pools.revent);
}

int
rf_ConfigureReconEvent(RF_ShutdownList_t **listp)
{

	rf_pool_init(&rf_pools.revent, sizeof(RF_ReconEvent_t),
		     "rf_revent_pl", RF_MIN_FREE_REVENT, RF_MAX_FREE_REVENT);
	rf_ShutdownCreate(listp, rf_ShutdownReconEvent, NULL);

	return (0);
}
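
/*
 * rf_ConfigureReconEvent() sets up the event-descriptor pool; the
 * rf_ShutdownCreate() call registers rf_ShutdownReconEvent() on the
 * caller's shutdown list, so the pool created here is destroyed again
 * when that list is run at unconfigure time.
 */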

/* returns the next reconstruction event, blocking the calling thread
 * until one becomes available; never returns NULL.  If the queue is
 * empty we sleep until rf_CauseReconEvent() posts an event and wakes
 * us up. */

RF_ReconEvent_t *
rf_GetNextReconEvent(RF_RaidReconDesc_t *reconDesc)
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
	RF_ReconEvent_t *event;

	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));

	/* ltsleep timeout value: secs = timo_val/hz.  The 'Ticks' in the
	   reconExecTicks counters below are microseconds accumulated from
	   the cycle-counter timer (RF_ETIMER_VAL_US), not softclock ticks */

#define MAX_RECON_EXEC_USECS (100 * 1000)  /* 100 ms */
#define RECON_DELAY_MS 25
#define RECON_TIMO     ((RECON_DELAY_MS * hz) / 1000)
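
	/*
	 * Timeout arithmetic, for illustration: with the common hz = 100,
	 * RECON_TIMO = (25 * 100) / 1000 = 2 softclock ticks (20 ms), a
	 * rounded-down version of the 25 ms requested by RECON_DELAY_MS.
	 */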

	/* we are not pre-emptible in the kernel, but we don't want to run
	 * forever.  If we run w/o blocking for more than
	 * MAX_RECON_EXEC_USECS microseconds, sleep for RECON_DELAY_MS
	 * before continuing.  This may murder us with context switches,
	 * so we may need to increase both MAX_RECON_EXEC_USECS and
	 * RECON_DELAY_MS. */
	if (reconDesc->reconExecTimerRunning) {
		int     status;

		RF_ETIMER_STOP(reconDesc->recon_exec_timer);
		RF_ETIMER_EVAL(reconDesc->recon_exec_timer);
		reconDesc->reconExecTicks +=
			RF_ETIMER_VAL_US(reconDesc->recon_exec_timer);
		if (reconDesc->reconExecTicks > reconDesc->maxReconExecTicks)
			reconDesc->maxReconExecTicks =
				reconDesc->reconExecTicks;
		if (reconDesc->reconExecTicks >= MAX_RECON_EXEC_USECS) {
			/* we've been running too long.  delay for
			 * RECON_DELAY_MS */
#if RF_RECON_STATS > 0
			reconDesc->numReconExecDelays++;
#endif				/* RF_RECON_STATS > 0 */

			status = ltsleep(&reconDesc->reconExecTicks, PRIBIO,
					 "recon delay", RECON_TIMO,
					 &rctrl->eq_mutex);
			RF_ASSERT(status == EWOULDBLOCK);
			reconDesc->reconExecTicks = 0;
		}
	}
	while (!rctrl->eventQueue) {
#if RF_RECON_STATS > 0
		reconDesc->numReconEventWaits++;
#endif				/* RF_RECON_STATS > 0 */

		ltsleep(&(rctrl)->eventQueue, PRIBIO,  "raidframe eventq",
			0, &((rctrl)->eq_mutex));

		reconDesc->reconExecTicks = 0;	/* we've just waited */
	}

	reconDesc->reconExecTimerRunning = 1;
	if (RF_ETIMER_VAL_US(reconDesc->recon_exec_timer)!=0) {
		/* it moved!!  reset the timer. */
		RF_ETIMER_START(reconDesc->recon_exec_timer);
	}
	event = rctrl->eventQueue;
	rctrl->eventQueue = event->next;
	event->next = NULL;
	rctrl->eq_count--;

	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);
	return (event);
}
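
/*
 * Usage sketch, for illustration only (this loop is not part of this
 * file): a consumer of the event queue -- the reconstruction thread in
 * RAIDframe's recon code -- typically does something like
 *
 *	for (;;) {
 *		event = rf_GetNextReconEvent(reconDesc);
 *		... act on event->type, event->col and event->arg ...
 *		rf_FreeReconEventDesc(event);
 *	}
 *
 * i.e. every descriptor returned here must eventually be released with
 * rf_FreeReconEventDesc() (see below).
 */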
/* enqueues a reconstruction event on the indicated queue */
void
rf_CauseReconEvent(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg,
		   RF_Revent_t type)
{
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
	RF_ReconEvent_t *event = GetReconEventDesc(col, arg, type);

	if (type == RF_REVENT_BUFCLEAR) {
		RF_ASSERT(col != rctrl->fcol);
	}
	RF_ASSERT(col >= 0 && col <= raidPtr->numCol);
	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	event->next = rctrl->eventQueue;
	rctrl->eventQueue = event;
	rctrl->eq_count++;
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);

	wakeup(&(rctrl)->eventQueue);
}
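
/*
 * Usage sketch, for illustration only (not a call made in this file):
 * producers such as I/O-completion or buffer-handling paths post work
 * to the reconstruction thread with a single call, e.g.
 *
 *	rf_CauseReconEvent(raidPtr, col, (void *) arg, RF_REVENT_BUFCLEAR);
 *
 * with an event type from rf_revent.h.  The wakeup() above pairs with
 * the ltsleep() on &rctrl->eventQueue in rf_GetNextReconEvent().
 */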
/* allocates and initializes a recon event descriptor */
static RF_ReconEvent_t *
GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type)
{
	RF_ReconEvent_t *t;

	t = pool_get(&rf_pools.revent, PR_WAITOK);
	t->col = col;
	t->arg = arg;
	t->type = type;
	t->next = NULL;
	return (t);
}

/*
  rf_DrainReconEventQueue() -- used in the event of a reconstruction
  problem, this function simply drains all pending events from the
  reconstruct event queue.
 */

void
rf_DrainReconEventQueue(RF_RaidReconDesc_t *reconDesc)
{
	RF_ReconCtrl_t *rctrl = reconDesc->raidPtr->reconControl;
	RF_ReconEvent_t *event;

	RF_LOCK_MUTEX(rctrl->eq_mutex);
	while (rctrl->eventQueue!=NULL) {

		event = rctrl->eventQueue;
		rctrl->eventQueue = event->next;
		event->next = NULL;
		rctrl->eq_count--;
		/* dump it */
		rf_FreeReconEventDesc(event);
	}
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);
}

void
rf_FreeReconEventDesc(RF_ReconEvent_t *event)
{
	pool_put(&rf_pools.revent, event);
}