1 | /* $NetBSD: rf_revent.c,v 1.28 2011/05/02 01:07:24 mrg Exp $ */ |
2 | /* |
3 | * Copyright (c) 1995 Carnegie-Mellon University. |
4 | * All rights reserved. |
5 | * |
 * Author: Mark Holland
7 | * |
8 | * Permission to use, copy, modify and distribute this software and |
9 | * its documentation is hereby granted, provided that both the copyright |
10 | * notice and this permission notice appear in all copies of the |
11 | * software, derivative works or modified versions, and any portions |
12 | * thereof, and that both notices appear in supporting documentation. |
13 | * |
14 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
15 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND |
16 | * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
17 | * |
18 | * Carnegie Mellon requests users of this software to return to |
19 | * |
20 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
21 | * School of Computer Science |
22 | * Carnegie Mellon University |
23 | * Pittsburgh PA 15213-3890 |
24 | * |
25 | * any improvements or extensions that they make and grant Carnegie the |
26 | * rights to redistribute these changes. |
27 | */ |
28 | /* |
29 | * revent.c -- reconstruction event handling code |
30 | */ |
31 | |
32 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: rf_revent.c,v 1.28 2011/05/02 01:07:24 mrg Exp $");
34 | |
35 | #include <sys/errno.h> |
36 | |
37 | #include "rf_raid.h" |
38 | #include "rf_revent.h" |
39 | #include "rf_etimer.h" |
40 | #include "rf_general.h" |
41 | #include "rf_desc.h" |
42 | #include "rf_shutdown.h" |
43 | |
/* low and high water marks for the recon event descriptor pool */
#define RF_MAX_FREE_REVENT 128
#define RF_MIN_FREE_REVENT 32

/* how long to wait for a new event before checking whether the
   head-separation callback list needs a kick */
#define RF_EVENTQ_WAIT 5000
47 | |
48 | #include <sys/proc.h> |
49 | #include <sys/kernel.h> |
50 | |
51 | static void rf_ShutdownReconEvent(void *); |
52 | |
53 | static RF_ReconEvent_t * |
54 | GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type); |
55 | |
static void
rf_ShutdownReconEvent(void *ignored)
57 | { |
58 | pool_destroy(&rf_pools.revent); |
59 | } |
60 | |
61 | int |
62 | rf_ConfigureReconEvent(RF_ShutdownList_t **listp) |
63 | { |
64 | |
65 | rf_pool_init(&rf_pools.revent, sizeof(RF_ReconEvent_t), |
66 | "rf_revent_pl" , RF_MIN_FREE_REVENT, RF_MAX_FREE_REVENT); |
67 | rf_ShutdownCreate(listp, rf_ShutdownReconEvent, NULL); |
68 | |
69 | return (0); |
70 | } |
71 | |
/* returns the next reconstruction event, blocking the calling thread
 * until one becomes available */
75 | |
76 | RF_ReconEvent_t * |
77 | rf_GetNextReconEvent(RF_RaidReconDesc_t *reconDesc) |
78 | { |
79 | RF_Raid_t *raidPtr = reconDesc->raidPtr; |
80 | RF_ReconCtrl_t *rctrl = raidPtr->reconControl; |
81 | RF_ReconEvent_t *event; |
82 | int stall_count; |
83 | |
84 | rf_lock_mutex2(rctrl->eq_mutex); |
85 | /* q null and count==0 must be equivalent conditions */ |
86 | RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0)); |
87 | |
	/* sleep timeout value: secs = timo_val/hz.  the 'ticks'
	   accumulated in reconExecTicks below are etimer microseconds,
	   not softclock ticks */
90 | |
91 | #define MAX_RECON_EXEC_USECS (100 * 1000) /* 100 ms */ |
92 | #define RECON_DELAY_MS 25 |
93 | #define RECON_TIMO ((RECON_DELAY_MS * hz) / 1000) |
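
/* worked example (hz value assumed for illustration): with hz = 100,
 * RECON_TIMO = (25 * 100) / 1000 = 2 softclock ticks, i.e. the actual
 * delay is 20 ms, slightly shorter than RECON_DELAY_MS because of the
 * integer division. */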
94 | |
	/* we are not preemptible in the kernel, but we don't want to run
	 * forever.  If we run w/o blocking for more than
	 * MAX_RECON_EXEC_USECS microseconds of etimer time, delay for
	 * RECON_DELAY_MS before continuing.  this may murder us with
	 * context switches, so we may need to increase both
	 * MAX_RECON_EXEC_USECS and RECON_DELAY_MS. */
100 | if (reconDesc->reconExecTimerRunning) { |
101 | int status; |
102 | |
103 | RF_ETIMER_STOP(reconDesc->recon_exec_timer); |
104 | RF_ETIMER_EVAL(reconDesc->recon_exec_timer); |
105 | reconDesc->reconExecTicks += |
106 | RF_ETIMER_VAL_US(reconDesc->recon_exec_timer); |
107 | if (reconDesc->reconExecTicks > reconDesc->maxReconExecTicks) |
108 | reconDesc->maxReconExecTicks = |
109 | reconDesc->reconExecTicks; |
110 | if (reconDesc->reconExecTicks >= MAX_RECON_EXEC_USECS) { |
111 | /* we've been running too long. delay for |
112 | * RECON_DELAY_MS */ |
113 | #if RF_RECON_STATS > 0 |
114 | reconDesc->numReconExecDelays++; |
115 | #endif /* RF_RECON_STATS > 0 */ |
116 | |
117 | status = rf_sleep("rfrecond" , RECON_TIMO, |
118 | rctrl->eq_mutex); |
119 | RF_ASSERT(status == EWOULDBLOCK); |
120 | reconDesc->reconExecTicks = 0; |
121 | } |
122 | } |
123 | |
124 | stall_count = 0; |
125 | while (!rctrl->eventQueue) { |
126 | #if RF_RECON_STATS > 0 |
127 | reconDesc->numReconEventWaits++; |
128 | #endif /* RF_RECON_STATS > 0 */ |
129 | |
130 | rf_timedwait_cond2(rctrl->eq_cv, rctrl->eq_mutex, |
131 | RF_EVENTQ_WAIT); |
132 | |
133 | stall_count++; |
134 | |
135 | if ((stall_count > 10) && |
136 | rctrl->headSepCBList) { |
137 | /* There is work to do on the callback list, and |
138 | we've waited long enough... */ |
139 | rf_WakeupHeadSepCBWaiters(raidPtr); |
140 | stall_count = 0; |
141 | } |
142 | reconDesc->reconExecTicks = 0; /* we've just waited */ |
143 | } |
144 | |
	/* (re)start the exec timer: an event is available and we are
	   about to run again */
	reconDesc->reconExecTimerRunning = 1;
	RF_ETIMER_START(reconDesc->recon_exec_timer);
150 | event = rctrl->eventQueue; |
151 | rctrl->eventQueue = event->next; |
152 | event->next = NULL; |
153 | rctrl->eq_count--; |
154 | |
155 | /* q null and count==0 must be equivalent conditions */ |
156 | RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0)); |
157 | rf_unlock_mutex2(rctrl->eq_mutex); |
158 | return (event); |
159 | } |
160 | /* enqueues a reconstruction event on the indicated queue */ |
161 | void |
162 | rf_CauseReconEvent(RF_Raid_t *raidPtr, RF_RowCol_t col, void *arg, |
163 | RF_Revent_t type) |
164 | { |
165 | RF_ReconCtrl_t *rctrl = raidPtr->reconControl; |
166 | RF_ReconEvent_t *event = GetReconEventDesc(col, arg, type); |
167 | |
168 | if (type == RF_REVENT_BUFCLEAR) { |
169 | RF_ASSERT(col != rctrl->fcol); |
170 | } |
171 | RF_ASSERT(col >= 0 && col <= raidPtr->numCol); |
172 | rf_lock_mutex2(rctrl->eq_mutex); |
173 | /* q null and count==0 must be equivalent conditions */ |
174 | RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0)); |
175 | event->next = rctrl->eventQueue; |
176 | rctrl->eventQueue = event; |
177 | rctrl->eq_count++; |
178 | rf_broadcast_cond2(rctrl->eq_cv); |
179 | rf_unlock_mutex2(rctrl->eq_mutex); |
180 | } |
181 | /* allocates and initializes a recon event descriptor */ |
182 | static RF_ReconEvent_t * |
183 | GetReconEventDesc(RF_RowCol_t col, void *arg, RF_Revent_t type) |
184 | { |
185 | RF_ReconEvent_t *t; |
186 | |
187 | t = pool_get(&rf_pools.revent, PR_WAITOK); |
188 | t->col = col; |
189 | t->arg = arg; |
190 | t->type = type; |
191 | t->next = NULL; |
192 | return (t); |
193 | } |
194 | |
195 | /* |
196 | rf_DrainReconEventQueue() -- used in the event of a reconstruction |
197 | problem, this function simply drains all pending events from the |
  reconstruction event queue.
199 | */ |
200 | |
201 | void |
202 | rf_DrainReconEventQueue(RF_RaidReconDesc_t *reconDesc) |
203 | { |
204 | RF_ReconCtrl_t *rctrl = reconDesc->raidPtr->reconControl; |
205 | RF_ReconEvent_t *event; |
206 | |
207 | rf_lock_mutex2(rctrl->eq_mutex); |
	while (rctrl->eventQueue != NULL) {
		event = rctrl->eventQueue;
211 | rctrl->eventQueue = event->next; |
212 | event->next = NULL; |
213 | rctrl->eq_count--; |
214 | /* dump it */ |
215 | rf_FreeReconEventDesc(event); |
216 | } |
217 | rf_unlock_mutex2(rctrl->eq_mutex); |
218 | } |
219 | |
220 | void |
221 | rf_FreeReconEventDesc(RF_ReconEvent_t *event) |
222 | { |
223 | pool_put(&rf_pools.revent, event); |
224 | } |
225 | |