/*	$NetBSD: swdmover.c,v 1.4 2003/03/06 21:10:45 thorpej Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * swdmover.c: Software back-end providing the dmover functions
 * mentioned in dmover(9).
 *
 * This module provides a fallback for cases where no hardware
 * data movers are present in a system, and also serves as an
 * example of how to write a dmover back-end.
 *
 * Note that even though the software dmover doesn't require
 * interrupts to be blocked, we block them anyway to demonstrate
 * the locking protocol.
 */
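
/*
 * Rough sketch of how a consumer reaches this back-end, to set the
 * context for the routines below (illustrative only; names follow
 * dmover(9), and error handling and cleanup are elided):
 *
 *      struct dmover_session *dses;
 *      struct dmover_request *dreq;
 *
 *      dmover_session_create(DMOVER_FUNC_ZERO, &dses);
 *      dreq = dmover_request_alloc(dses, NULL);
 *      dreq->dreq_outbuf_type = DMOVER_BUF_LINEAR;
 *      dreq->dreq_outbuf.dmbuf_linear.l_addr = buf;
 *      dreq->dreq_outbuf.dmbuf_linear.l_len = len;
 *      dmover_process(dreq);
 *
 * The middle-end assigns the request to a back-end, queues it on that
 * back-end's dmb_pendreqs list, and calls its dmb_process hook (our
 * swdmover_process() below); completion is reported back to the
 * consumer once the back-end calls dmover_done().
 */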

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: swdmover.c,v 1.4 2003/03/06 21:10:45 thorpej Exp $");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kthread.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <dev/dmover/dmovervar.h>

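/*
 * Per-function glue.  All the software back-end needs for each
 * algorithm is its processing routine; a pointer to one of these
 * structures is stashed in the algorithm descriptor's private-data
 * field and fetched again in swdmover_thread().
 */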
struct swdmover_function {
        void    (*sdf_process)(struct dmover_request *);
};

static struct dmover_backend swdmover_backend;
static struct proc *swdmover_proc;
static int swdmover_cv;

void    swdmoverattach(int);

/*
 * swdmover_process:
 *
 *      Dmover back-end entry point.
 */
static void
swdmover_process(struct dmover_backend *dmb)
{
        int s;

        /*
         * Just wake up the processing thread.  This will allow
         * requests to linger on the middle-end's queue so that
         * they can be cancelled, if need be.
         */
        s = splbio();
        /* XXXLOCK */
        if (TAILQ_EMPTY(&dmb->dmb_pendreqs) == 0)
                wakeup(&swdmover_cv);
        /* XXXUNLOCK */
        splx(s);
}


/*
 * swdmover_thread:
 *
 *      Request processing thread.
 */
static void
swdmover_thread(void *arg)
{
        struct dmover_backend *dmb = arg;
        struct dmover_request *dreq;
        struct swdmover_function *sdf;
        int s;

        s = splbio();
        /* XXXLOCK */

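        /*
         * Consumer side of the pending-request queue: sleep until
         * swdmover_process() reports that dmb_pendreqs is non-empty,
         * then pull requests off one at a time.  The interrupt block
         * (and, eventually, the lock) is dropped around the actual
         * data movement so new requests can be queued meanwhile.
         */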
        for (;;) {
                dreq = TAILQ_FIRST(&dmb->dmb_pendreqs);
                if (dreq == NULL) {
                        /* XXXUNLOCK */
                        (void) tsleep(&swdmover_cv, PRIBIO, "swdmvr", 0);
                        continue;
                }

                dmover_backend_remque(dmb, dreq);
                dreq->dreq_flags |= DMOVER_REQ_RUNNING;

                /* XXXUNLOCK */
                splx(s);

                sdf = dreq->dreq_assignment->das_algdesc->dad_data;
                (*sdf->sdf_process)(dreq);

                s = splbio();
                /* XXXLOCK */
        }
}


/*
 * swdmover_func_zero_process:
 *
 *      Processing routine for the "zero" function.
 */
static void
swdmover_func_zero_process(struct dmover_request *dreq)
{

        switch (dreq->dreq_outbuf_type) {
        case DMOVER_BUF_LINEAR:
                memset(dreq->dreq_outbuf.dmbuf_linear.l_addr, 0,
                    dreq->dreq_outbuf.dmbuf_linear.l_len);
                break;

        case DMOVER_BUF_UIO:
            {
                struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;
                char *cp;
                size_t count, buflen;
                int error;

                if (uio->uio_rw != UIO_READ) {
                        /* XXXLOCK */
                        dreq->dreq_error = EINVAL;
                        dreq->dreq_flags |= DMOVER_REQ_ERROR;
                        /* XXXUNLOCK */
                        break;
                }

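                /*
                 * Zero a small stack bounce buffer (at most 1024 bytes)
                 * and feed it to uiomove() repeatedly until the target
                 * uio is drained.
                 */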
                buflen = uio->uio_resid;
                if (buflen > 1024)
                        buflen = 1024;
                cp = alloca(buflen);
                memset(cp, 0, buflen);

                while ((count = uio->uio_resid) != 0) {
                        if (count > buflen)
                                count = buflen;
                        error = uiomove(cp, count, uio);
                        if (error) {
                                /* XXXLOCK */
                                dreq->dreq_error = error;
                                dreq->dreq_flags |= DMOVER_REQ_ERROR;
                                /* XXXUNLOCK */
                                break;
                        }
                }
                break;
            }

        default:
                /* XXXLOCK */
                dreq->dreq_error = EINVAL;
                dreq->dreq_flags |= DMOVER_REQ_ERROR;
                /* XXXUNLOCK */
        }

        dmover_done(dreq);
}

/*
 * swdmover_func_fill8_process:
 *
 *      Processing routine for the "fill8" function.
 */
static void
swdmover_func_fill8_process(struct dmover_request *dreq)
{

        switch (dreq->dreq_outbuf_type) {
        case DMOVER_BUF_LINEAR:
                memset(dreq->dreq_outbuf.dmbuf_linear.l_addr,
                    dreq->dreq_immediate[0],
                    dreq->dreq_outbuf.dmbuf_linear.l_len);
                break;

        case DMOVER_BUF_UIO:
            {
                struct uio *uio = dreq->dreq_outbuf.dmbuf_uio;
                char *cp;
                size_t count, buflen;
                int error;

                if (uio->uio_rw != UIO_READ) {
                        /* XXXLOCK */
                        dreq->dreq_error = EINVAL;
                        dreq->dreq_flags |= DMOVER_REQ_ERROR;
                        /* XXXUNLOCK */
                        break;
                }

                buflen = uio->uio_resid;
                if (buflen > 1024)
                        buflen = 1024;
                cp = alloca(buflen);
                memset(cp, dreq->dreq_immediate[0], buflen);

                while ((count = uio->uio_resid) != 0) {
                        if (count > buflen)
                                count = buflen;
                        error = uiomove(cp, count, uio);
                        if (error) {
                                /* XXXLOCK */
                                dreq->dreq_error = error;
                                dreq->dreq_flags |= DMOVER_REQ_ERROR;
                                /* XXXUNLOCK */
                                break;
                        }
                }
                break;
            }

        default:
                /* XXXLOCK */
                dreq->dreq_error = EINVAL;
                dreq->dreq_flags |= DMOVER_REQ_ERROR;
                /* XXXUNLOCK */
        }

        dmover_done(dreq);
}

/*
 * swdmover_func_copy_process:
 *
 *      Processing routine for the "copy" function.
 */
static void
swdmover_func_copy_process(struct dmover_request *dreq)
{

        /* XXX Currently, both buffers must be of the same type. */
        if (dreq->dreq_inbuf_type != dreq->dreq_outbuf_type) {
                /* XXXLOCK */
                dreq->dreq_error = EINVAL;
                dreq->dreq_flags |= DMOVER_REQ_ERROR;
                /* XXXUNLOCK */
                goto done;
        }

        switch (dreq->dreq_outbuf_type) {
        case DMOVER_BUF_LINEAR:
                if (dreq->dreq_outbuf.dmbuf_linear.l_len !=
                    dreq->dreq_inbuf[0].dmbuf_linear.l_len) {
                        /* XXXLOCK */
                        dreq->dreq_error = EINVAL;
                        dreq->dreq_flags |= DMOVER_REQ_ERROR;
                        /* XXXUNLOCK */
                        break;
                }
                memcpy(dreq->dreq_outbuf.dmbuf_linear.l_addr,
                    dreq->dreq_inbuf[0].dmbuf_linear.l_addr,
                    dreq->dreq_outbuf.dmbuf_linear.l_len);
                break;

        case DMOVER_BUF_UIO:
            {
                struct uio *uio_out = dreq->dreq_outbuf.dmbuf_uio;
                struct uio *uio_in = dreq->dreq_inbuf[0].dmbuf_uio;
                char *cp;
                size_t count, buflen;
                int error;

                if (uio_in->uio_rw != UIO_WRITE ||
                    uio_out->uio_rw != UIO_READ ||
                    uio_in->uio_resid != uio_out->uio_resid) {
                        /* XXXLOCK */
                        dreq->dreq_error = EINVAL;
                        dreq->dreq_flags |= DMOVER_REQ_ERROR;
                        /* XXXUNLOCK */
                        break;
                }

                buflen = uio_in->uio_resid;
                if (buflen > 1024)
                        buflen = 1024;
                cp = alloca(buflen);

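                /*
                 * Copy through the stack bounce buffer: each pass drains
                 * up to 1024 bytes from the source uio into cp, then
                 * pushes the same bytes out to the destination uio.
                 */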
                while ((count = uio_in->uio_resid) != 0) {
                        if (count > buflen)
                                count = buflen;
                        error = uiomove(cp, count, uio_in);
                        if (error == 0)
                                error = uiomove(cp, count, uio_out);
                        if (error) {
                                /* XXXLOCK */
                                dreq->dreq_error = error;
                                dreq->dreq_flags |= DMOVER_REQ_ERROR;
                                /* XXXUNLOCK */
                                break;
                        }
                }
                break;
            }

        default:
                /* XXXLOCK */
                dreq->dreq_error = EINVAL;
                dreq->dreq_flags |= DMOVER_REQ_ERROR;
                /* XXXUNLOCK */
        }

 done:
        dmover_done(dreq);
}

static struct swdmover_function swdmover_func_zero = {
        swdmover_func_zero_process
};

static struct swdmover_function swdmover_func_fill8 = {
        swdmover_func_fill8_process
};

static struct swdmover_function swdmover_func_copy = {
        swdmover_func_copy_process
};

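/*
 * Algorithm descriptors exported to the middle-end.  Each entry gives
 * the function name, the back-end private data (a pointer to one of
 * the wrappers above), and the number of input buffers the function
 * consumes: "copy" reads one input, while "zero" and "fill8" generate
 * their output and take none.
 */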
const struct dmover_algdesc swdmover_algdescs[] = {
        {
                DMOVER_FUNC_ZERO,
                &swdmover_func_zero,
                0
        },
        {
                DMOVER_FUNC_FILL8,
                &swdmover_func_fill8,
                0
        },
        {
                DMOVER_FUNC_COPY,
                &swdmover_func_copy,
                1
        },
};
#define SWDMOVER_ALGDESC_COUNT \
        (sizeof(swdmover_algdescs) / sizeof(swdmover_algdescs[0]))

/*
 * swdmover_create_thread:
 *
 *      Actually create the swdmover processing thread.
 */
static void
swdmover_create_thread(void *arg)
{
        int error;

        error = kthread_create1(swdmover_thread, arg, &swdmover_proc,
            "swdmover");
        if (error)
                printf("WARNING: unable to create swdmover thread, "
                    "error = %d\n", error);
}

/*
 * swdmoverattach:
 *
 *      Pseudo-device attach routine.
 */
void
swdmoverattach(int count)
{

        swdmover_backend.dmb_name = "swdmover";
        swdmover_backend.dmb_speed = 1;         /* XXX */
        swdmover_backend.dmb_cookie = NULL;
        swdmover_backend.dmb_algdescs = swdmover_algdescs;
        swdmover_backend.dmb_nalgdescs = SWDMOVER_ALGDESC_COUNT;
        swdmover_backend.dmb_process = swdmover_process;

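        /*
         * kthread_create() only schedules swdmover_create_thread() to
         * run once the kernel is far enough along to create threads;
         * that callback then creates the worker with kthread_create1().
         */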
        kthread_create(swdmover_create_thread, &swdmover_backend);

        /* XXX Should only register this when kthread creation succeeds. */
        dmover_backend_register(&swdmover_backend);
}