/*	$NetBSD: scsipi_base.c,v 1.38.4.5 2001/10/22 20:41:42 nathanw Exp $	*/

/*-
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_scsi.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/kthread.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsipiconf.h>
#include <dev/scsipi/scsipi_base.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_message.h>

int	scsipi_complete __P((struct scsipi_xfer *));
void	scsipi_request_sense __P((struct scsipi_xfer *));
int	scsipi_enqueue __P((struct scsipi_xfer *));
void	scsipi_run_queue __P((struct scsipi_channel *chan));

void	scsipi_completion_thread __P((void *));

void	scsipi_get_tag __P((struct scsipi_xfer *));
void	scsipi_put_tag __P((struct scsipi_xfer *));

int	scsipi_get_resource __P((struct scsipi_channel *));
void	scsipi_put_resource __P((struct scsipi_channel *));
__inline int scsipi_grow_resources __P((struct scsipi_channel *));

void	scsipi_async_event_max_openings __P((struct scsipi_channel *,
	    struct scsipi_max_openings *));
void	scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
	    struct scsipi_xfer_mode *));
void	scsipi_async_event_channel_reset __P((struct scsipi_channel *));

struct pool scsipi_xfer_pool;

/*
 * scsipi_init:
 *
 *	Called when a scsibus or atapibus is attached to the system
 *	to initialize shared data structures.
 */
void
scsipi_init()
{
	static int scsipi_init_done;

	if (scsipi_init_done)
		return;
	scsipi_init_done = 1;

	/* Initialize the scsipi_xfer pool. */
	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
	    0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
}

/*
 * scsipi_channel_init:
 *
 *	Initialize a scsipi_channel when it is attached.
 */
int
scsipi_channel_init(chan)
	struct scsipi_channel *chan;
{
	size_t nbytes;
	int i;

	/* Initialize shared data. */
	scsipi_init();

	/* Initialize the queues. */
	TAILQ_INIT(&chan->chan_queue);
	TAILQ_INIT(&chan->chan_complete);

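	/*
	 * Allocate the two-level periph pointer table: an array indexed
	 * by target, each entry pointing to an array indexed by lun.
	 */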
	nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
	chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
	if (chan->chan_periphs == NULL)
		return (ENOMEM);

	nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
	for (i = 0; i < chan->chan_ntargets; i++) {
		chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
		if (chan->chan_periphs[i] == NULL) {
			while (--i >= 0) {
				free(chan->chan_periphs[i], M_DEVBUF);
			}
			return (ENOMEM);
		}
		memset(chan->chan_periphs[i], 0, nbytes);
	}

	/*
	 * Create the asynchronous completion thread.
	 */
	kthread_create(scsipi_create_completion_thread, chan);
	return (0);
}

/*
 * scsipi_channel_shutdown:
 *
 *	Shutdown a scsipi_channel.
 */
void
scsipi_channel_shutdown(chan)
	struct scsipi_channel *chan;
{

	/*
	 * Shut down the completion thread.
	 */
	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
	wakeup(&chan->chan_complete);

	/*
	 * Now wait for the thread to exit.
	 */
	while (chan->chan_thread != NULL)
		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
}

/*
 * scsipi_insert_periph:
 *
 *	Insert a periph into the channel.
 */
void
scsipi_insert_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
	splx(s);
}

/*
 * scsipi_remove_periph:
 *
 *	Remove a periph from the channel.
 */
void
scsipi_remove_periph(chan, periph)
	struct scsipi_channel *chan;
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
	splx(s);
}

/*
 * scsipi_lookup_periph:
 *
 *	Lookup a periph on the specified channel.
 */
struct scsipi_periph *
scsipi_lookup_periph(chan, target, lun)
	struct scsipi_channel *chan;
	int target, lun;
{
	struct scsipi_periph *periph;
	int s;

	if (target >= chan->chan_ntargets ||
	    lun >= chan->chan_nluns)
		return (NULL);

	s = splbio();
	periph = chan->chan_periphs[target][lun];
	splx(s);

	return (periph);
}

/*
 * scsipi_get_resource:
 *
 *	Allocate a single xfer `resource' from the channel.
 *
 *	NOTE: Must be called at splbio().
 */
int
scsipi_get_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

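	/*
	 * SCSIPI_CHAN_OPENINGS means openings are accounted per-channel;
	 * otherwise they come from the adapter-wide pool.
	 */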
	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
		if (chan->chan_openings > 0) {
			chan->chan_openings--;
			return (1);
		}
		return (0);
	}

	if (adapt->adapt_openings > 0) {
		adapt->adapt_openings--;
		return (1);
	}
	return (0);
}

/*
 * scsipi_grow_resources:
 *
 *	Attempt to grow resources for a channel.  If this succeeds,
 *	we allocate one for our caller.
 *
 *	NOTE: Must be called at splbio().
 */
__inline int
scsipi_grow_resources(chan)
	struct scsipi_channel *chan;
{

	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
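		/*
		 * If the completion thread is not yet running, it is
		 * safe to call the adapter directly from here.
		 */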
		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
			scsipi_adapter_request(chan,
			    ADAPTER_REQ_GROW_RESOURCES, NULL);
			return (scsipi_get_resource(chan));
		}
		/*
		 * ask the channel thread to do it. It'll have to thaw the
		 * queue
		 */
		scsipi_channel_freeze(chan, 1);
		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
		wakeup(&chan->chan_complete);
		return (0);
	}

	return (0);
}

/*
 * scsipi_put_resource:
 *
 *	Free a single xfer `resource' to the channel.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_resource(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_adapter *adapt = chan->chan_adapter;

	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
		chan->chan_openings++;
	else
		adapt->adapt_openings++;
}

/*
 * scsipi_get_tag:
 *
 *	Get a tag ID for the specified xfer.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_get_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit, tag;

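	/*
	 * Scan the free-tag bitmap.  ffs() returns the 1-based index of
	 * the lowest set bit, or 0 if the word has no free tags; the
	 * resulting tag ID is the flat bit position (word * 32 + bit).
	 */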
	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
		bit = ffs(periph->periph_freetags[word]);
		if (bit != 0)
			break;
	}
#ifdef DIAGNOSTIC
	if (word == PERIPH_NTAGWORDS) {
		scsipi_printaddr(periph);
		printf("no free tags\n");
		panic("scsipi_get_tag");
	}
#endif

	bit -= 1;
	periph->periph_freetags[word] &= ~(1 << bit);
	tag = (word << 5) | bit;

	/* XXX Should eventually disallow this completely. */
	if (tag >= periph->periph_openings) {
		scsipi_printaddr(periph);
		printf("WARNING: tag %d greater than available openings %d\n",
		    tag, periph->periph_openings);
	}

	xs->xs_tag_id = tag;
}

/*
 * scsipi_put_tag:
 *
 *	Put the tag ID for the specified xfer back into the pool.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_tag(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int word, bit;

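	/* Recover the bitmap word index and bit position from the tag ID. */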
	word = xs->xs_tag_id >> 5;
	bit = xs->xs_tag_id & 0x1f;

	periph->periph_freetags[word] |= (1 << bit);
}

/*
 * scsipi_get_xs:
 *
 *	Allocate an xfer descriptor and associate it with the
 *	specified peripheral.  If the peripheral has no more
 *	available command openings, we either block waiting for
 *	one to become available, or fail.
 */
struct scsipi_xfer *
scsipi_get_xs(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_xfer *xs;
	int s;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));

	/*
	 * If we're cold, make sure we poll.
	 */
	if (cold)
		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;

#ifdef DIAGNOSTIC
	/*
	 * URGENT commands can never be ASYNC.
	 */
	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
		scsipi_printaddr(periph);
		printf("URGENT and ASYNC\n");
		panic("scsipi_get_xs");
	}
#endif

	s = splbio();
	/*
	 * Wait for a command opening to become available.  Rules:
	 *
	 *	- All xfers must wait for an available opening.
	 *	  Exception: URGENT xfers can proceed when
	 *	  active == openings, because we use the opening
	 *	  of the command we're recovering for.
	 *
	 *	- If the periph has sense pending, only URGENT & REQSENSE
	 *	  xfers may proceed.
	 *
	 *	- If the periph is recovering, only URGENT xfers may
	 *	  proceed.
	 *
	 *	- If the periph is currently executing a recovery
	 *	  command, URGENT commands must block, because only
	 *	  one recovery command can execute at a time.
	 */
	for (;;) {
		if (flags & XS_CTL_URGENT) {
			if (periph->periph_active > periph->periph_openings)
				goto wait_for_opening;
			if (periph->periph_flags & PERIPH_SENSE) {
				if ((flags & XS_CTL_REQSENSE) == 0)
					goto wait_for_opening;
			} else {
				if ((periph->periph_flags &
				    PERIPH_RECOVERY_ACTIVE) != 0)
					goto wait_for_opening;
				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
			}
			break;
		}
		if (periph->periph_active >= periph->periph_openings ||
		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
			goto wait_for_opening;
		periph->periph_active++;
		break;

 wait_for_opening:
		if (flags & XS_CTL_NOSLEEP) {
			splx(s);
			return (NULL);
		}
		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
		periph->periph_flags |= PERIPH_WAITING;
		(void) tsleep(periph, PRIBIO, "getxs", 0);
	}
	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
	xs = pool_get(&scsipi_xfer_pool,
	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
	if (xs == NULL) {
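		/*
		 * Allocation failed; undo the opening/recovery accounting
		 * performed above before reporting the failure.
		 */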
		if (flags & XS_CTL_URGENT) {
			if ((flags & XS_CTL_REQSENSE) == 0)
				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
		} else
			periph->periph_active--;
		scsipi_printaddr(periph);
		printf("unable to allocate %sscsipi_xfer\n",
		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
	}
	splx(s);

	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));

	if (xs != NULL) {
		callout_init(&xs->xs_callout);
		memset(xs, 0, sizeof(*xs));
		xs->xs_periph = periph;
		xs->xs_control = flags;
		xs->xs_status = 0;
		s = splbio();
		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
		splx(s);
	}
	return (xs);
}

/*
 * scsipi_put_xs:
 *
 *	Release an xfer descriptor, decreasing the outstanding command
 *	count for the peripheral.  If there is a thread waiting for
 *	an opening, wake it up.  If not, kick any queued I/O the
 *	peripheral may have.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_put_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	int flags = xs->xs_control;

	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));

	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
	pool_put(&scsipi_xfer_pool, xs);

#ifdef DIAGNOSTIC
	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
	    periph->periph_active == 0) {
		scsipi_printaddr(periph);
		printf("recovery without a command to recover for\n");
		panic("scsipi_put_xs");
	}
#endif

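	/*
	 * URGENT recovery xfers did not consume a normal opening, so
	 * only clear the recovery state; all other xfers decrement the
	 * outstanding command count.
	 */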
	if (flags & XS_CTL_URGENT) {
		if ((flags & XS_CTL_REQSENSE) == 0)
			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
	} else
		periph->periph_active--;
	if (periph->periph_active == 0 &&
	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
		periph->periph_flags &= ~PERIPH_WAITDRAIN;
		wakeup(&periph->periph_active);
	}

	if (periph->periph_flags & PERIPH_WAITING) {
		periph->periph_flags &= ~PERIPH_WAITING;
		wakeup(periph);
	} else {
		if (periph->periph_switch->psw_start != NULL) {
			SC_DEBUG(periph, SCSIPI_DB2,
			    ("calling private start()\n"));
			(*periph->periph_switch->psw_start)(periph);
		}
	}
}

/*
 * scsipi_channel_freeze:
 *
 *	Freeze a channel's xfer queue.
 */
void
scsipi_channel_freeze(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze += count;
	splx(s);
}

/*
 * scsipi_channel_thaw:
 *
 *	Thaw a channel's xfer queue.
 */
void
scsipi_channel_thaw(chan, count)
	struct scsipi_channel *chan;
	int count;
{
	int s;

	s = splbio();
	chan->chan_qfreeze -= count;
	/*
	 * Don't let the freeze count go negative.
	 *
	 * Presumably the adapter driver could keep track of this,
	 * but it might just be easier to do this here so as to allow
	 * multiple callers, including those outside the adapter driver.
	 */
	if (chan->chan_qfreeze < 0) {
		chan->chan_qfreeze = 0;
	}
	splx(s);
	/*
	 * Kick the channel's queue here.  Note, we may be running in
	 * interrupt context (softclock or HBA's interrupt), so the adapter
	 * driver had better not sleep.
	 */
	if (chan->chan_qfreeze == 0)
		scsipi_run_queue(chan);
}

/*
 * scsipi_channel_timed_thaw:
 *
 *	Thaw a channel after some time has expired.  This will also
 *	run the channel's queue if the freeze count has reached 0.
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;

	scsipi_channel_thaw(chan, 1);
}

/*
 * scsipi_periph_freeze:
 *
 *	Freeze a device's xfer queue.
 */
void
scsipi_periph_freeze(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze += count;
	splx(s);
}

/*
 * scsipi_periph_thaw:
 *
 *	Thaw a device's xfer queue.
 */
void
scsipi_periph_thaw(periph, count)
	struct scsipi_periph *periph;
	int count;
{
	int s;

	s = splbio();
	periph->periph_qfreeze -= count;
#ifdef DIAGNOSTIC
	if (periph->periph_qfreeze < 0) {
		static const char pc[] = "periph freeze count < 0";
		scsipi_printaddr(periph);
		printf("%s\n", pc);
		panic(pc);
	}
#endif
	if (periph->periph_qfreeze == 0 &&
	    (periph->periph_flags & PERIPH_WAITING) != 0)
		wakeup(periph);
	splx(s);
}

/*
 * scsipi_periph_timed_thaw:
 *
 *	Thaw a device after some time has expired.
 */
void
scsipi_periph_timed_thaw(arg)
	void *arg;
{
	int s;
	struct scsipi_periph *periph = arg;

	callout_stop(&periph->periph_callout);

	s = splbio();
	scsipi_periph_thaw(periph, 1);
	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
		/*
		 * Kick the channel's queue here.  Note, we're running in
		 * interrupt context (softclock), so the adapter driver
		 * had better not sleep.
		 */
		scsipi_run_queue(periph->periph_channel);
	} else {
		/*
		 * Tell the completion thread to kick the channel's queue here.
		 */
		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
		wakeup(&periph->periph_channel->chan_complete);
	}
	splx(s);
}

/*
 * scsipi_wait_drain:
 *
 *	Wait for a periph's pending xfers to drain.
 */
void
scsipi_wait_drain(periph)
	struct scsipi_periph *periph;
{
	int s;

	s = splbio();
	while (periph->periph_active != 0) {
		periph->periph_flags |= PERIPH_WAITDRAIN;
		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
	}
	splx(s);
}

/*
 * scsipi_kill_pending:
 *
 *	Kill off all pending xfers for a periph.
 *
 *	NOTE: Must be called at splbio().
 */
void
scsipi_kill_pending(periph)
	struct scsipi_periph *periph;
{

	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
#ifdef DIAGNOSTIC
	if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
		panic("scsipi_kill_pending");
#endif
	scsipi_wait_drain(periph);
}

/*
 * scsipi_interpret_sense:
 *
 *	Look at the returned sense and act on the error, determining
 *	the Unix error number to pass back.  (0 = report no error)
 *
 *	NOTE: If we return ERESTART, we are expected to have
 *	thawed the device!
 *
 *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
 */
int
scsipi_interpret_sense(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_sense_data *sense;
	struct scsipi_periph *periph = xs->xs_periph;
	u_int8_t key;
	u_int32_t info;
	int error;
#ifndef SCSIVERBOSE
	static char *error_mes[] = {
		"soft error (corrected)",
		"not ready", "medium error",
		"non-media hardware failure", "illegal request",
		"unit attention", "readonly device",
		"no data found", "vendor unique",
		"copy aborted", "command aborted",
		"search returned equal", "volume overflow",
		"verify miscompare", "unknown error key"
	};
#endif

	sense = &xs->sense.scsi_sense;
#ifdef SCSIPI_DEBUG
	if (periph->periph_flags & SCSIPI_DB1) {
		int count;
		scsipi_printaddr(periph);
		printf(" sense debug information:\n");
		printf("\tcode 0x%x valid 0x%x\n",
		    sense->error_code & SSD_ERRCODE,
		    sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
		    sense->segment,
		    sense->flags & SSD_KEY,
		    sense->flags & SSD_ILI ? 1 : 0,
		    sense->flags & SSD_EOM ? 1 : 0,
		    sense->flags & SSD_FILEMARK ? 1 : 0);
		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
		    "extra bytes\n",
		    sense->info[0],
		    sense->info[1],
		    sense->info[2],
		    sense->info[3],
		    sense->extra_len);
		printf("\textra: ");
		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
			printf("0x%x ", sense->cmd_spec_info[count]);
		printf("\n");
	}
#endif

	/*
	 * If the periph has its own error handler, call it first.
	 * If it returns a legit error value, return that, otherwise
	 * it wants us to continue with normal error processing.
	 */
	if (periph->periph_switch->psw_error != NULL) {
		SC_DEBUG(periph, SCSIPI_DB2,
		    ("calling private err_handler()\n"));
		error = (*periph->periph_switch->psw_error)(xs);
		if (error != EJUSTRETURN)
			return (error);
	}
	/* otherwise use the default */
	switch (sense->error_code & SSD_ERRCODE) {
	/*
	 * If it's code 70, use the extended stuff and
	 * interpret the key
	 */
	case 0x71:		/* delayed error */
		scsipi_printaddr(periph);
		key = sense->flags & SSD_KEY;
		printf(" DEFERRED ERROR, key = 0x%x\n", key);
		/* FALLTHROUGH */
	case 0x70:
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
			info = _4btol(sense->info);
		else
			info = 0;
		key = sense->flags & SSD_KEY;

		switch (key) {
		case SKEY_NO_SENSE:
		case SKEY_RECOVERED_ERROR:
			if (xs->resid == xs->datalen && xs->datalen) {
				/*
				 * Why is this here?
				 */
				xs->resid = 0;	/* not short read */
			}
		case SKEY_EQUAL:
			error = 0;
			break;
		case SKEY_NOT_READY:
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
				return (0);
			if (sense->add_sense_code == 0x3A) {
				error = ENODEV; /* Medium not present */
				if (xs->xs_control & XS_CTL_SILENT_NODEV)
					return (error);
			} else
				error = EIO;
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (error);
			break;
		case SKEY_ILLEGAL_REQUEST:
			if ((xs->xs_control &
			    XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
				return (0);
			/*
			 * Handle the case where a device reports
			 * Logical Unit Not Supported during discovery.
			 */
			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
			    sense->add_sense_code == 0x25 &&
			    sense->add_sense_code_qual == 0x00)
				return (EINVAL);
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EINVAL;
			break;
		case SKEY_UNIT_ATTENTION:
			if (sense->add_sense_code == 0x29 &&
			    sense->add_sense_code_qual == 0x00) {
				/* device or bus reset */
				return (ERESTART);
			}
			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
			if ((xs->xs_control &
			    XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
			    /* XXX Should reupload any transient state. */
			    (periph->periph_flags &
			     PERIPH_REMOVABLE) == 0) {
				return (ERESTART);
			}
			if ((xs->xs_control & XS_CTL_SILENT) != 0)
				return (EIO);
			error = EIO;
			break;
		case SKEY_WRITE_PROTECT:
			error = EROFS;
			break;
		case SKEY_BLANK_CHECK:
			error = 0;
			break;
		case SKEY_ABORTED_COMMAND:
			error = ERESTART;
			break;
		case SKEY_VOLUME_OVERFLOW:
			error = ENOSPC;
			break;
		default:
			error = EIO;
			break;
		}

#ifdef SCSIVERBOSE
		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
			scsipi_print_sense(xs, 0);
#else
		if (key) {
			scsipi_printaddr(periph);
			printf("%s", error_mes[key - 1]);
			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
				switch (key) {
				case SKEY_NOT_READY:
				case SKEY_ILLEGAL_REQUEST:
				case SKEY_UNIT_ATTENTION:
				case SKEY_WRITE_PROTECT:
					break;
				case SKEY_BLANK_CHECK:
					printf(", requested size: %d (decimal)",
					    info);
					break;
				case SKEY_ABORTED_COMMAND:
					if (xs->xs_retries)
						printf(", retrying");
					printf(", cmd 0x%x, info 0x%x",
					    xs->cmd->opcode, info);
					break;
				default:
					printf(", info = %d (decimal)", info);
				}
			}
			if (sense->extra_len != 0) {
				int n;
				printf(", data =");
				for (n = 0; n < sense->extra_len; n++)
					printf(" %02x",
					    sense->cmd_spec_info[n]);
			}
			printf("\n");
		}
#endif
		return (error);

	/*
	 * Not code 70, just report it
	 */
	default:
#if defined(SCSIDEBUG) || defined(DEBUG)
	{
		static char *uc = "undecodable sense error";
		int i;
		u_int8_t *cptr = (u_int8_t *) sense;
		scsipi_printaddr(periph);
		if (xs->cmd == &xs->cmdstore) {
			printf("%s for opcode 0x%x, data=",
			    uc, xs->cmdstore.opcode);
		} else {
			printf("%s, data=", uc);
		}
		for (i = 0; i < sizeof (*sense); i++)
			printf(" 0x%02x", *(cptr++) & 0xff);
		printf("\n");
	}
#else
		scsipi_printaddr(periph);
		printf("Sense Error Code 0x%x",
		    sense->error_code & SSD_ERRCODE);
		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
			struct scsipi_sense_data_unextended *usense =
			    (struct scsipi_sense_data_unextended *)sense;
			printf(" at block no. %d (decimal)",
			    _3btol(usense->block));
		}
		printf("\n");
#endif
		return (EIO);
	}
}

/*
 * scsipi_size:
 *
 *	Find out from the device what its capacity is.
 */
u_long
scsipi_size(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_read_cap_data rdcap;
	struct scsipi_read_capacity scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = READ_CAPACITY;

	/*
	 * If the command works, interpret the result as a 4 byte
	 * number of blocks
	 */
	if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
	    SCSIPIRETRIES, 20000, NULL,
	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
		scsipi_printaddr(periph);
		printf("could not get size\n");
		return (0);
	}

	return (_4btol(rdcap.addr) + 1);
}

/*
 * scsipi_test_unit_ready:
 *
 *	Issue a `test unit ready' request.
 */
int
scsipi_test_unit_ready(periph, flags)
	struct scsipi_periph *periph;
	int flags;
{
	struct scsipi_test_unit_ready scsipi_cmd;

	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
	if (periph->periph_quirks & PQUIRK_NOTUR)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = TEST_UNIT_READY;

	return (scsipi_command(periph,
	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 10000, NULL, flags));
}

/*
 * scsipi_inquire:
 *
 *	Ask the device about itself.
 */
int
scsipi_inquire(periph, inqbuf, flags)
	struct scsipi_periph *periph;
	struct scsipi_inquiry_data *inqbuf;
	int flags;
{
	struct scsipi_inquiry scsipi_cmd;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = INQUIRY;
	scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
	    SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
}

/*
 * scsipi_prevent:
 *
 *	Prevent or allow the user to remove the media
 */
int
scsipi_prevent(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_prevent scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
		return (0);

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = PREVENT_ALLOW;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
}

/*
 * scsipi_start:
 *
 *	Send a START UNIT.
 */
int
scsipi_start(periph, type, flags)
	struct scsipi_periph *periph;
	int type, flags;
{
	struct scsipi_start_stop scsipi_cmd;

	if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
		return 0;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = START_STOP;
	scsipi_cmd.byte2 = 0x00;
	scsipi_cmd.how = type;

	return (scsipi_command(periph,
	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
	    NULL, flags));
}

/*
 * scsipi_mode_sense, scsipi_mode_sense_big:
 *	get a mode page from a device
 */

int
scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_sense scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
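	/*
	 * ATAPI MODE SENSE uses a 16-bit allocation length, while the
	 * SCSI MODE SENSE(6) CDB only has a single length byte.
	 */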
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense: error=%d\n", error));
	return (error);
}

int
scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, page, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_sense_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SENSE_BIG;
	scsipi_cmd.byte2 = byte2;
	scsipi_cmd.page = page;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_IN);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_sense_big: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header *data;
{
	struct scsipi_mode_select scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT;
	scsipi_cmd.byte2 = byte2;
	if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
		_lto2b(len, scsipi_cmd.u_len.atapi.length);
	else
		scsipi_cmd.u_len.scsi.length = len & 0xff;
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select: error=%d\n", error));
	return (error);
}

int
scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
	struct scsipi_periph *periph;
	int byte2, len, flags, retries, timeout;
	struct scsipi_mode_header_big *data;
{
	struct scsipi_mode_select_big scsipi_cmd;
	int error;

	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
	scsipi_cmd.opcode = MODE_SELECT_BIG;
	scsipi_cmd.byte2 = byte2;
	_lto2b(len, scsipi_cmd.length);
	error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
	    sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
	    flags | XS_CTL_DATA_OUT);
	SC_DEBUG(periph, SCSIPI_DB2,
	    ("scsipi_mode_select_big: error=%d\n", error));
	return (error);
}

/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
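		/* The untagged command is no longer outstanding. */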
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}

/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
1349 1.2 bouyer /*
1350 1.38.4.1 nathanw 	 * If the command terminated with a CHECK CONDITION, we need to issue a
1351 1.38.4.1 nathanw * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1352 1.38.4.1 nathanw * we'll have the real status.
1353 1.38.4.1 nathanw * Must be processed at splbio() to avoid missing a SCSI bus reset
1354 1.38.4.1 nathanw * for this command.
1355 1.38.4.1 nathanw */
1356 1.38.4.1 nathanw s = splbio();
1357 1.38.4.1 nathanw if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1358 1.38.4.1 nathanw 		/* request sense for a request sense? */
1359 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_REQSENSE) {
1360 1.38.4.1 nathanw scsipi_printaddr(periph);
1361 1.38.4.2 nathanw 			printf("request sense for a request sense?\n");
1362 1.38.4.1 nathanw 			/* XXX maybe we should reset the device? */
1363 1.38.4.1 nathanw /* we've been frozen because xs->error != XS_NOERROR */
1364 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1365 1.38.4.1 nathanw splx(s);
1366 1.38.4.2 nathanw if (xs->resid < xs->datalen) {
1367 1.38.4.2 nathanw printf("we read %d bytes of sense anyway:\n",
1368 1.38.4.2 nathanw xs->datalen - xs->resid);
1369 1.38.4.2 nathanw #ifdef SCSIVERBOSE
1370 1.38.4.2 nathanw scsipi_print_sense_data((void *)xs->data, 0);
1371 1.38.4.2 nathanw #endif
1372 1.38.4.2 nathanw }
1373 1.38.4.1 nathanw return EINVAL;
1374 1.38.4.1 nathanw }
1375 1.38.4.1 nathanw scsipi_request_sense(xs);
1376 1.38.4.1 nathanw }
1377 1.38.4.1 nathanw splx(s);
1378 1.38.4.2 nathanw
1379 1.38.4.1 nathanw /*
1380 1.38.4.1 nathanw * If it's a user level request, bypass all usual completion
1381 1.38.4.1 nathanw 	 * processing; let the user work it out.
1382 1.2 bouyer */
1383 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1384 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1385 1.38.4.1 nathanw if (xs->error != XS_NOERROR)
1386 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1387 1.38.4.1 nathanw scsipi_user_done(xs);
1388 1.38.4.1 nathanw 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
1389 1.38.4.1 nathanw return 0;
1390 1.38.4.1 nathanw }
1391 1.38.4.1 nathanw
1392 1.2 bouyer switch (xs->error) {
1393 1.38.4.1 nathanw case XS_NOERROR:
1394 1.2 bouyer error = 0;
1395 1.2 bouyer break;
1396 1.2 bouyer
1397 1.2 bouyer case XS_SENSE:
1398 1.13 bouyer case XS_SHORTSENSE:
1399 1.38.4.1 nathanw error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1400 1.2 bouyer break;
1401 1.2 bouyer
1402 1.38.4.1 nathanw case XS_RESOURCE_SHORTAGE:
1403 1.38.4.1 nathanw /*
1404 1.38.4.1 nathanw * XXX Should freeze channel's queue.
1405 1.38.4.1 nathanw */
1406 1.38.4.1 nathanw scsipi_printaddr(periph);
1407 1.38.4.1 nathanw printf("adapter resource shortage\n");
1408 1.38.4.1 nathanw /* FALLTHROUGH */
1409 1.38.4.1 nathanw
1410 1.2 bouyer case XS_BUSY:
1411 1.38.4.1 nathanw if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1412 1.38.4.1 nathanw struct scsipi_max_openings mo;
1413 1.38.4.1 nathanw
1414 1.38.4.1 nathanw /*
1415 1.38.4.1 nathanw * We set the openings to active - 1, assuming that
1416 1.38.4.1 nathanw * the command that got us here is the first one that
1417 1.38.4.1 nathanw * can't fit into the device's queue. If that's not
1418 1.38.4.1 nathanw * the case, I guess we'll find out soon enough.
1419 1.38.4.1 nathanw */
1420 1.38.4.1 nathanw mo.mo_target = periph->periph_target;
1421 1.38.4.1 nathanw mo.mo_lun = periph->periph_lun;
1422 1.38.4.1 nathanw if (periph->periph_active < periph->periph_openings)
1423 1.38.4.1 nathanw mo.mo_openings = periph->periph_active - 1;
1424 1.2 bouyer else
1425 1.38.4.1 nathanw mo.mo_openings = periph->periph_openings - 1;
1426 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1427 1.38.4.1 nathanw if (mo.mo_openings < 0) {
1428 1.38.4.1 nathanw scsipi_printaddr(periph);
1429 1.38.4.1 nathanw printf("QUEUE FULL resulted in < 0 openings\n");
1430 1.38.4.1 nathanw panic("scsipi_done");
1431 1.38.4.1 nathanw }
1432 1.2 bouyer #endif
1433 1.38.4.1 nathanw if (mo.mo_openings == 0) {
1434 1.38.4.1 nathanw scsipi_printaddr(periph);
1435 1.38.4.1 nathanw printf("QUEUE FULL resulted in 0 openings\n");
1436 1.38.4.1 nathanw mo.mo_openings = 1;
1437 1.38.4.1 nathanw }
1438 1.38.4.1 nathanw scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1439 1.38.4.1 nathanw error = ERESTART;
1440 1.38.4.1 nathanw } else if (xs->xs_retries != 0) {
1441 1.38.4.1 nathanw xs->xs_retries--;
1442 1.38.4.1 nathanw /*
1443 1.38.4.1 nathanw * Wait one second, and try again.
1444 1.38.4.1 nathanw */
1445 1.38.4.4 nathanw if ((xs->xs_control & XS_CTL_POLL) ||
1446 1.38.4.4 nathanw (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1447 1.38.4.1 nathanw delay(1000000);
1448 1.38.4.4 nathanw } else {
1449 1.38.4.1 nathanw scsipi_periph_freeze(periph, 1);
1450 1.38.4.1 nathanw callout_reset(&periph->periph_callout,
1451 1.38.4.1 nathanw hz, scsipi_periph_timed_thaw, periph);
1452 1.38.4.1 nathanw }
1453 1.38.4.1 nathanw error = ERESTART;
1454 1.38.4.1 nathanw } else
1455 1.38.4.1 nathanw error = EBUSY;
1456 1.38.4.1 nathanw break;
1457 1.38.4.1 nathanw
1458 1.38.4.1 nathanw case XS_REQUEUE:
1459 1.38.4.1 nathanw error = ERESTART;
1460 1.38.4.1 nathanw break;
1461 1.38.4.1 nathanw
1462 1.2 bouyer case XS_TIMEOUT:
1463 1.38.4.1 nathanw if (xs->xs_retries != 0) {
1464 1.38.4.1 nathanw xs->xs_retries--;
1465 1.38.4.1 nathanw error = ERESTART;
1466 1.38.4.1 nathanw } else
1467 1.38.4.1 nathanw error = EIO;
1468 1.2 bouyer break;
1469 1.2 bouyer
1470 1.2 bouyer case XS_SELTIMEOUT:
1471 1.2 bouyer /* XXX Disable device? */
1472 1.12 thorpej error = EIO;
1473 1.12 thorpej break;
1474 1.12 thorpej
1475 1.12 thorpej case XS_RESET:
1476 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_REQSENSE) {
1477 1.38.4.1 nathanw /*
1478 1.38.4.1 nathanw * request sense interrupted by reset: signal it
1479 1.38.4.1 nathanw * with EINTR return code.
1480 1.38.4.1 nathanw */
1481 1.38.4.1 nathanw error = EINTR;
1482 1.38.4.1 nathanw } else {
1483 1.38.4.1 nathanw if (xs->xs_retries != 0) {
1484 1.38.4.1 nathanw xs->xs_retries--;
1485 1.38.4.1 nathanw error = ERESTART;
1486 1.38.4.1 nathanw } else
1487 1.38.4.1 nathanw error = EIO;
1488 1.12 thorpej }
1489 1.2 bouyer break;
1490 1.2 bouyer
1491 1.2 bouyer default:
1492 1.38.4.1 nathanw scsipi_printaddr(periph);
1493 1.38.4.1 nathanw printf("invalid return code from adapter: %d\n", xs->error);
1494 1.2 bouyer error = EIO;
1495 1.2 bouyer break;
1496 1.2 bouyer }
1497 1.2 bouyer
1498 1.38.4.1 nathanw s = splbio();
1499 1.38.4.1 nathanw if (error == ERESTART) {
1500 1.38.4.1 nathanw /*
1501 1.38.4.1 nathanw * If we get here, the periph has been thawed and frozen
1502 1.38.4.1 nathanw * again if we had to issue recovery commands. Alternatively,
1503 1.38.4.1 nathanw * it may have been frozen again and in a timed thaw. In
1504 1.38.4.1 nathanw * any case, we thaw the periph once we re-enqueue the
1505 1.38.4.1 nathanw * command. Once the periph is fully thawed, it will begin
1506 1.38.4.1 nathanw * operation again.
1507 1.38.4.1 nathanw */
1508 1.38.4.1 nathanw xs->error = XS_NOERROR;
1509 1.38.4.1 nathanw xs->status = SCSI_OK;
1510 1.38.4.1 nathanw xs->xs_status &= ~XS_STS_DONE;
1511 1.38.4.1 nathanw xs->xs_requeuecnt++;
1512 1.38.4.1 nathanw error = scsipi_enqueue(xs);
1513 1.38.4.1 nathanw if (error == 0) {
1514 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1515 1.38.4.1 nathanw splx(s);
1516 1.38.4.1 nathanw return (ERESTART);
1517 1.38.4.1 nathanw }
1518 1.38.4.1 nathanw }
1519 1.38.4.1 nathanw
1520 1.38.4.1 nathanw /*
1521 1.38.4.1 nathanw * scsipi_done() freezes the queue if not XS_NOERROR.
1522 1.38.4.1 nathanw * Thaw it here.
1523 1.38.4.1 nathanw */
1524 1.38.4.1 nathanw if (xs->error != XS_NOERROR)
1525 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
1526 1.38.4.1 nathanw
1527 1.38.4.1 nathanw
1528 1.38.4.1 nathanw if (periph->periph_switch->psw_done)
1529 1.38.4.1 nathanw periph->periph_switch->psw_done(xs);
1530 1.38.4.1 nathanw if ((bp = xs->bp) != NULL) {
1531 1.38.4.1 nathanw if (error) {
1532 1.38.4.1 nathanw bp->b_error = error;
1533 1.38.4.1 nathanw bp->b_flags |= B_ERROR;
1534 1.38.4.1 nathanw bp->b_resid = bp->b_bcount;
1535 1.38.4.1 nathanw } else {
1536 1.38.4.1 nathanw bp->b_error = 0;
1537 1.38.4.1 nathanw bp->b_resid = xs->resid;
1538 1.38.4.4 nathanw }
1539 1.38.4.1 nathanw biodone(bp);
1540 1.38.4.1 nathanw }
1541 1.38.4.1 nathanw
1542 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_ASYNC)
1543 1.38.4.1 nathanw scsipi_put_xs(xs);
1544 1.38.4.1 nathanw splx(s);
1545 1.38.4.1 nathanw
1546 1.3 enami return (error);
1547 1.2 bouyer }
1548 1.2 bouyer
1549 1.14 thorpej /*
1550 1.38.4.1 nathanw * Issue a request sense for the given scsipi_xfer. Called when the xfer
1551 1.38.4.1 nathanw * returns with a CHECK_CONDITION status. Must be called in valid thread
1552 1.38.4.1 nathanw * context and at splbio().
1553 1.38.4.1 nathanw */
1554 1.38.4.1 nathanw
1555 1.38.4.1 nathanw void
1556 1.38.4.1 nathanw scsipi_request_sense(xs)
1557 1.38.4.1 nathanw struct scsipi_xfer *xs;
1558 1.38.4.1 nathanw {
1559 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1560 1.38.4.1 nathanw int flags, error;
1561 1.38.4.1 nathanw struct scsipi_sense cmd;
1562 1.38.4.1 nathanw
1563 1.38.4.1 nathanw periph->periph_flags |= PERIPH_SENSE;
1564 1.38.4.1 nathanw
1565 1.38.4.1 nathanw /* if command was polling, request sense will too */
1566 1.38.4.1 nathanw flags = xs->xs_control & XS_CTL_POLL;
1567 1.38.4.1 nathanw /* Polling commands can't sleep */
1568 1.38.4.1 nathanw if (flags)
1569 1.38.4.1 nathanw flags |= XS_CTL_NOSLEEP;
1570 1.38.4.1 nathanw
1571 1.38.4.1 nathanw flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1572 1.38.4.1 nathanw XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1573 1.38.4.1 nathanw
1574 1.38.4.2 nathanw memset(&cmd, 0, sizeof(cmd));
1575 1.38.4.1 nathanw cmd.opcode = REQUEST_SENSE;
1576 1.38.4.1 nathanw cmd.length = sizeof(struct scsipi_sense_data);
1577 1.38.4.1 nathanw
1578 1.38.4.1 nathanw error = scsipi_command(periph,
1579 1.38.4.1 nathanw (struct scsipi_generic *) &cmd, sizeof(cmd),
1580 1.38.4.1 nathanw (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
1581 1.38.4.1 nathanw 0, 1000, NULL, flags);
1582 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_SENSE;
1583 1.38.4.1 nathanw periph->periph_xscheck = NULL;
1584 1.38.4.1 nathanw switch(error) {
1585 1.38.4.1 nathanw case 0:
1586 1.38.4.1 nathanw /* we have a valid sense */
1587 1.38.4.1 nathanw xs->error = XS_SENSE;
1588 1.38.4.1 nathanw return;
1589 1.38.4.1 nathanw case EINTR:
1590 1.38.4.1 nathanw /* REQUEST_SENSE interrupted by bus reset. */
1591 1.38.4.1 nathanw xs->error = XS_RESET;
1592 1.38.4.1 nathanw return;
1593 1.38.4.1 nathanw case EIO:
1594 1.38.4.1 nathanw 		/* request sense couldn't be performed */
1595 1.38.4.1 nathanw /*
1596 1.38.4.1 nathanw 		 * XXX this isn't quite right but we don't have anything
1597 1.38.4.1 nathanw * better for now
1598 1.38.4.1 nathanw */
1599 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1600 1.38.4.1 nathanw return;
1601 1.38.4.1 nathanw default:
1602 1.38.4.1 nathanw /* Notify that request sense failed. */
1603 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1604 1.38.4.1 nathanw scsipi_printaddr(periph);
1605 1.38.4.1 nathanw printf("request sense failed with error %d\n", error);
1606 1.38.4.1 nathanw return;
1607 1.38.4.1 nathanw }
1608 1.38.4.1 nathanw }
1609 1.38.4.1 nathanw
1610 1.38.4.1 nathanw /*
1611 1.38.4.1 nathanw * scsipi_enqueue:
1612 1.38.4.1 nathanw *
1613 1.38.4.1 nathanw * Enqueue an xfer on a channel.
1614 1.14 thorpej */
1615 1.14 thorpej int
1616 1.38.4.1 nathanw scsipi_enqueue(xs)
1617 1.38.4.1 nathanw struct scsipi_xfer *xs;
1618 1.14 thorpej {
1619 1.38.4.1 nathanw struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1620 1.38.4.1 nathanw struct scsipi_xfer *qxs;
1621 1.38.4.1 nathanw int s;
1622 1.14 thorpej
1623 1.14 thorpej s = splbio();
1624 1.38.4.1 nathanw
1625 1.38.4.1 nathanw /*
1626 1.38.4.1 nathanw * If the xfer is to be polled, and there are already jobs on
1627 1.38.4.1 nathanw * the queue, we can't proceed.
1628 1.38.4.1 nathanw */
1629 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1630 1.38.4.1 nathanw TAILQ_FIRST(&chan->chan_queue) != NULL) {
1631 1.38.4.1 nathanw splx(s);
1632 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1633 1.38.4.1 nathanw return (EAGAIN);
1634 1.38.4.1 nathanw }
1635 1.38.4.1 nathanw
1636 1.38.4.1 nathanw /*
1637 1.38.4.1 nathanw * If we have an URGENT xfer, it's an error recovery command
1638 1.38.4.1 nathanw * and it should just go on the head of the channel's queue.
1639 1.38.4.1 nathanw */
1640 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_URGENT) {
1641 1.38.4.1 nathanw TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1642 1.38.4.1 nathanw goto out;
1643 1.38.4.1 nathanw }
1644 1.38.4.1 nathanw
1645 1.38.4.1 nathanw /*
1646 1.38.4.1 nathanw * If this xfer has already been on the queue before, we
1647 1.38.4.1 nathanw * need to reinsert it in the correct order. That order is:
1648 1.38.4.1 nathanw *
1649 1.38.4.1 nathanw * Immediately before the first xfer for this periph
1650 1.38.4.1 nathanw * with a requeuecnt less than xs->xs_requeuecnt.
1651 1.38.4.1 nathanw *
1652 1.38.4.1 nathanw * Failing that, at the end of the queue. (We'll end up
1653 1.38.4.1 nathanw * there naturally.)
1654 1.38.4.1 nathanw */
1655 1.38.4.1 nathanw if (xs->xs_requeuecnt != 0) {
1656 1.38.4.1 nathanw for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1657 1.38.4.1 nathanw qxs = TAILQ_NEXT(qxs, channel_q)) {
1658 1.38.4.1 nathanw if (qxs->xs_periph == xs->xs_periph &&
1659 1.38.4.1 nathanw qxs->xs_requeuecnt < xs->xs_requeuecnt)
1660 1.38.4.1 nathanw break;
1661 1.38.4.1 nathanw }
1662 1.38.4.1 nathanw if (qxs != NULL) {
1663 1.38.4.1 nathanw TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1664 1.38.4.1 nathanw channel_q);
1665 1.38.4.1 nathanw goto out;
1666 1.38.4.1 nathanw }
1667 1.14 thorpej }
1668 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1669 1.38.4.1 nathanw out:
1670 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_THAW_PERIPH)
1671 1.38.4.1 nathanw scsipi_periph_thaw(xs->xs_periph, 1);
1672 1.14 thorpej splx(s);
1673 1.38.4.1 nathanw return (0);
1674 1.14 thorpej }
1675 1.14 thorpej
1676 1.14 thorpej /*
1677 1.38.4.1 nathanw * scsipi_run_queue:
1678 1.38.4.1 nathanw *
1679 1.38.4.1 nathanw * Start as many xfers as possible running on the channel.
1680 1.14 thorpej */
1681 1.14 thorpej void
1682 1.38.4.1 nathanw scsipi_run_queue(chan)
1683 1.38.4.1 nathanw struct scsipi_channel *chan;
1684 1.14 thorpej {
1685 1.38.4.1 nathanw struct scsipi_xfer *xs;
1686 1.38.4.1 nathanw struct scsipi_periph *periph;
1687 1.14 thorpej int s;
1688 1.14 thorpej
1689 1.38.4.1 nathanw for (;;) {
1690 1.38.4.1 nathanw s = splbio();
1691 1.38.4.1 nathanw
1692 1.38.4.1 nathanw /*
1693 1.38.4.1 nathanw * If the channel is frozen, we can't do any work right
1694 1.38.4.1 nathanw * now.
1695 1.38.4.1 nathanw */
1696 1.38.4.1 nathanw if (chan->chan_qfreeze != 0) {
1697 1.38.4.1 nathanw splx(s);
1698 1.38.4.1 nathanw return;
1699 1.38.4.1 nathanw }
1700 1.38.4.1 nathanw
1701 1.38.4.1 nathanw /*
1702 1.38.4.1 nathanw * Look for work to do, and make sure we can do it.
1703 1.38.4.1 nathanw */
1704 1.38.4.1 nathanw for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1705 1.38.4.1 nathanw xs = TAILQ_NEXT(xs, channel_q)) {
1706 1.38.4.1 nathanw periph = xs->xs_periph;
1707 1.38.4.1 nathanw
1708 1.38.4.1 nathanw if ((periph->periph_sent >= periph->periph_openings) ||
1709 1.38.4.1 nathanw periph->periph_qfreeze != 0 ||
1710 1.38.4.1 nathanw (periph->periph_flags & PERIPH_UNTAG) != 0)
1711 1.38.4.1 nathanw continue;
1712 1.38.4.1 nathanw
1713 1.38.4.1 nathanw if ((periph->periph_flags &
1714 1.38.4.1 nathanw (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1715 1.38.4.1 nathanw (xs->xs_control & XS_CTL_URGENT) == 0)
1716 1.38.4.1 nathanw continue;
1717 1.38.4.1 nathanw
1718 1.38.4.1 nathanw /*
1719 1.38.4.1 nathanw * We can issue this xfer!
1720 1.38.4.1 nathanw */
1721 1.38.4.1 nathanw goto got_one;
1722 1.38.4.1 nathanw }
1723 1.38.4.1 nathanw
1724 1.38.4.1 nathanw /*
1725 1.38.4.1 nathanw * Can't find any work to do right now.
1726 1.38.4.1 nathanw */
1727 1.38.4.1 nathanw splx(s);
1728 1.38.4.1 nathanw return;
1729 1.38.4.1 nathanw
1730 1.38.4.1 nathanw got_one:
1731 1.38.4.1 nathanw /*
1732 1.38.4.1 nathanw * Have an xfer to run. Allocate a resource from
1733 1.38.4.1 nathanw * the adapter to run it. If we can't allocate that
1734 1.38.4.1 nathanw * resource, we don't dequeue the xfer.
1735 1.38.4.1 nathanw */
1736 1.38.4.1 nathanw if (scsipi_get_resource(chan) == 0) {
1737 1.38.4.1 nathanw /*
1738 1.38.4.1 nathanw * Adapter is out of resources. If the adapter
1739 1.38.4.1 nathanw * supports it, attempt to grow them.
1740 1.38.4.1 nathanw */
1741 1.38.4.1 nathanw if (scsipi_grow_resources(chan) == 0) {
1742 1.38.4.1 nathanw /*
1743 1.38.4.1 nathanw * Wasn't able to grow resources,
1744 1.38.4.1 nathanw * nothing more we can do.
1745 1.38.4.1 nathanw */
1746 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_POLL) {
1747 1.38.4.1 nathanw scsipi_printaddr(xs->xs_periph);
1748 1.38.4.1 nathanw printf("polling command but no "
1749 1.38.4.1 nathanw 				    "adapter resources\n");
1750 1.38.4.1 nathanw /* We'll panic shortly... */
1751 1.38.4.1 nathanw }
1752 1.38.4.1 nathanw splx(s);
1753 1.38.4.1 nathanw
1754 1.38.4.1 nathanw /*
1755 1.38.4.1 nathanw * XXX: We should be able to note that
1756 1.38.4.1 nathanw 			 * XXX: resources are needed here!
1757 1.38.4.1 nathanw */
1758 1.38.4.1 nathanw return;
1759 1.38.4.1 nathanw }
1760 1.38.4.1 nathanw /*
1761 1.38.4.1 nathanw * scsipi_grow_resources() allocated the resource
1762 1.38.4.1 nathanw * for us.
1763 1.38.4.1 nathanw */
1764 1.38.4.1 nathanw }
1765 1.38.4.1 nathanw
1766 1.38.4.1 nathanw /*
1767 1.38.4.1 nathanw * We have a resource to run this xfer, do it!
1768 1.38.4.1 nathanw */
1769 1.38.4.1 nathanw TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1770 1.38.4.1 nathanw
1771 1.38.4.1 nathanw /*
1772 1.38.4.1 nathanw * If the command is to be tagged, allocate a tag ID
1773 1.38.4.1 nathanw * for it.
1774 1.38.4.1 nathanw */
1775 1.38.4.1 nathanw if (XS_CTL_TAGTYPE(xs) != 0)
1776 1.38.4.1 nathanw scsipi_get_tag(xs);
1777 1.38.4.1 nathanw else
1778 1.38.4.1 nathanw periph->periph_flags |= PERIPH_UNTAG;
1779 1.38.4.1 nathanw periph->periph_sent++;
1780 1.38.4.1 nathanw splx(s);
1781 1.38.4.1 nathanw
1782 1.38.4.1 nathanw scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1783 1.38.4.1 nathanw }
1784 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1785 1.38.4.1 nathanw panic("scsipi_run_queue: impossible");
1786 1.38.4.1 nathanw #endif
1787 1.38.4.1 nathanw }
1788 1.38.4.1 nathanw
1789 1.38.4.1 nathanw /*
1790 1.38.4.1 nathanw * scsipi_execute_xs:
1791 1.38.4.1 nathanw *
1792 1.38.4.1 nathanw * Begin execution of an xfer, waiting for it to complete, if necessary.
1793 1.38.4.1 nathanw */
1794 1.38.4.1 nathanw int
1795 1.38.4.1 nathanw scsipi_execute_xs(xs)
1796 1.38.4.1 nathanw struct scsipi_xfer *xs;
1797 1.38.4.1 nathanw {
1798 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1799 1.38.4.1 nathanw struct scsipi_channel *chan = periph->periph_channel;
1800 1.38.4.1 nathanw int async, poll, retries, error, s;
1801 1.38.4.1 nathanw
1802 1.38.4.1 nathanw xs->xs_status &= ~XS_STS_DONE;
1803 1.38.4.1 nathanw xs->error = XS_NOERROR;
1804 1.38.4.1 nathanw xs->resid = xs->datalen;
1805 1.38.4.1 nathanw xs->status = SCSI_OK;
1806 1.38.4.1 nathanw
1807 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
1808 1.38.4.1 nathanw if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1809 1.38.4.1 nathanw printf("scsipi_execute_xs: ");
1810 1.38.4.1 nathanw show_scsipi_xs(xs);
1811 1.38.4.1 nathanw printf("\n");
1812 1.38.4.1 nathanw }
1813 1.38.4.1 nathanw #endif
1814 1.38.4.1 nathanw
1815 1.38.4.1 nathanw /*
1816 1.38.4.1 nathanw * Deal with command tagging:
1817 1.38.4.1 nathanw *
1818 1.38.4.1 nathanw * - If the device's current operating mode doesn't
1819 1.38.4.1 nathanw * include tagged queueing, clear the tag mask.
1820 1.38.4.1 nathanw *
1821 1.38.4.1 nathanw * - If the device's current operating mode *does*
1822 1.38.4.1 nathanw * include tagged queueing, set the tag_type in
1823 1.38.4.1 nathanw * the xfer to the appropriate byte for the tag
1824 1.38.4.1 nathanw * message.
1825 1.38.4.1 nathanw */
1826 1.38.4.1 nathanw if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1827 1.38.4.1 nathanw (xs->xs_control & XS_CTL_REQSENSE)) {
1828 1.38.4.1 nathanw xs->xs_control &= ~XS_CTL_TAGMASK;
1829 1.38.4.1 nathanw xs->xs_tag_type = 0;
1830 1.38.4.1 nathanw } else {
1831 1.38.4.1 nathanw /*
1832 1.38.4.1 nathanw * If the request doesn't specify a tag, give Head
1833 1.38.4.1 nathanw * tags to URGENT operations and Ordered tags to
1834 1.38.4.1 nathanw * everything else.
1835 1.38.4.1 nathanw */
1836 1.38.4.1 nathanw if (XS_CTL_TAGTYPE(xs) == 0) {
1837 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_URGENT)
1838 1.38.4.1 nathanw xs->xs_control |= XS_CTL_HEAD_TAG;
1839 1.38.4.1 nathanw else
1840 1.38.4.1 nathanw xs->xs_control |= XS_CTL_ORDERED_TAG;
1841 1.38.4.1 nathanw }
1842 1.38.4.1 nathanw
1843 1.38.4.1 nathanw switch (XS_CTL_TAGTYPE(xs)) {
1844 1.38.4.1 nathanw case XS_CTL_ORDERED_TAG:
1845 1.38.4.1 nathanw xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1846 1.38.4.1 nathanw break;
1847 1.38.4.1 nathanw
1848 1.38.4.1 nathanw case XS_CTL_SIMPLE_TAG:
1849 1.38.4.1 nathanw xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1850 1.38.4.1 nathanw break;
1851 1.38.4.1 nathanw
1852 1.38.4.1 nathanw case XS_CTL_HEAD_TAG:
1853 1.38.4.1 nathanw xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1854 1.38.4.1 nathanw break;
1855 1.38.4.1 nathanw
1856 1.38.4.1 nathanw default:
1857 1.38.4.1 nathanw scsipi_printaddr(periph);
1858 1.38.4.1 nathanw printf("invalid tag mask 0x%08x\n",
1859 1.38.4.1 nathanw XS_CTL_TAGTYPE(xs));
1860 1.38.4.1 nathanw panic("scsipi_execute_xs");
1861 1.38.4.1 nathanw }
1862 1.38.4.1 nathanw }
1863 1.38.4.1 nathanw
1864 1.38.4.1 nathanw 	/* If the adapter wants us to poll, poll. */
1865 1.38.4.1 nathanw if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
1866 1.38.4.1 nathanw xs->xs_control |= XS_CTL_POLL;
1867 1.38.4.1 nathanw
1868 1.38.4.1 nathanw /*
1869 1.38.4.1 nathanw * If we don't yet have a completion thread, or we are to poll for
1870 1.38.4.1 nathanw * completion, clear the ASYNC flag.
1871 1.38.4.1 nathanw */
1872 1.38.4.1 nathanw if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1873 1.38.4.1 nathanw xs->xs_control &= ~XS_CTL_ASYNC;
1874 1.38.4.1 nathanw
1875 1.38.4.1 nathanw async = (xs->xs_control & XS_CTL_ASYNC);
1876 1.38.4.1 nathanw poll = (xs->xs_control & XS_CTL_POLL);
1877 1.38.4.1 nathanw retries = xs->xs_retries; /* for polling commands */
1878 1.38.4.1 nathanw
1879 1.38.4.1 nathanw #ifdef DIAGNOSTIC
1880 1.38.4.1 nathanw if (async != 0 && xs->bp == NULL)
1881 1.38.4.1 nathanw panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1882 1.38.4.1 nathanw #endif
1883 1.38.4.1 nathanw
1884 1.38.4.1 nathanw /*
1885 1.38.4.1 nathanw * Enqueue the transfer. If we're not polling for completion, this
1886 1.38.4.1 nathanw * should ALWAYS return `no error'.
1887 1.38.4.1 nathanw */
1888 1.38.4.1 nathanw try_again:
1889 1.38.4.1 nathanw error = scsipi_enqueue(xs);
1890 1.38.4.1 nathanw if (error) {
1891 1.38.4.1 nathanw if (poll == 0) {
1892 1.38.4.1 nathanw scsipi_printaddr(periph);
1893 1.38.4.1 nathanw printf("not polling, but enqueue failed with %d\n",
1894 1.38.4.1 nathanw error);
1895 1.38.4.1 nathanw panic("scsipi_execute_xs");
1896 1.38.4.1 nathanw }
1897 1.38.4.1 nathanw
1898 1.38.4.1 nathanw scsipi_printaddr(periph);
1899 1.38.4.1 nathanw printf("failed to enqueue polling command");
1900 1.38.4.1 nathanw if (retries != 0) {
1901 1.38.4.1 nathanw printf(", retrying...\n");
1902 1.38.4.1 nathanw delay(1000000);
1903 1.38.4.1 nathanw retries--;
1904 1.38.4.1 nathanw goto try_again;
1905 1.38.4.1 nathanw }
1906 1.38.4.1 nathanw printf("\n");
1907 1.38.4.1 nathanw goto free_xs;
1908 1.38.4.1 nathanw }
1909 1.38.4.1 nathanw
1910 1.38.4.1 nathanw restarted:
1911 1.38.4.1 nathanw scsipi_run_queue(chan);
1912 1.38.4.1 nathanw
1913 1.38.4.1 nathanw /*
1914 1.38.4.1 nathanw * The xfer is enqueued, and possibly running. If it's to be
1915 1.38.4.1 nathanw * completed asynchronously, just return now.
1916 1.38.4.1 nathanw */
1917 1.38.4.1 nathanw if (async)
1918 1.38.4.1 nathanw return (EJUSTRETURN);
1919 1.38.4.1 nathanw
1920 1.38.4.1 nathanw /*
1921 1.38.4.1 nathanw * Not an asynchronous command; wait for it to complete.
1922 1.38.4.1 nathanw */
1923 1.38.4.1 nathanw s = splbio();
1924 1.38.4.1 nathanw while ((xs->xs_status & XS_STS_DONE) == 0) {
1925 1.38.4.1 nathanw if (poll) {
1926 1.38.4.1 nathanw scsipi_printaddr(periph);
1927 1.38.4.1 nathanw printf("polling command not done\n");
1928 1.38.4.1 nathanw panic("scsipi_execute_xs");
1929 1.38.4.1 nathanw }
1930 1.38.4.1 nathanw (void) tsleep(xs, PRIBIO, "xscmd", 0);
1931 1.38.4.1 nathanw }
1932 1.38.4.1 nathanw splx(s);
1933 1.38.4.1 nathanw
1934 1.38.4.1 nathanw /*
1935 1.38.4.1 nathanw * Command is complete. scsipi_done() has awakened us to perform
1936 1.38.4.1 nathanw * the error handling.
1937 1.38.4.1 nathanw */
1938 1.38.4.1 nathanw error = scsipi_complete(xs);
1939 1.38.4.1 nathanw if (error == ERESTART)
1940 1.38.4.1 nathanw goto restarted;
1941 1.38.4.1 nathanw
1942 1.38.4.1 nathanw /*
1943 1.38.4.1 nathanw * Command completed successfully or fatal error occurred. Fall
1944 1.38.4.1 nathanw * into....
1945 1.38.4.1 nathanw */
1946 1.38.4.1 nathanw free_xs:
1947 1.38.4.1 nathanw s = splbio();
1948 1.38.4.1 nathanw scsipi_put_xs(xs);
1949 1.38.4.1 nathanw splx(s);
1950 1.38.4.1 nathanw
1951 1.38.4.1 nathanw /*
1952 1.38.4.1 nathanw * Kick the queue, keep it running in case it stopped for some
1953 1.38.4.1 nathanw * reason.
1954 1.38.4.1 nathanw */
1955 1.38.4.1 nathanw scsipi_run_queue(chan);
1956 1.38.4.1 nathanw
1957 1.38.4.1 nathanw return (error);
1958 1.38.4.1 nathanw }
1959 1.38.4.1 nathanw
1960 1.38.4.1 nathanw /*
1961 1.38.4.1 nathanw * scsipi_completion_thread:
1962 1.38.4.1 nathanw *
1963 1.38.4.1 nathanw * This is the completion thread. We wait for errors on
1964 1.38.4.1 nathanw * asynchronous xfers, and perform the error handling
1965 1.38.4.1 nathanw * function, restarting the command, if necessary.
1966 1.38.4.1 nathanw */
1967 1.38.4.1 nathanw void
1968 1.38.4.1 nathanw scsipi_completion_thread(arg)
1969 1.38.4.1 nathanw void *arg;
1970 1.38.4.1 nathanw {
1971 1.38.4.1 nathanw struct scsipi_channel *chan = arg;
1972 1.38.4.1 nathanw struct scsipi_xfer *xs;
1973 1.38.4.1 nathanw int s;
1974 1.38.4.1 nathanw
1975 1.38.4.4 nathanw s = splbio();
1976 1.38.4.4 nathanw chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
1977 1.38.4.4 nathanw splx(s);
1978 1.38.4.1 nathanw for (;;) {
1979 1.38.4.1 nathanw s = splbio();
1980 1.38.4.1 nathanw xs = TAILQ_FIRST(&chan->chan_complete);
1981 1.38.4.5 nathanw if (xs == NULL && chan->chan_tflags == 0) {
1982 1.38.4.5 nathanw /* nothing to do; wait */
1983 1.38.4.1 nathanw (void) tsleep(&chan->chan_complete, PRIBIO,
1984 1.38.4.1 nathanw "sccomp", 0);
1985 1.38.4.1 nathanw splx(s);
1986 1.38.4.1 nathanw continue;
1987 1.38.4.1 nathanw }
1988 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
1989 1.38.4.2 nathanw /* call chan_callback from thread context */
1990 1.38.4.5 nathanw chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
1991 1.38.4.2 nathanw chan->chan_callback(chan, chan->chan_callback_arg);
1992 1.38.4.4 nathanw splx(s);
1993 1.38.4.4 nathanw continue;
1994 1.38.4.4 nathanw }
1995 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
1996 1.38.4.5 nathanw /* attempt to get more openings for this channel */
1997 1.38.4.5 nathanw chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
1998 1.38.4.5 nathanw scsipi_adapter_request(chan,
1999 1.38.4.5 nathanw ADAPTER_REQ_GROW_RESOURCES, NULL);
2000 1.38.4.5 nathanw scsipi_channel_thaw(chan, 1);
2001 1.38.4.5 nathanw splx(s);
2002 1.38.4.5 nathanw continue;
2003 1.38.4.5 nathanw }
2004 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2005 1.38.4.4 nathanw /* explicitly run the queues for this channel */
2006 1.38.4.5 nathanw chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2007 1.38.4.4 nathanw scsipi_run_queue(chan);
2008 1.38.4.2 nathanw splx(s);
2009 1.38.4.2 nathanw continue;
2010 1.38.4.2 nathanw }
2011 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2012 1.38.4.1 nathanw splx(s);
2013 1.38.4.1 nathanw break;
2014 1.38.4.1 nathanw }
2015 1.38.4.2 nathanw if (xs) {
2016 1.38.4.2 nathanw TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2017 1.38.4.2 nathanw splx(s);
2018 1.38.4.1 nathanw
2019 1.38.4.2 nathanw /*
2020 1.38.4.2 nathanw * Have an xfer with an error; process it.
2021 1.38.4.2 nathanw */
2022 1.38.4.2 nathanw (void) scsipi_complete(xs);
2023 1.38.4.1 nathanw
2024 1.38.4.2 nathanw /*
2025 1.38.4.2 nathanw * Kick the queue; keep it running if it was stopped
2026 1.38.4.2 nathanw * for some reason.
2027 1.38.4.2 nathanw */
2028 1.38.4.2 nathanw scsipi_run_queue(chan);
2029 1.38.4.2 nathanw } else {
2030 1.38.4.2 nathanw splx(s);
2031 1.38.4.2 nathanw }
2032 1.38.4.1 nathanw }
2033 1.38.4.1 nathanw
2034 1.38.4.1 nathanw chan->chan_thread = NULL;
2035 1.38.4.1 nathanw
2036 1.38.4.1 nathanw /* In case parent is waiting for us to exit. */
2037 1.38.4.1 nathanw wakeup(&chan->chan_thread);
2038 1.38.4.1 nathanw
2039 1.38.4.1 nathanw kthread_exit(0);
2040 1.38.4.1 nathanw }
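
/*
 * Illustrative sketch (comment only, not part of the driver): other code
 * hands work to the completion thread by setting one of the SCSIPI_CHANT_*
 * flags at splbio() and waking the thread, e.g. to have the queues run
 * from thread context:
 *
 *	s = splbio();
 *	chan->chan_tflags |= SCSIPI_CHANT_KICK;
 *	wakeup(&chan->chan_complete);
 *	splx(s);
 *
 * scsipi_thread_call_callback() below uses the same pattern for
 * SCSIPI_CHANT_CALLBACK.
 */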
2041 1.38.4.1 nathanw
2042 1.38.4.1 nathanw /*
2043 1.38.4.1 nathanw * scsipi_create_completion_thread:
2044 1.38.4.1 nathanw *
2045 1.38.4.1 nathanw * Callback to actually create the completion thread.
2046 1.38.4.1 nathanw */
2047 1.38.4.1 nathanw void
2048 1.38.4.1 nathanw scsipi_create_completion_thread(arg)
2049 1.38.4.1 nathanw void *arg;
2050 1.38.4.1 nathanw {
2051 1.38.4.1 nathanw struct scsipi_channel *chan = arg;
2052 1.38.4.1 nathanw struct scsipi_adapter *adapt = chan->chan_adapter;
2053 1.38.4.1 nathanw
2054 1.38.4.1 nathanw if (kthread_create1(scsipi_completion_thread, chan,
2055 1.38.4.1 nathanw &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
2056 1.38.4.1 nathanw chan->chan_channel)) {
2057 1.38.4.1 nathanw printf("%s: unable to create completion thread for "
2058 1.38.4.1 nathanw "channel %d\n", adapt->adapt_dev->dv_xname,
2059 1.38.4.1 nathanw chan->chan_channel);
2060 1.38.4.1 nathanw panic("scsipi_create_completion_thread");
2061 1.38.4.1 nathanw }
2062 1.38.4.1 nathanw }
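
/*
 * Note (illustrative, not compiled): channel setup is expected to register
 * this callback with kthread_create(scsipi_create_completion_thread, chan),
 * which defers the call until kernel threads can be created.  Until
 * chan->chan_thread is set, scsipi_execute_xs() above clears XS_CTL_ASYNC,
 * so commands simply complete synchronously.
 */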
2063 1.38.4.1 nathanw
2064 1.38.4.1 nathanw /*
2065 1.38.4.2 nathanw * scsipi_thread_call_callback:
2066 1.38.4.2 nathanw *
2067 1.38.4.2 nathanw  *	Request that the given callback be run from the completion thread.
2068 1.38.4.2 nathanw */
2069 1.38.4.2 nathanw int
2070 1.38.4.2 nathanw scsipi_thread_call_callback(chan, callback, arg)
2071 1.38.4.2 nathanw struct scsipi_channel *chan;
2072 1.38.4.2 nathanw void (*callback) __P((struct scsipi_channel *, void *));
2073 1.38.4.2 nathanw void *arg;
2074 1.38.4.2 nathanw {
2075 1.38.4.2 nathanw int s;
2076 1.38.4.2 nathanw
2077 1.38.4.2 nathanw s = splbio();
2078 1.38.4.5 nathanw if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2079 1.38.4.5 nathanw /* kernel thread doesn't exist yet */
2080 1.38.4.5 nathanw splx(s);
2081 1.38.4.5 nathanw return ESRCH;
2082 1.38.4.5 nathanw }
2083 1.38.4.5 nathanw if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2084 1.38.4.2 nathanw splx(s);
2085 1.38.4.2 nathanw return EBUSY;
2086 1.38.4.2 nathanw }
2087 1.38.4.2 nathanw scsipi_channel_freeze(chan, 1);
2088 1.38.4.2 nathanw chan->chan_callback = callback;
2089 1.38.4.2 nathanw chan->chan_callback_arg = arg;
2090 1.38.4.5 nathanw chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2091 1.38.4.2 nathanw wakeup(&chan->chan_complete);
2092 1.38.4.2 nathanw splx(s);
2093 1.38.4.2 nathanw return(0);
2094 1.38.4.2 nathanw }
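
/*
 * Illustrative sketch (comment only): an adapter or periph driver that has
 * to do potentially blocking work in response to an interrupt can defer it
 * to the completion thread.  The xxx_* names below are hypothetical.
 *
 *	void
 *	xxx_recover(chan, arg)
 *		struct scsipi_channel *chan;
 *		void *arg;
 *	{
 *		struct xxx_softc *sc = arg;
 *
 *		... blocking recovery work, safe to tsleep here ...
 *	}
 *
 *	if (scsipi_thread_call_callback(chan, xxx_recover, sc) != 0)
 *		printf("xxx: can't defer recovery to completion thread\n");
 *
 * Note that the channel freeze taken above is not undone by the completion
 * thread itself, so it is up to the callback to thaw the channel again once
 * its work is done.
 */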
2095 1.38.4.2 nathanw
2096 1.38.4.2 nathanw /*
2097 1.38.4.1 nathanw * scsipi_async_event:
2098 1.38.4.1 nathanw *
2099 1.38.4.1 nathanw * Handle an asynchronous event from an adapter.
2100 1.38.4.1 nathanw */
2101 1.38.4.1 nathanw void
2102 1.38.4.1 nathanw scsipi_async_event(chan, event, arg)
2103 1.38.4.1 nathanw struct scsipi_channel *chan;
2104 1.38.4.1 nathanw scsipi_async_event_t event;
2105 1.38.4.1 nathanw void *arg;
2106 1.38.4.1 nathanw {
2107 1.38.4.1 nathanw int s;
2108 1.38.4.1 nathanw
2109 1.38.4.1 nathanw s = splbio();
2110 1.38.4.1 nathanw switch (event) {
2111 1.38.4.1 nathanw case ASYNC_EVENT_MAX_OPENINGS:
2112 1.38.4.1 nathanw scsipi_async_event_max_openings(chan,
2113 1.38.4.1 nathanw (struct scsipi_max_openings *)arg);
2114 1.38.4.1 nathanw break;
2115 1.38.4.1 nathanw
2116 1.38.4.1 nathanw case ASYNC_EVENT_XFER_MODE:
2117 1.38.4.1 nathanw scsipi_async_event_xfer_mode(chan,
2118 1.38.4.1 nathanw (struct scsipi_xfer_mode *)arg);
2119 1.38.4.1 nathanw break;
2120 1.38.4.1 nathanw case ASYNC_EVENT_RESET:
2121 1.38.4.1 nathanw scsipi_async_event_channel_reset(chan);
2122 1.38.4.1 nathanw break;
2123 1.38.4.1 nathanw }
2124 1.38.4.1 nathanw splx(s);
2125 1.38.4.1 nathanw }
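
/*
 * Illustrative sketch (comment only): an adapter driver typically posts
 * these events from its interrupt handler; the values below are made up.
 *
 *	struct scsipi_xfer_mode xm;
 *
 *	xm.xm_target = target;
 *	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16;
 *	xm.xm_period = sync_factor;	(the negotiated period factor)
 *	xm.xm_offset = sync_offset;
 *	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
 *
 * and, after a detected bus reset:
 *
 *	scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
 */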
2126 1.38.4.1 nathanw
2127 1.38.4.1 nathanw /*
2128 1.38.4.1 nathanw * scsipi_print_xfer_mode:
2129 1.38.4.1 nathanw *
2130 1.38.4.1 nathanw * Print a periph's capabilities.
2131 1.38.4.1 nathanw */
2132 1.38.4.1 nathanw void
2133 1.38.4.1 nathanw scsipi_print_xfer_mode(periph)
2134 1.38.4.1 nathanw struct scsipi_periph *periph;
2135 1.38.4.1 nathanw {
2136 1.38.4.1 nathanw int period, freq, speed, mbs;
2137 1.38.4.1 nathanw
2138 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2139 1.38.4.1 nathanw return;
2140 1.38.4.1 nathanw
2141 1.38.4.1 nathanw printf("%s: ", periph->periph_dev->dv_xname);
2142 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_SYNC) {
2143 1.38.4.1 nathanw period = scsipi_sync_factor_to_period(periph->periph_period);
2144 1.38.4.1 nathanw printf("sync (%d.%dns offset %d)",
2145 1.38.4.1 nathanw period / 10, period % 10, periph->periph_offset);
2146 1.38.4.1 nathanw } else
2147 1.38.4.1 nathanw printf("async");
2148 1.38.4.1 nathanw
2149 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_WIDE32)
2150 1.38.4.1 nathanw printf(", 32-bit");
2151 1.38.4.1 nathanw else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2152 1.38.4.1 nathanw printf(", 16-bit");
2153 1.38.4.1 nathanw else
2154 1.38.4.1 nathanw printf(", 8-bit");
2155 1.38.4.1 nathanw
2156 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_SYNC) {
2157 1.38.4.1 nathanw freq = scsipi_sync_factor_to_freq(periph->periph_period);
2158 1.38.4.1 nathanw speed = freq;
2159 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_WIDE32)
2160 1.38.4.1 nathanw speed *= 4;
2161 1.38.4.1 nathanw else if (periph->periph_mode & PERIPH_CAP_WIDE16)
2162 1.38.4.1 nathanw speed *= 2;
2163 1.38.4.1 nathanw mbs = speed / 1000;
2164 1.38.4.1 nathanw if (mbs > 0)
2165 1.38.4.1 nathanw printf(" (%d.%03dMB/s)", mbs, speed % 1000);
2166 1.38.4.1 nathanw else
2167 1.38.4.1 nathanw printf(" (%dKB/s)", speed % 1000);
2168 1.38.4.1 nathanw }
2169 1.38.4.1 nathanw
2170 1.38.4.1 nathanw printf(" transfers");
2171 1.38.4.1 nathanw
2172 1.38.4.1 nathanw if (periph->periph_mode & PERIPH_CAP_TQING)
2173 1.38.4.1 nathanw printf(", tagged queueing");
2174 1.38.4.1 nathanw
2175 1.38.4.1 nathanw printf("\n");
2176 1.38.4.1 nathanw }
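
/*
 * For example (illustrative, hypothetical device name): a periph that
 * negotiated wide-16 synchronous transfers at period factor 0x0a (25.0ns)
 * with offset 15 and tagged queueing would be announced roughly as
 *
 *	sd0: sync (25.0ns offset 15), 16-bit (80.000MB/s) transfers, tagged queueing
 *
 * since scsipi_sync_factor_to_freq(0x0a) is 40000 (KB/s on an 8-bit bus)
 * and the 16-bit multiplier doubles that to 80000KB/s.
 */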
2177 1.38.4.1 nathanw
2178 1.38.4.1 nathanw /*
2179 1.38.4.1 nathanw * scsipi_async_event_max_openings:
2180 1.38.4.1 nathanw *
2181 1.38.4.1 nathanw * Update the maximum number of outstanding commands a
2182 1.38.4.1 nathanw * device may have.
2183 1.38.4.1 nathanw */
2184 1.38.4.1 nathanw void
2185 1.38.4.1 nathanw scsipi_async_event_max_openings(chan, mo)
2186 1.38.4.1 nathanw struct scsipi_channel *chan;
2187 1.38.4.1 nathanw struct scsipi_max_openings *mo;
2188 1.38.4.1 nathanw {
2189 1.38.4.1 nathanw struct scsipi_periph *periph;
2190 1.38.4.1 nathanw int minlun, maxlun;
2191 1.38.4.1 nathanw
2192 1.38.4.1 nathanw if (mo->mo_lun == -1) {
2193 1.38.4.1 nathanw /*
2194 1.38.4.1 nathanw * Wildcarded; apply it to all LUNs.
2195 1.38.4.1 nathanw */
2196 1.38.4.1 nathanw minlun = 0;
2197 1.38.4.1 nathanw maxlun = chan->chan_nluns - 1;
2198 1.38.4.1 nathanw } else
2199 1.38.4.1 nathanw minlun = maxlun = mo->mo_lun;
2200 1.38.4.1 nathanw
2201 1.38.4.1 nathanw for (; minlun <= maxlun; minlun++) {
2202 1.38.4.1 nathanw periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2203 1.38.4.1 nathanw if (periph == NULL)
2204 1.38.4.1 nathanw continue;
2205 1.38.4.1 nathanw
2206 1.38.4.1 nathanw if (mo->mo_openings < periph->periph_openings)
2207 1.38.4.1 nathanw periph->periph_openings = mo->mo_openings;
2208 1.38.4.1 nathanw else if (mo->mo_openings > periph->periph_openings &&
2209 1.38.4.1 nathanw (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2210 1.38.4.1 nathanw periph->periph_openings = mo->mo_openings;
2211 1.38.4.1 nathanw }
2212 1.38.4.1 nathanw }
2213 1.38.4.1 nathanw
2214 1.38.4.1 nathanw /*
2215 1.38.4.1 nathanw * scsipi_async_event_xfer_mode:
2216 1.38.4.1 nathanw *
2217 1.38.4.1 nathanw * Update the xfer mode for all periphs sharing the
2218 1.38.4.1 nathanw * specified I_T Nexus.
2219 1.38.4.1 nathanw */
2220 1.38.4.1 nathanw void
2221 1.38.4.1 nathanw scsipi_async_event_xfer_mode(chan, xm)
2222 1.38.4.1 nathanw struct scsipi_channel *chan;
2223 1.38.4.1 nathanw struct scsipi_xfer_mode *xm;
2224 1.38.4.1 nathanw {
2225 1.38.4.1 nathanw struct scsipi_periph *periph;
2226 1.38.4.1 nathanw int lun, announce, mode, period, offset;
2227 1.38.4.1 nathanw
2228 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2229 1.38.4.1 nathanw periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2230 1.38.4.1 nathanw if (periph == NULL)
2231 1.38.4.1 nathanw continue;
2232 1.38.4.1 nathanw announce = 0;
2233 1.38.4.1 nathanw
2234 1.38.4.1 nathanw /*
2235 1.38.4.1 nathanw * Clamp the xfer mode down to this periph's capabilities.
2236 1.38.4.1 nathanw */
2237 1.38.4.1 nathanw mode = xm->xm_mode & periph->periph_cap;
2238 1.38.4.1 nathanw if (mode & PERIPH_CAP_SYNC) {
2239 1.38.4.1 nathanw period = xm->xm_period;
2240 1.38.4.1 nathanw offset = xm->xm_offset;
2241 1.38.4.1 nathanw } else {
2242 1.38.4.1 nathanw period = 0;
2243 1.38.4.1 nathanw offset = 0;
2244 1.38.4.1 nathanw }
2245 1.38.4.1 nathanw
2246 1.38.4.1 nathanw /*
2247 1.38.4.1 nathanw * If we do not have a valid xfer mode yet, or the parameters
2248 1.38.4.1 nathanw * are different, announce them.
2249 1.38.4.1 nathanw */
2250 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2251 1.38.4.1 nathanw periph->periph_mode != mode ||
2252 1.38.4.1 nathanw periph->periph_period != period ||
2253 1.38.4.1 nathanw periph->periph_offset != offset)
2254 1.38.4.1 nathanw announce = 1;
2255 1.38.4.1 nathanw
2256 1.38.4.1 nathanw periph->periph_mode = mode;
2257 1.38.4.1 nathanw periph->periph_period = period;
2258 1.38.4.1 nathanw periph->periph_offset = offset;
2259 1.38.4.1 nathanw periph->periph_flags |= PERIPH_MODE_VALID;
2260 1.38.4.1 nathanw
2261 1.38.4.1 nathanw if (announce)
2262 1.38.4.1 nathanw scsipi_print_xfer_mode(periph);
2263 1.38.4.1 nathanw }
2264 1.38.4.1 nathanw }
2265 1.38.4.1 nathanw
2266 1.38.4.1 nathanw /*
2267 1.38.4.1 nathanw * scsipi_set_xfer_mode:
2268 1.38.4.1 nathanw *
2269 1.38.4.1 nathanw * Set the xfer mode for the specified I_T Nexus.
2270 1.38.4.1 nathanw */
2271 1.38.4.1 nathanw void
2272 1.38.4.1 nathanw scsipi_set_xfer_mode(chan, target, immed)
2273 1.38.4.1 nathanw struct scsipi_channel *chan;
2274 1.38.4.1 nathanw int target, immed;
2275 1.38.4.1 nathanw {
2276 1.38.4.1 nathanw struct scsipi_xfer_mode xm;
2277 1.38.4.1 nathanw struct scsipi_periph *itperiph;
2278 1.38.4.1 nathanw int lun, s;
2279 1.38.4.1 nathanw
2280 1.38.4.1 nathanw /*
2281 1.38.4.1 nathanw * Go to the minimal xfer mode.
2282 1.38.4.1 nathanw */
2283 1.38.4.1 nathanw xm.xm_target = target;
2284 1.38.4.1 nathanw xm.xm_mode = 0;
2285 1.38.4.1 nathanw xm.xm_period = 0; /* ignored */
2286 1.38.4.1 nathanw xm.xm_offset = 0; /* ignored */
2287 1.38.4.1 nathanw
2288 1.38.4.1 nathanw /*
2289 1.38.4.1 nathanw * Find the first LUN we know about on this I_T Nexus.
2290 1.38.4.1 nathanw */
2291 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2292 1.38.4.1 nathanw itperiph = scsipi_lookup_periph(chan, target, lun);
2293 1.38.4.1 nathanw if (itperiph != NULL)
2294 1.38.4.1 nathanw break;
2295 1.38.4.1 nathanw }
2296 1.38.4.2 nathanw if (itperiph != NULL) {
2297 1.38.4.1 nathanw xm.xm_mode = itperiph->periph_cap;
2298 1.38.4.2 nathanw /*
2299 1.38.4.2 nathanw * Now issue the request to the adapter.
2300 1.38.4.2 nathanw */
2301 1.38.4.2 nathanw s = splbio();
2302 1.38.4.2 nathanw scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2303 1.38.4.2 nathanw splx(s);
2304 1.38.4.2 nathanw /*
2305 1.38.4.2 nathanw * If we want this to happen immediately, issue a dummy
2306 1.38.4.2 nathanw * command, since most adapters can't really negotiate unless
2307 1.38.4.2 nathanw * they're executing a job.
2308 1.38.4.2 nathanw */
2309 1.38.4.2 nathanw if (immed != 0) {
2310 1.38.4.2 nathanw (void) scsipi_test_unit_ready(itperiph,
2311 1.38.4.2 nathanw XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2312 1.38.4.2 nathanw XS_CTL_IGNORE_NOT_READY |
2313 1.38.4.2 nathanw XS_CTL_IGNORE_MEDIA_CHANGE);
2314 1.38.4.2 nathanw }
2315 1.38.4.1 nathanw }
2316 1.38.4.1 nathanw }
2317 1.38.4.1 nathanw
2318 1.38.4.1 nathanw /*
2319 1.38.4.1 nathanw  * scsipi_async_event_channel_reset:
2320 1.38.4.1 nathanw  *
2321 1.38.4.1 nathanw  *	Handle a SCSI bus reset event.
2322 1.38.4.1 nathanw  *	Must be called at splbio().
2323 1.38.4.1 nathanw */
2324 1.38.4.1 nathanw void
2325 1.38.4.1 nathanw scsipi_async_event_channel_reset(chan)
2326 1.38.4.1 nathanw struct scsipi_channel *chan;
2327 1.38.4.1 nathanw {
2328 1.38.4.1 nathanw struct scsipi_xfer *xs, *xs_next;
2329 1.38.4.1 nathanw struct scsipi_periph *periph;
2330 1.38.4.1 nathanw int target, lun;
2331 1.38.4.1 nathanw
2332 1.38.4.1 nathanw /*
2333 1.38.4.1 nathanw 	 * Channel has been reset. Also mark any pending REQUEST_SENSE
2334 1.38.4.1 nathanw 	 * commands as reset, since their sense data is no longer available.
2335 1.38.4.1 nathanw 	 * We can't call scsipi_done() from here, as those commands have not
2336 1.38.4.1 nathanw 	 * been sent to the adapter yet (that would corrupt the accounting).
2337 1.38.4.1 nathanw */
2338 1.38.4.1 nathanw
2339 1.38.4.1 nathanw for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2340 1.38.4.1 nathanw xs_next = TAILQ_NEXT(xs, channel_q);
2341 1.38.4.1 nathanw if (xs->xs_control & XS_CTL_REQSENSE) {
2342 1.38.4.1 nathanw TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2343 1.38.4.1 nathanw xs->error = XS_RESET;
2344 1.38.4.1 nathanw if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2345 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2346 1.38.4.1 nathanw channel_q);
2347 1.38.4.1 nathanw }
2348 1.38.4.1 nathanw }
2349 1.38.4.1 nathanw wakeup(&chan->chan_complete);
2350 1.38.4.1 nathanw 	/* Catch xfers with a pending sense that don't have a REQSENSE xs yet */
2351 1.38.4.1 nathanw for (target = 0; target < chan->chan_ntargets; target++) {
2352 1.38.4.1 nathanw if (target == chan->chan_id)
2353 1.38.4.1 nathanw continue;
2354 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2355 1.38.4.1 nathanw periph = chan->chan_periphs[target][lun];
2356 1.38.4.1 nathanw if (periph) {
2357 1.38.4.1 nathanw xs = periph->periph_xscheck;
2358 1.38.4.1 nathanw if (xs)
2359 1.38.4.1 nathanw xs->error = XS_RESET;
2360 1.38.4.1 nathanw }
2361 1.38.4.1 nathanw }
2362 1.38.4.1 nathanw }
2363 1.38.4.1 nathanw }
2364 1.38.4.1 nathanw
2365 1.38.4.2 nathanw /*
2366 1.38.4.2 nathanw * scsipi_target_detach:
2367 1.38.4.2 nathanw *
2368 1.38.4.2 nathanw  *	Detach all periphs associated with the specified I_T nexus
2369 1.38.4.2 nathanw  *	(target/lun of -1 are wildcards). Must be called from valid thread context.
2370 1.38.4.2 nathanw */
2371 1.38.4.2 nathanw int
2372 1.38.4.2 nathanw scsipi_target_detach(chan, target, lun, flags)
2373 1.38.4.2 nathanw struct scsipi_channel *chan;
2374 1.38.4.2 nathanw int target, lun;
2375 1.38.4.2 nathanw int flags;
2376 1.38.4.2 nathanw {
2377 1.38.4.2 nathanw struct scsipi_periph *periph;
2378 1.38.4.2 nathanw int ctarget, mintarget, maxtarget;
2379 1.38.4.2 nathanw int clun, minlun, maxlun;
2380 1.38.4.2 nathanw int error;
2381 1.38.4.2 nathanw
2382 1.38.4.2 nathanw if (target == -1) {
2383 1.38.4.2 nathanw mintarget = 0;
2384 1.38.4.2 nathanw maxtarget = chan->chan_ntargets;
2385 1.38.4.2 nathanw } else {
2386 1.38.4.2 nathanw if (target == chan->chan_id)
2387 1.38.4.2 nathanw return EINVAL;
2388 1.38.4.2 nathanw if (target < 0 || target >= chan->chan_ntargets)
2389 1.38.4.2 nathanw return EINVAL;
2390 1.38.4.2 nathanw mintarget = target;
2391 1.38.4.2 nathanw maxtarget = target + 1;
2392 1.38.4.2 nathanw }
2393 1.38.4.2 nathanw
2394 1.38.4.2 nathanw if (lun == -1) {
2395 1.38.4.2 nathanw minlun = 0;
2396 1.38.4.2 nathanw maxlun = chan->chan_nluns;
2397 1.38.4.2 nathanw } else {
2398 1.38.4.2 nathanw if (lun < 0 || lun >= chan->chan_nluns)
2399 1.38.4.2 nathanw return EINVAL;
2400 1.38.4.2 nathanw minlun = lun;
2401 1.38.4.2 nathanw maxlun = lun + 1;
2402 1.38.4.2 nathanw }
2403 1.38.4.2 nathanw
2404 1.38.4.2 nathanw for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2405 1.38.4.2 nathanw if (ctarget == chan->chan_id)
2406 1.38.4.2 nathanw continue;
2407 1.38.4.2 nathanw
2408 1.38.4.2 nathanw for (clun = minlun; clun < maxlun; clun++) {
2409 1.38.4.2 nathanw periph = scsipi_lookup_periph(chan, ctarget, clun);
2410 1.38.4.2 nathanw if (periph == NULL)
2411 1.38.4.2 nathanw continue;
2412 1.38.4.2 nathanw error = config_detach(periph->periph_dev, flags);
2413 1.38.4.2 nathanw if (error)
2414 1.38.4.2 nathanw return (error);
2415 1.38.4.2 nathanw scsipi_remove_periph(chan, periph);
2416 1.38.4.2 nathanw free(periph, M_DEVBUF);
2417 1.38.4.2 nathanw }
2418 1.38.4.2 nathanw }
2419 1.38.4.2 nathanw return(0);
2420 1.38.4.2 nathanw }
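
/*
 * Illustrative sketch (comment only): an adapter front-end's detach routine
 * could tear down every device on its channel with a wildcarded call such as
 *
 *	error = scsipi_target_detach(chan, -1, -1, DETACH_FORCE);
 *
 * where DETACH_FORCE is the usual autoconfiguration flag from <sys/device.h>;
 * whether to force the detach is up to the caller.
 */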
2421 1.38.4.1 nathanw
2422 1.38.4.1 nathanw /*
2423 1.38.4.1 nathanw * scsipi_adapter_addref:
2424 1.38.4.1 nathanw *
2425 1.38.4.1 nathanw  *	Add a reference to the specified adapter, enabling the
2426 1.38.4.1 nathanw  *	adapter if necessary.
2427 1.38.4.1 nathanw */
2428 1.38.4.1 nathanw int
2429 1.38.4.1 nathanw scsipi_adapter_addref(adapt)
2430 1.38.4.1 nathanw struct scsipi_adapter *adapt;
2431 1.38.4.1 nathanw {
2432 1.38.4.1 nathanw int s, error = 0;
2433 1.38.4.1 nathanw
2434 1.38.4.1 nathanw s = splbio();
2435 1.38.4.1 nathanw if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2436 1.38.4.1 nathanw error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2437 1.38.4.1 nathanw if (error)
2438 1.38.4.1 nathanw adapt->adapt_refcnt--;
2439 1.38.4.1 nathanw }
2440 1.38.4.1 nathanw splx(s);
2441 1.38.4.1 nathanw return (error);
2442 1.38.4.1 nathanw }
2443 1.38.4.1 nathanw
2444 1.38.4.1 nathanw /*
2445 1.38.4.1 nathanw * scsipi_adapter_delref:
2446 1.38.4.1 nathanw *
2447 1.38.4.1 nathanw  *	Delete a reference to the specified adapter, disabling the
2448 1.38.4.1 nathanw  *	adapter if possible.
2449 1.38.4.1 nathanw */
2450 1.38.4.1 nathanw void
2451 1.38.4.1 nathanw scsipi_adapter_delref(adapt)
2452 1.38.4.1 nathanw struct scsipi_adapter *adapt;
2453 1.38.4.1 nathanw {
2454 1.38.4.1 nathanw int s;
2455 1.38.4.1 nathanw
2456 1.38.4.1 nathanw s = splbio();
2457 1.38.4.1 nathanw if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2458 1.38.4.1 nathanw (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2459 1.38.4.1 nathanw splx(s);
2460 1.38.4.1 nathanw }
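
/*
 * Illustrative sketch (comment only): periph drivers are expected to bracket
 * periods of device use with these calls, so that an adapter providing an
 * adapt_enable hook (for instance one that can be powered down) is only
 * enabled while it is actually needed:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	... device is usable here ...
 *	scsipi_adapter_delref(adapt);
 */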
2461 1.38.4.1 nathanw
2462 1.38.4.1 nathanw struct scsipi_syncparam {
2463 1.38.4.1 nathanw int ss_factor;
2464 1.38.4.1 nathanw int ss_period; /* ns * 10 */
2465 1.38.4.1 nathanw } scsipi_syncparams[] = {
2466 1.38.4.3 nathanw { 0x09, 125 },
2467 1.38.4.1 nathanw { 0x0a, 250 },
2468 1.38.4.1 nathanw { 0x0b, 303 },
2469 1.38.4.1 nathanw { 0x0c, 500 },
2470 1.38.4.1 nathanw };
2471 1.38.4.1 nathanw const int scsipi_nsyncparams =
2472 1.38.4.1 nathanw sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2473 1.38.4.1 nathanw
2474 1.38.4.1 nathanw int
2475 1.38.4.1 nathanw scsipi_sync_period_to_factor(period)
2476 1.38.4.1 nathanw int period; /* ns * 10 */
2477 1.38.4.1 nathanw {
2478 1.38.4.1 nathanw int i;
2479 1.38.4.1 nathanw
2480 1.38.4.1 nathanw for (i = 0; i < scsipi_nsyncparams; i++) {
2481 1.38.4.1 nathanw if (period <= scsipi_syncparams[i].ss_period)
2482 1.38.4.1 nathanw return (scsipi_syncparams[i].ss_factor);
2483 1.38.4.1 nathanw }
2484 1.38.4.1 nathanw
2485 1.38.4.1 nathanw return ((period / 10) / 4);
2486 1.38.4.1 nathanw }
2487 1.38.4.1 nathanw
2488 1.38.4.1 nathanw int
2489 1.38.4.1 nathanw scsipi_sync_factor_to_period(factor)
2490 1.38.4.1 nathanw int factor;
2491 1.38.4.1 nathanw {
2492 1.38.4.1 nathanw int i;
2493 1.38.4.1 nathanw
2494 1.38.4.1 nathanw for (i = 0; i < scsipi_nsyncparams; i++) {
2495 1.38.4.1 nathanw if (factor == scsipi_syncparams[i].ss_factor)
2496 1.38.4.1 nathanw return (scsipi_syncparams[i].ss_period);
2497 1.38.4.1 nathanw }
2498 1.38.4.1 nathanw
2499 1.38.4.1 nathanw return ((factor * 4) * 10);
2500 1.38.4.1 nathanw }
2501 1.38.4.1 nathanw
2502 1.38.4.1 nathanw int
2503 1.38.4.1 nathanw scsipi_sync_factor_to_freq(factor)
2504 1.38.4.1 nathanw int factor;
2505 1.38.4.1 nathanw {
2506 1.38.4.1 nathanw int i;
2507 1.38.4.1 nathanw
2508 1.38.4.1 nathanw for (i = 0; i < scsipi_nsyncparams; i++) {
2509 1.38.4.1 nathanw if (factor == scsipi_syncparams[i].ss_factor)
2510 1.38.4.1 nathanw return (10000000 / scsipi_syncparams[i].ss_period);
2511 1.38.4.1 nathanw }
2512 1.38.4.1 nathanw
2513 1.38.4.1 nathanw return (10000000 / ((factor * 4) * 10));
2514 1.14 thorpej }
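
/*
 * Worked example (comment only): ss_period is kept in units of ns * 10, so
 * factor 0x0a stands for a 25.0ns period:
 *
 *	scsipi_sync_factor_to_period(0x0a) == 250	(25.0 ns)
 *	scsipi_sync_factor_to_freq(0x0a) == 40000	(kHz, i.e. 40 MHz)
 *
 * Factors outside the table fall back to the traditional 4ns granularity,
 * e.g. factor 0x19 (25) gives 25 * 4 == 100ns, i.e. 10 MHz.
 */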
2515 1.14 thorpej
2516 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
2517 1.2 bouyer /*
2518 1.2 bouyer  * Given a scsipi_xfer, dump the request, in all its glory
2519 1.2 bouyer */
2520 1.2 bouyer void
2521 1.2 bouyer show_scsipi_xs(xs)
2522 1.2 bouyer struct scsipi_xfer *xs;
2523 1.2 bouyer {
2524 1.3 enami
2525 1.2 bouyer printf("xs(%p): ", xs);
2526 1.24 thorpej printf("xs_control(0x%08x)", xs->xs_control);
2527 1.24 thorpej printf("xs_status(0x%08x)", xs->xs_status);
2528 1.38.4.1 nathanw printf("periph(%p)", xs->xs_periph);
2529 1.38.4.1 nathanw printf("retr(0x%x)", xs->xs_retries);
2530 1.2 bouyer printf("timo(0x%x)", xs->timeout);
2531 1.2 bouyer printf("cmd(%p)", xs->cmd);
2532 1.2 bouyer printf("len(0x%x)", xs->cmdlen);
2533 1.2 bouyer printf("data(%p)", xs->data);
2534 1.2 bouyer printf("len(0x%x)", xs->datalen);
2535 1.2 bouyer printf("res(0x%x)", xs->resid);
2536 1.2 bouyer printf("err(0x%x)", xs->error);
2537 1.2 bouyer printf("bp(%p)", xs->bp);
2538 1.2 bouyer show_scsipi_cmd(xs);
2539 1.2 bouyer }
2540 1.2 bouyer
2541 1.2 bouyer void
2542 1.2 bouyer show_scsipi_cmd(xs)
2543 1.2 bouyer struct scsipi_xfer *xs;
2544 1.2 bouyer {
2545 1.2 bouyer u_char *b = (u_char *) xs->cmd;
2546 1.3 enami int i = 0;
2547 1.2 bouyer
2548 1.38.4.1 nathanw scsipi_printaddr(xs->xs_periph);
2549 1.38.4.1 nathanw printf(" command: ");
2550 1.2 bouyer
2551 1.24 thorpej if ((xs->xs_control & XS_CTL_RESET) == 0) {
2552 1.2 bouyer while (i < xs->cmdlen) {
2553 1.2 bouyer if (i)
2554 1.2 bouyer printf(",");
2555 1.2 bouyer printf("0x%x", b[i++]);
2556 1.2 bouyer }
2557 1.2 bouyer printf("-[%d bytes]\n", xs->datalen);
2558 1.2 bouyer if (xs->datalen)
2559 1.2 bouyer show_mem(xs->data, min(64, xs->datalen));
2560 1.2 bouyer } else
2561 1.2 bouyer printf("-RESET-\n");
2562 1.2 bouyer }
2563 1.2 bouyer
2564 1.2 bouyer void
2565 1.2 bouyer show_mem(address, num)
2566 1.2 bouyer u_char *address;
2567 1.2 bouyer int num;
2568 1.2 bouyer {
2569 1.2 bouyer int x;
2570 1.2 bouyer
2571 1.2 bouyer printf("------------------------------");
2572 1.2 bouyer for (x = 0; x < num; x++) {
2573 1.2 bouyer if ((x % 16) == 0)
2574 1.2 bouyer printf("\n%03d: ", x);
2575 1.2 bouyer printf("%02x ", *address++);
2576 1.2 bouyer }
2577 1.2 bouyer printf("\n------------------------------\n");
2578 1.2 bouyer }
2579 1.38.4.1 nathanw #endif /* SCSIPI_DEBUG */