scsipi_base.c revision 1.38.4.1 1 1.38.4.1 nathanw /* $NetBSD: scsipi_base.c,v 1.38.4.1 2001/06/21 20:05:54 nathanw Exp $ */
2 1.2 bouyer
3 1.8 mycroft /*-
4 1.38.4.1 nathanw * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
5 1.8 mycroft * All rights reserved.
6 1.8 mycroft *
7 1.8 mycroft * This code is derived from software contributed to The NetBSD Foundation
8 1.38.4.1 nathanw * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9 1.38.4.1 nathanw * Simulation Facility, NASA Ames Research Center.
10 1.2 bouyer *
11 1.2 bouyer * Redistribution and use in source and binary forms, with or without
12 1.2 bouyer * modification, are permitted provided that the following conditions
13 1.2 bouyer * are met:
14 1.2 bouyer * 1. Redistributions of source code must retain the above copyright
15 1.2 bouyer * notice, this list of conditions and the following disclaimer.
16 1.2 bouyer * 2. Redistributions in binary form must reproduce the above copyright
17 1.2 bouyer * notice, this list of conditions and the following disclaimer in the
18 1.2 bouyer * documentation and/or other materials provided with the distribution.
19 1.2 bouyer * 3. All advertising materials mentioning features or use of this software
20 1.2 bouyer * must display the following acknowledgement:
21 1.8 mycroft * This product includes software developed by the NetBSD
22 1.8 mycroft * Foundation, Inc. and its contributors.
23 1.8 mycroft * 4. Neither the name of The NetBSD Foundation nor the names of its
24 1.8 mycroft * contributors may be used to endorse or promote products derived
25 1.8 mycroft * from this software without specific prior written permission.
26 1.2 bouyer *
27 1.8 mycroft * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 1.8 mycroft * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 1.8 mycroft * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 1.8 mycroft * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 1.8 mycroft * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 1.8 mycroft * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 1.8 mycroft * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 1.8 mycroft * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 1.8 mycroft * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 1.8 mycroft * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 1.8 mycroft * POSSIBILITY OF SUCH DAMAGE.
38 1.2 bouyer */
39 1.2 bouyer
40 1.13 bouyer #include "opt_scsi.h"
41 1.13 bouyer
42 1.2 bouyer #include <sys/types.h>
43 1.2 bouyer #include <sys/param.h>
44 1.2 bouyer #include <sys/systm.h>
45 1.2 bouyer #include <sys/kernel.h>
46 1.2 bouyer #include <sys/buf.h>
47 1.2 bouyer #include <sys/uio.h>
48 1.2 bouyer #include <sys/malloc.h>
49 1.6 thorpej #include <sys/pool.h>
50 1.2 bouyer #include <sys/errno.h>
51 1.2 bouyer #include <sys/device.h>
52 1.2 bouyer #include <sys/proc.h>
53 1.38.4.1 nathanw #include <sys/kthread.h>
54 1.2 bouyer
55 1.2 bouyer #include <dev/scsipi/scsipi_all.h>
56 1.2 bouyer #include <dev/scsipi/scsipi_disk.h>
57 1.2 bouyer #include <dev/scsipi/scsipiconf.h>
58 1.2 bouyer #include <dev/scsipi/scsipi_base.h>
59 1.2 bouyer
60 1.38.4.1 nathanw #include <dev/scsipi/scsi_all.h>
61 1.38.4.1 nathanw #include <dev/scsipi/scsi_message.h>
62 1.38.4.1 nathanw
63 1.38.4.1 nathanw int scsipi_complete __P((struct scsipi_xfer *));
64 1.38.4.1 nathanw void scsipi_request_sense __P((struct scsipi_xfer *));
65 1.38.4.1 nathanw int scsipi_enqueue __P((struct scsipi_xfer *));
66 1.38.4.1 nathanw void scsipi_run_queue __P((struct scsipi_channel *chan));
67 1.38.4.1 nathanw
68 1.38.4.1 nathanw void scsipi_completion_thread __P((void *));
69 1.38.4.1 nathanw
70 1.38.4.1 nathanw void scsipi_get_tag __P((struct scsipi_xfer *));
71 1.38.4.1 nathanw void scsipi_put_tag __P((struct scsipi_xfer *));
72 1.38.4.1 nathanw
73 1.38.4.1 nathanw int scsipi_get_resource __P((struct scsipi_channel *));
74 1.38.4.1 nathanw void scsipi_put_resource __P((struct scsipi_channel *));
75 1.38.4.1 nathanw __inline int scsipi_grow_resources __P((struct scsipi_channel *));
76 1.38.4.1 nathanw
77 1.38.4.1 nathanw void scsipi_async_event_max_openings __P((struct scsipi_channel *,
78 1.38.4.1 nathanw struct scsipi_max_openings *));
79 1.38.4.1 nathanw void scsipi_async_event_xfer_mode __P((struct scsipi_channel *,
80 1.38.4.1 nathanw struct scsipi_xfer_mode *));
81 1.38.4.1 nathanw void scsipi_async_event_channel_reset __P((struct scsipi_channel *));
82 1.6 thorpej
83 1.38.4.1 nathanw struct pool scsipi_xfer_pool;
84 1.2 bouyer
85 1.2 bouyer /*
86 1.38.4.1 nathanw * scsipi_init:
87 1.38.4.1 nathanw *
88 1.38.4.1 nathanw * Called when a scsibus or atapibus is attached to the system
89 1.38.4.1 nathanw * to initialize shared data structures.
90 1.6 thorpej */
91 1.6 thorpej void
92 1.6 thorpej scsipi_init()
93 1.6 thorpej {
94 1.6 thorpej static int scsipi_init_done;
95 1.6 thorpej
96 1.6 thorpej if (scsipi_init_done)
97 1.6 thorpej return;
98 1.6 thorpej scsipi_init_done = 1;
99 1.6 thorpej
100 1.6 thorpej /* Initialize the scsipi_xfer pool. */
101 1.6 thorpej pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
102 1.6 thorpej 0, 0, "scxspl", 0, NULL, NULL, M_DEVBUF);
103 1.6 thorpej }
104 1.6 thorpej
105 1.6 thorpej /*
106 1.38.4.1 nathanw * scsipi_channel_init:
107 1.38.4.1 nathanw *
108 1.38.4.1 nathanw * Initialize a scsipi_channel when it is attached.
109 1.38.4.1 nathanw */
110 1.38.4.1 nathanw int
111 1.38.4.1 nathanw scsipi_channel_init(chan)
112 1.38.4.1 nathanw struct scsipi_channel *chan;
113 1.38.4.1 nathanw {
114 1.38.4.1 nathanw size_t nbytes;
115 1.38.4.1 nathanw int i;
116 1.38.4.1 nathanw
117 1.38.4.1 nathanw /* Initialize shared data. */
118 1.38.4.1 nathanw scsipi_init();
119 1.38.4.1 nathanw
120 1.38.4.1 nathanw /* Initialize the queues. */
121 1.38.4.1 nathanw TAILQ_INIT(&chan->chan_queue);
122 1.38.4.1 nathanw TAILQ_INIT(&chan->chan_complete);
123 1.38.4.1 nathanw
124 1.38.4.1 nathanw nbytes = chan->chan_ntargets * sizeof(struct scsipi_periph **);
125 1.38.4.1 nathanw chan->chan_periphs = malloc(nbytes, M_DEVBUF, M_NOWAIT);
126 1.38.4.1 nathanw if (chan->chan_periphs == NULL)
127 1.38.4.1 nathanw return (ENOMEM);
128 1.38.4.1 nathanw
129 1.38.4.1 nathanw
130 1.38.4.1 nathanw nbytes = chan->chan_nluns * sizeof(struct scsipi_periph *);
131 1.38.4.1 nathanw for (i = 0; i < chan->chan_ntargets; i++) {
132 1.38.4.1 nathanw chan->chan_periphs[i] = malloc(nbytes, M_DEVBUF, M_NOWAIT);
133 1.38.4.1 nathanw if (chan->chan_periphs[i] == NULL) {
134 1.38.4.1 nathanw while (--i >= 0) {
135 1.38.4.1 nathanw free(chan->chan_periphs[i], M_DEVBUF);
136 1.38.4.1 nathanw }
137 1.38.4.1 nathanw return (ENOMEM);
138 1.38.4.1 nathanw }
139 1.38.4.1 nathanw memset(chan->chan_periphs[i], 0, nbytes);
140 1.38.4.1 nathanw }
141 1.38.4.1 nathanw
142 1.38.4.1 nathanw /*
143 1.38.4.1 nathanw * Create the asynchronous completion thread.
144 1.38.4.1 nathanw */
145 1.38.4.1 nathanw kthread_create(scsipi_create_completion_thread, chan);
146 1.38.4.1 nathanw return (0);
147 1.38.4.1 nathanw }
148 1.38.4.1 nathanw
149 1.38.4.1 nathanw /*
150 1.38.4.1 nathanw * scsipi_channel_shutdown:
151 1.38.4.1 nathanw *
152 1.38.4.1 nathanw * Shutdown a scsipi_channel.
153 1.38.4.1 nathanw */
154 1.38.4.1 nathanw void
155 1.38.4.1 nathanw scsipi_channel_shutdown(chan)
156 1.38.4.1 nathanw struct scsipi_channel *chan;
157 1.38.4.1 nathanw {
158 1.38.4.1 nathanw
159 1.38.4.1 nathanw /*
160 1.38.4.1 nathanw * Shut down the completion thread.
161 1.38.4.1 nathanw */
162 1.38.4.1 nathanw chan->chan_flags |= SCSIPI_CHAN_SHUTDOWN;
163 1.38.4.1 nathanw wakeup(&chan->chan_complete);
164 1.38.4.1 nathanw
165 1.38.4.1 nathanw /*
166 1.38.4.1 nathanw * Now wait for the thread to exit.
167 1.38.4.1 nathanw */
168 1.38.4.1 nathanw while (chan->chan_thread != NULL)
169 1.38.4.1 nathanw (void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
170 1.38.4.1 nathanw }
171 1.38.4.1 nathanw
172 1.38.4.1 nathanw /*
173 1.38.4.1 nathanw * scsipi_insert_periph:
174 1.38.4.1 nathanw *
175 1.38.4.1 nathanw * Insert a periph into the channel.
176 1.38.4.1 nathanw */
177 1.38.4.1 nathanw void
178 1.38.4.1 nathanw scsipi_insert_periph(chan, periph)
179 1.38.4.1 nathanw struct scsipi_channel *chan;
180 1.38.4.1 nathanw struct scsipi_periph *periph;
181 1.38.4.1 nathanw {
182 1.38.4.1 nathanw int s;
183 1.38.4.1 nathanw
184 1.38.4.1 nathanw s = splbio();
185 1.38.4.1 nathanw chan->chan_periphs[periph->periph_target][periph->periph_lun] = periph;
186 1.38.4.1 nathanw splx(s);
187 1.38.4.1 nathanw }
188 1.38.4.1 nathanw
189 1.38.4.1 nathanw /*
190 1.38.4.1 nathanw * scsipi_remove_periph:
191 1.38.4.1 nathanw *
192 1.38.4.1 nathanw * Remove a periph from the channel.
193 1.38.4.1 nathanw */
194 1.38.4.1 nathanw void
195 1.38.4.1 nathanw scsipi_remove_periph(chan, periph)
196 1.38.4.1 nathanw struct scsipi_channel *chan;
197 1.38.4.1 nathanw struct scsipi_periph *periph;
198 1.38.4.1 nathanw {
199 1.38.4.1 nathanw int s;
200 1.38.4.1 nathanw
201 1.38.4.1 nathanw s = splbio();
202 1.38.4.1 nathanw chan->chan_periphs[periph->periph_target][periph->periph_lun] = NULL;
203 1.38.4.1 nathanw splx(s);
204 1.38.4.1 nathanw }
205 1.38.4.1 nathanw
206 1.38.4.1 nathanw /*
207 1.38.4.1 nathanw * scsipi_lookup_periph:
208 1.38.4.1 nathanw *
209 1.38.4.1 nathanw * Lookup a periph on the specified channel.
210 1.38.4.1 nathanw */
211 1.38.4.1 nathanw struct scsipi_periph *
212 1.38.4.1 nathanw scsipi_lookup_periph(chan, target, lun)
213 1.38.4.1 nathanw struct scsipi_channel *chan;
214 1.38.4.1 nathanw int target, lun;
215 1.38.4.1 nathanw {
216 1.38.4.1 nathanw struct scsipi_periph *periph;
217 1.38.4.1 nathanw int s;
218 1.38.4.1 nathanw
219 1.38.4.1 nathanw if (target >= chan->chan_ntargets ||
220 1.38.4.1 nathanw lun >= chan->chan_nluns)
221 1.38.4.1 nathanw return (NULL);
222 1.38.4.1 nathanw
223 1.38.4.1 nathanw s = splbio();
224 1.38.4.1 nathanw periph = chan->chan_periphs[target][lun];
225 1.38.4.1 nathanw splx(s);
226 1.38.4.1 nathanw
227 1.38.4.1 nathanw return (periph);
228 1.38.4.1 nathanw }
229 1.38.4.1 nathanw
230 1.38.4.1 nathanw /*
231 1.38.4.1 nathanw * scsipi_get_resource:
232 1.38.4.1 nathanw *
233 1.38.4.1 nathanw * Allocate a single xfer `resource' from the channel.
234 1.38.4.1 nathanw *
235 1.38.4.1 nathanw * NOTE: Must be called at splbio().
236 1.38.4.1 nathanw */
237 1.38.4.1 nathanw int
238 1.38.4.1 nathanw scsipi_get_resource(chan)
239 1.38.4.1 nathanw struct scsipi_channel *chan;
240 1.38.4.1 nathanw {
241 1.38.4.1 nathanw struct scsipi_adapter *adapt = chan->chan_adapter;
242 1.38.4.1 nathanw
243 1.38.4.1 nathanw if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
244 1.38.4.1 nathanw if (chan->chan_openings > 0) {
245 1.38.4.1 nathanw chan->chan_openings--;
246 1.38.4.1 nathanw return (1);
247 1.38.4.1 nathanw }
248 1.38.4.1 nathanw return (0);
249 1.38.4.1 nathanw }
250 1.38.4.1 nathanw
251 1.38.4.1 nathanw if (adapt->adapt_openings > 0) {
252 1.38.4.1 nathanw adapt->adapt_openings--;
253 1.38.4.1 nathanw return (1);
254 1.38.4.1 nathanw }
255 1.38.4.1 nathanw return (0);
256 1.38.4.1 nathanw }
257 1.38.4.1 nathanw
258 1.38.4.1 nathanw /*
259 1.38.4.1 nathanw * scsipi_grow_resources:
260 1.38.4.1 nathanw *
261 1.38.4.1 nathanw * Attempt to grow resources for a channel. If this succeeds,
262 1.38.4.1 nathanw * we allocate one for our caller.
263 1.38.4.1 nathanw *
264 1.38.4.1 nathanw * NOTE: Must be called at splbio().
265 1.38.4.1 nathanw */
266 1.38.4.1 nathanw __inline int
267 1.38.4.1 nathanw scsipi_grow_resources(chan)
268 1.38.4.1 nathanw struct scsipi_channel *chan;
269 1.38.4.1 nathanw {
270 1.38.4.1 nathanw
271 1.38.4.1 nathanw if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
272 1.38.4.1 nathanw scsipi_adapter_request(chan, ADAPTER_REQ_GROW_RESOURCES, NULL);
273 1.38.4.1 nathanw return (scsipi_get_resource(chan));
274 1.38.4.1 nathanw }
275 1.38.4.1 nathanw
276 1.38.4.1 nathanw return (0);
277 1.38.4.1 nathanw }
278 1.38.4.1 nathanw
279 1.38.4.1 nathanw /*
280 1.38.4.1 nathanw * scsipi_put_resource:
281 1.38.4.1 nathanw *
282 1.38.4.1 nathanw * Free a single xfer `resource' to the channel.
283 1.38.4.1 nathanw *
284 1.38.4.1 nathanw * NOTE: Must be called at splbio().
285 1.38.4.1 nathanw */
286 1.38.4.1 nathanw void
287 1.38.4.1 nathanw scsipi_put_resource(chan)
288 1.38.4.1 nathanw struct scsipi_channel *chan;
289 1.38.4.1 nathanw {
290 1.38.4.1 nathanw struct scsipi_adapter *adapt = chan->chan_adapter;
291 1.38.4.1 nathanw
292 1.38.4.1 nathanw if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
293 1.38.4.1 nathanw chan->chan_openings++;
294 1.38.4.1 nathanw else
295 1.38.4.1 nathanw adapt->adapt_openings++;
296 1.38.4.1 nathanw }
297 1.38.4.1 nathanw
298 1.38.4.1 nathanw /*
299 1.38.4.1 nathanw * scsipi_get_tag:
300 1.38.4.1 nathanw *
301 1.38.4.1 nathanw * Get a tag ID for the specified xfer.
302 1.38.4.1 nathanw *
303 1.38.4.1 nathanw * NOTE: Must be called at splbio().
304 1.38.4.1 nathanw */
305 1.38.4.1 nathanw void
306 1.38.4.1 nathanw scsipi_get_tag(xs)
307 1.38.4.1 nathanw struct scsipi_xfer *xs;
308 1.38.4.1 nathanw {
309 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
310 1.38.4.1 nathanw int word, bit, tag;
311 1.38.4.1 nathanw
312 1.38.4.1 nathanw for (word = 0; word < PERIPH_NTAGWORDS; word++) {
313 1.38.4.1 nathanw bit = ffs(periph->periph_freetags[word]);
314 1.38.4.1 nathanw if (bit != 0)
315 1.38.4.1 nathanw break;
316 1.38.4.1 nathanw }
317 1.38.4.1 nathanw #ifdef DIAGNOSTIC
318 1.38.4.1 nathanw if (word == PERIPH_NTAGWORDS) {
319 1.38.4.1 nathanw scsipi_printaddr(periph);
320 1.38.4.1 nathanw printf("no free tags\n");
321 1.38.4.1 nathanw panic("scsipi_get_tag");
322 1.38.4.1 nathanw }
323 1.38.4.1 nathanw #endif
324 1.38.4.1 nathanw
325 1.38.4.1 nathanw bit -= 1;
326 1.38.4.1 nathanw periph->periph_freetags[word] &= ~(1 << bit);
327 1.38.4.1 nathanw tag = (word << 5) | bit;
328 1.38.4.1 nathanw
329 1.38.4.1 nathanw /* XXX Should eventually disallow this completely. */
330 1.38.4.1 nathanw if (tag >= periph->periph_openings) {
331 1.38.4.1 nathanw scsipi_printaddr(periph);
332 1.38.4.1 nathanw printf("WARNING: tag %d greater than available openings %d\n",
333 1.38.4.1 nathanw tag, periph->periph_openings);
334 1.38.4.1 nathanw }
335 1.38.4.1 nathanw
336 1.38.4.1 nathanw xs->xs_tag_id = tag;
337 1.38.4.1 nathanw }
338 1.38.4.1 nathanw
339 1.38.4.1 nathanw /*
340 1.38.4.1 nathanw * scsipi_put_tag:
341 1.38.4.1 nathanw *
342 1.38.4.1 nathanw * Put the tag ID for the specified xfer back into the pool.
343 1.38.4.1 nathanw *
344 1.38.4.1 nathanw * NOTE: Must be called at splbio().
345 1.2 bouyer */
346 1.38.4.1 nathanw void
347 1.38.4.1 nathanw scsipi_put_tag(xs)
348 1.38.4.1 nathanw struct scsipi_xfer *xs;
349 1.38.4.1 nathanw {
350 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
351 1.38.4.1 nathanw int word, bit;
352 1.38.4.1 nathanw
353 1.38.4.1 nathanw word = xs->xs_tag_id >> 5;
354 1.38.4.1 nathanw bit = xs->xs_tag_id & 0x1f;
355 1.38.4.1 nathanw
356 1.38.4.1 nathanw periph->periph_freetags[word] |= (1 << bit);
357 1.38.4.1 nathanw }
358 1.2 bouyer
359 1.38.4.1 nathanw /*
360 1.38.4.1 nathanw * scsipi_get_xs:
361 1.38.4.1 nathanw *
362 1.38.4.1 nathanw * Allocate an xfer descriptor and associate it with the
363 1.38.4.1 nathanw * specified peripherial. If the peripherial has no more
364 1.38.4.1 nathanw * available command openings, we either block waiting for
365 1.38.4.1 nathanw * one to become available, or fail.
366 1.38.4.1 nathanw */
367 1.2 bouyer struct scsipi_xfer *
368 1.38.4.1 nathanw scsipi_get_xs(periph, flags)
369 1.38.4.1 nathanw struct scsipi_periph *periph;
370 1.38.4.1 nathanw int flags;
371 1.2 bouyer {
372 1.2 bouyer struct scsipi_xfer *xs;
373 1.2 bouyer int s;
374 1.2 bouyer
375 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
376 1.6 thorpej
377 1.24 thorpej /*
378 1.24 thorpej * If we're cold, make sure we poll.
379 1.24 thorpej */
380 1.24 thorpej if (cold)
381 1.24 thorpej flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;
382 1.24 thorpej
383 1.38.4.1 nathanw #ifdef DIAGNOSTIC
384 1.38.4.1 nathanw /*
385 1.38.4.1 nathanw * URGENT commands can never be ASYNC.
386 1.38.4.1 nathanw */
387 1.38.4.1 nathanw if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
388 1.38.4.1 nathanw (XS_CTL_URGENT|XS_CTL_ASYNC)) {
389 1.38.4.1 nathanw scsipi_printaddr(periph);
390 1.38.4.1 nathanw printf("URGENT and ASYNC\n");
391 1.38.4.1 nathanw panic("scsipi_get_xs");
392 1.38.4.1 nathanw }
393 1.38.4.1 nathanw #endif
394 1.38.4.1 nathanw
395 1.2 bouyer s = splbio();
396 1.38.4.1 nathanw /*
397 1.38.4.1 nathanw * Wait for a command opening to become available. Rules:
398 1.38.4.1 nathanw *
399 1.38.4.1 nathanw * - All xfers must wait for an available opening.
400 1.38.4.1 nathanw * Exception: URGENT xfers can proceed when
401 1.38.4.1 nathanw * active == openings, because we use the opening
402 1.38.4.1 nathanw * of the command we're recovering for.
403 1.38.4.1 nathanw * - if the periph has sense pending, only URGENT & REQSENSE
404 1.38.4.1 nathanw * xfers may proceed.
405 1.38.4.1 nathanw *
406 1.38.4.1 nathanw * - If the periph is recovering, only URGENT xfers may
407 1.38.4.1 nathanw * proceed.
408 1.38.4.1 nathanw *
409 1.38.4.1 nathanw * - If the periph is currently executing a recovery
410 1.38.4.1 nathanw * command, URGENT commands must block, because only
411 1.38.4.1 nathanw * one recovery command can execute at a time.
412 1.38.4.1 nathanw */
413 1.38.4.1 nathanw for (;;) {
414 1.38.4.1 nathanw if (flags & XS_CTL_URGENT) {
415 1.38.4.1 nathanw if (periph->periph_active > periph->periph_openings)
416 1.38.4.1 nathanw goto wait_for_opening;
417 1.38.4.1 nathanw if (periph->periph_flags & PERIPH_SENSE) {
418 1.38.4.1 nathanw if ((flags & XS_CTL_REQSENSE) == 0)
419 1.38.4.1 nathanw goto wait_for_opening;
420 1.38.4.1 nathanw } else {
421 1.38.4.1 nathanw if ((periph->periph_flags &
422 1.38.4.1 nathanw PERIPH_RECOVERY_ACTIVE) != 0)
423 1.38.4.1 nathanw goto wait_for_opening;
424 1.38.4.1 nathanw periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
425 1.38.4.1 nathanw }
426 1.38.4.1 nathanw break;
427 1.38.4.1 nathanw }
428 1.38.4.1 nathanw if (periph->periph_active >= periph->periph_openings ||
429 1.38.4.1 nathanw (periph->periph_flags & PERIPH_RECOVERING) != 0)
430 1.38.4.1 nathanw goto wait_for_opening;
431 1.38.4.1 nathanw periph->periph_active++;
432 1.38.4.1 nathanw break;
433 1.38.4.1 nathanw
434 1.38.4.1 nathanw wait_for_opening:
435 1.38.4.1 nathanw if (flags & XS_CTL_NOSLEEP) {
436 1.2 bouyer splx(s);
437 1.38.4.1 nathanw return (NULL);
438 1.2 bouyer }
439 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
440 1.38.4.1 nathanw periph->periph_flags |= PERIPH_WAITING;
441 1.38.4.1 nathanw (void) tsleep(periph, PRIBIO, "getxs", 0);
442 1.2 bouyer }
443 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
444 1.6 thorpej xs = pool_get(&scsipi_xfer_pool,
445 1.24 thorpej ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
446 1.38.4.1 nathanw if (xs == NULL) {
447 1.38.4.1 nathanw if (flags & XS_CTL_URGENT) {
448 1.38.4.1 nathanw if ((flags & XS_CTL_REQSENSE) == 0)
449 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
450 1.38.4.1 nathanw } else
451 1.38.4.1 nathanw periph->periph_active--;
452 1.38.4.1 nathanw scsipi_printaddr(periph);
453 1.38.4.1 nathanw printf("unable to allocate %sscsipi_xfer\n",
454 1.38.4.1 nathanw (flags & XS_CTL_URGENT) ? "URGENT " : "");
455 1.2 bouyer }
456 1.6 thorpej splx(s);
457 1.2 bouyer
458 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
459 1.6 thorpej
460 1.7 scottr if (xs != NULL) {
461 1.30 thorpej callout_init(&xs->xs_callout);
462 1.38.4.1 nathanw memset(xs, 0, sizeof(*xs));
463 1.38.4.1 nathanw xs->xs_periph = periph;
464 1.24 thorpej xs->xs_control = flags;
465 1.37 fvdl xs->xs_status = 0;
466 1.38.4.1 nathanw s = splbio();
467 1.38.4.1 nathanw TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
468 1.38.4.1 nathanw splx(s);
469 1.7 scottr }
470 1.3 enami return (xs);
471 1.2 bouyer }
472 1.2 bouyer
473 1.2 bouyer /*
474 1.38.4.1 nathanw * scsipi_put_xs:
475 1.38.4.1 nathanw *
476 1.38.4.1 nathanw * Release an xfer descriptor, decreasing the outstanding command
477 1.38.4.1 nathanw * count for the peripherial. If there is a thread waiting for
478 1.38.4.1 nathanw * an opening, wake it up. If not, kick any queued I/O the
479 1.38.4.1 nathanw * peripherial may have.
480 1.6 thorpej *
481 1.38.4.1 nathanw * NOTE: Must be called at splbio().
482 1.2 bouyer */
483 1.3 enami void
484 1.38.4.1 nathanw scsipi_put_xs(xs)
485 1.2 bouyer struct scsipi_xfer *xs;
486 1.2 bouyer {
487 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
488 1.38.4.1 nathanw int flags = xs->xs_control;
489 1.2 bouyer
490 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
491 1.38.4.1 nathanw
492 1.38.4.1 nathanw TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
493 1.6 thorpej pool_put(&scsipi_xfer_pool, xs);
494 1.2 bouyer
495 1.38.4.1 nathanw #ifdef DIAGNOSTIC
496 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
497 1.38.4.1 nathanw periph->periph_active == 0) {
498 1.38.4.1 nathanw scsipi_printaddr(periph);
499 1.38.4.1 nathanw printf("recovery without a command to recovery for\n");
500 1.38.4.1 nathanw panic("scsipi_put_xs");
501 1.38.4.1 nathanw }
502 1.38.4.1 nathanw #endif
503 1.38.4.1 nathanw
504 1.38.4.1 nathanw if (flags & XS_CTL_URGENT) {
505 1.38.4.1 nathanw if ((flags & XS_CTL_REQSENSE) == 0)
506 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
507 1.38.4.1 nathanw } else
508 1.38.4.1 nathanw periph->periph_active--;
509 1.38.4.1 nathanw if (periph->periph_active == 0 &&
510 1.38.4.1 nathanw (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
511 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_WAITDRAIN;
512 1.38.4.1 nathanw wakeup(&periph->periph_active);
513 1.38.4.1 nathanw }
514 1.38.4.1 nathanw
515 1.38.4.1 nathanw if (periph->periph_flags & PERIPH_WAITING) {
516 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_WAITING;
517 1.38.4.1 nathanw wakeup(periph);
518 1.2 bouyer } else {
519 1.38.4.1 nathanw if (periph->periph_switch->psw_start != NULL) {
520 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
521 1.3 enami ("calling private start()\n"));
522 1.38.4.1 nathanw (*periph->periph_switch->psw_start)(periph);
523 1.2 bouyer }
524 1.2 bouyer }
525 1.15 thorpej }
526 1.15 thorpej
527 1.15 thorpej /*
528 1.38.4.1 nathanw * scsipi_channel_freeze:
529 1.38.4.1 nathanw *
530 1.38.4.1 nathanw * Freeze a channel's xfer queue.
531 1.38.4.1 nathanw */
532 1.38.4.1 nathanw void
533 1.38.4.1 nathanw scsipi_channel_freeze(chan, count)
534 1.38.4.1 nathanw struct scsipi_channel *chan;
535 1.38.4.1 nathanw int count;
536 1.38.4.1 nathanw {
537 1.38.4.1 nathanw int s;
538 1.38.4.1 nathanw
539 1.38.4.1 nathanw s = splbio();
540 1.38.4.1 nathanw chan->chan_qfreeze += count;
541 1.38.4.1 nathanw splx(s);
542 1.38.4.1 nathanw }
543 1.38.4.1 nathanw
544 1.38.4.1 nathanw /*
545 1.38.4.1 nathanw * scsipi_channel_thaw:
546 1.38.4.1 nathanw *
547 1.38.4.1 nathanw * Thaw a channel's xfer queue.
548 1.38.4.1 nathanw */
549 1.38.4.1 nathanw void
550 1.38.4.1 nathanw scsipi_channel_thaw(chan, count)
551 1.38.4.1 nathanw struct scsipi_channel *chan;
552 1.38.4.1 nathanw int count;
553 1.38.4.1 nathanw {
554 1.38.4.1 nathanw int s;
555 1.38.4.1 nathanw
556 1.38.4.1 nathanw s = splbio();
557 1.38.4.1 nathanw chan->chan_qfreeze -= count;
558 1.38.4.1 nathanw /*
559 1.38.4.1 nathanw * Don't let the freeze count go negative.
560 1.38.4.1 nathanw *
561 1.38.4.1 nathanw * Presumably the adapter driver could keep track of this,
562 1.38.4.1 nathanw * but it might just be easier to do this here so as to allow
563 1.38.4.1 nathanw * multiple callers, including those outside the adapter driver.
564 1.38.4.1 nathanw */
565 1.38.4.1 nathanw if (chan->chan_qfreeze < 0) {
566 1.38.4.1 nathanw chan->chan_qfreeze = 0;
567 1.38.4.1 nathanw }
568 1.38.4.1 nathanw splx(s);
569 1.38.4.1 nathanw /*
570 1.38.4.1 nathanw * Kick the channel's queue here. Note, we may be running in
571 1.38.4.1 nathanw * interrupt context (softclock or HBA's interrupt), so the adapter
572 1.38.4.1 nathanw * driver had better not sleep.
573 1.38.4.1 nathanw */
574 1.38.4.1 nathanw if (chan->chan_qfreeze == 0)
575 1.38.4.1 nathanw scsipi_run_queue(chan);
576 1.38.4.1 nathanw }
577 1.38.4.1 nathanw
/*
 * scsipi_channel_timed_thaw:
 *
 *	Callout-compatible wrapper: thaw a channel by one after a
 *	timeout expires (runs the queue if the count reaches 0).
 */
void
scsipi_channel_timed_thaw(arg)
	void *arg;
{

	scsipi_channel_thaw((struct scsipi_channel *)arg, 1);
}
592 1.38.4.1 nathanw
593 1.38.4.1 nathanw /*
594 1.38.4.1 nathanw * scsipi_periph_freeze:
595 1.38.4.1 nathanw *
596 1.38.4.1 nathanw * Freeze a device's xfer queue.
597 1.38.4.1 nathanw */
598 1.38.4.1 nathanw void
599 1.38.4.1 nathanw scsipi_periph_freeze(periph, count)
600 1.38.4.1 nathanw struct scsipi_periph *periph;
601 1.38.4.1 nathanw int count;
602 1.38.4.1 nathanw {
603 1.38.4.1 nathanw int s;
604 1.38.4.1 nathanw
605 1.38.4.1 nathanw s = splbio();
606 1.38.4.1 nathanw periph->periph_qfreeze += count;
607 1.38.4.1 nathanw splx(s);
608 1.38.4.1 nathanw }
609 1.38.4.1 nathanw
610 1.38.4.1 nathanw /*
611 1.38.4.1 nathanw * scsipi_periph_thaw:
612 1.38.4.1 nathanw *
613 1.38.4.1 nathanw * Thaw a device's xfer queue.
614 1.38.4.1 nathanw */
615 1.38.4.1 nathanw void
616 1.38.4.1 nathanw scsipi_periph_thaw(periph, count)
617 1.38.4.1 nathanw struct scsipi_periph *periph;
618 1.38.4.1 nathanw int count;
619 1.38.4.1 nathanw {
620 1.38.4.1 nathanw int s;
621 1.38.4.1 nathanw
622 1.38.4.1 nathanw s = splbio();
623 1.38.4.1 nathanw periph->periph_qfreeze -= count;
624 1.38.4.1 nathanw if (periph->periph_qfreeze == 0 &&
625 1.38.4.1 nathanw (periph->periph_flags & PERIPH_WAITING) != 0)
626 1.38.4.1 nathanw wakeup(periph);
627 1.38.4.1 nathanw splx(s);
628 1.38.4.1 nathanw }
629 1.38.4.1 nathanw
630 1.38.4.1 nathanw /*
631 1.38.4.1 nathanw * scsipi_periph_timed_thaw:
632 1.38.4.1 nathanw *
633 1.38.4.1 nathanw * Thaw a device after some time has expired.
634 1.38.4.1 nathanw */
635 1.38.4.1 nathanw void
636 1.38.4.1 nathanw scsipi_periph_timed_thaw(arg)
637 1.38.4.1 nathanw void *arg;
638 1.38.4.1 nathanw {
639 1.38.4.1 nathanw struct scsipi_periph *periph = arg;
640 1.38.4.1 nathanw
641 1.38.4.1 nathanw callout_stop(&periph->periph_callout);
642 1.38.4.1 nathanw scsipi_periph_thaw(periph, 1);
643 1.38.4.1 nathanw
644 1.38.4.1 nathanw /*
645 1.38.4.1 nathanw * Kick the channel's queue here. Note, we're running in
646 1.38.4.1 nathanw * interrupt context (softclock), so the adapter driver
647 1.38.4.1 nathanw * had better not sleep.
648 1.38.4.1 nathanw */
649 1.38.4.1 nathanw scsipi_run_queue(periph->periph_channel);
650 1.38.4.1 nathanw }
651 1.38.4.1 nathanw
652 1.38.4.1 nathanw /*
653 1.38.4.1 nathanw * scsipi_wait_drain:
654 1.38.4.1 nathanw *
655 1.38.4.1 nathanw * Wait for a periph's pending xfers to drain.
656 1.15 thorpej */
657 1.15 thorpej void
658 1.38.4.1 nathanw scsipi_wait_drain(periph)
659 1.38.4.1 nathanw struct scsipi_periph *periph;
660 1.15 thorpej {
661 1.15 thorpej int s;
662 1.15 thorpej
663 1.15 thorpej s = splbio();
664 1.38.4.1 nathanw while (periph->periph_active != 0) {
665 1.38.4.1 nathanw periph->periph_flags |= PERIPH_WAITDRAIN;
666 1.38.4.1 nathanw (void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
667 1.15 thorpej }
668 1.15 thorpej splx(s);
669 1.23 thorpej }
670 1.23 thorpej
671 1.23 thorpej /*
672 1.38.4.1 nathanw * scsipi_kill_pending:
673 1.23 thorpej *
674 1.38.4.1 nathanw * Kill off all pending xfers for a periph.
675 1.38.4.1 nathanw *
676 1.38.4.1 nathanw * NOTE: Must be called at splbio().
677 1.23 thorpej */
678 1.23 thorpej void
679 1.38.4.1 nathanw scsipi_kill_pending(periph)
680 1.38.4.1 nathanw struct scsipi_periph *periph;
681 1.23 thorpej {
682 1.23 thorpej
683 1.38.4.1 nathanw (*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
684 1.38.4.1 nathanw #ifdef DIAGNOSTIC
685 1.38.4.1 nathanw if (TAILQ_FIRST(&periph->periph_xferq) != NULL)
686 1.38.4.1 nathanw panic("scsipi_kill_pending");
687 1.38.4.1 nathanw #endif
688 1.38.4.1 nathanw scsipi_wait_drain(periph);
689 1.2 bouyer }
690 1.2 bouyer
691 1.2 bouyer /*
692 1.38.4.1 nathanw * scsipi_interpret_sense:
693 1.38.4.1 nathanw *
694 1.38.4.1 nathanw * Look at the returned sense and act on the error, determining
695 1.38.4.1 nathanw * the unix error number to pass back. (0 = report no error)
696 1.13 bouyer *
 697 1.38.4.1 nathanw * NOTE: If we return ERESTART, we are expected to have
 698 1.38.4.1 nathanw * thawed the device!
699 1.38.4.1 nathanw *
700 1.38.4.1 nathanw * THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
701 1.13 bouyer */
702 1.13 bouyer int
703 1.13 bouyer scsipi_interpret_sense(xs)
704 1.13 bouyer struct scsipi_xfer *xs;
705 1.13 bouyer {
706 1.13 bouyer struct scsipi_sense_data *sense;
707 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
708 1.13 bouyer u_int8_t key;
709 1.13 bouyer u_int32_t info;
710 1.13 bouyer int error;
711 1.13 bouyer #ifndef SCSIVERBOSE
712 1.13 bouyer static char *error_mes[] = {
713 1.13 bouyer "soft error (corrected)",
714 1.13 bouyer "not ready", "medium error",
715 1.13 bouyer "non-media hardware failure", "illegal request",
716 1.13 bouyer "unit attention", "readonly device",
717 1.13 bouyer "no data found", "vendor unique",
718 1.13 bouyer "copy aborted", "command aborted",
719 1.13 bouyer "search returned equal", "volume overflow",
720 1.13 bouyer "verify miscompare", "unknown error key"
721 1.13 bouyer };
722 1.13 bouyer #endif
723 1.13 bouyer
724 1.13 bouyer sense = &xs->sense.scsi_sense;
725 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
726 1.38.4.1 nathanw if (periph->periph_flags & SCSIPI_DB1) {
727 1.13 bouyer int count;
728 1.38.4.1 nathanw scsipi_printaddr(periph);
729 1.38.4.1 nathanw printf(" sense debug information:\n");
730 1.38.4.1 nathanw printf("\tcode 0x%x valid 0x%x\n",
731 1.13 bouyer sense->error_code & SSD_ERRCODE,
732 1.13 bouyer sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
733 1.38.4.1 nathanw printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
734 1.13 bouyer sense->segment,
735 1.13 bouyer sense->flags & SSD_KEY,
736 1.13 bouyer sense->flags & SSD_ILI ? 1 : 0,
737 1.13 bouyer sense->flags & SSD_EOM ? 1 : 0,
738 1.13 bouyer sense->flags & SSD_FILEMARK ? 1 : 0);
739 1.38.4.1 nathanw printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
740 1.38.4.1 nathanw "extra bytes\n",
741 1.13 bouyer sense->info[0],
742 1.13 bouyer sense->info[1],
743 1.13 bouyer sense->info[2],
744 1.13 bouyer sense->info[3],
745 1.13 bouyer sense->extra_len);
746 1.38.4.1 nathanw printf("\textra: ");
747 1.13 bouyer for (count = 0; count < ADD_BYTES_LIM(sense); count++)
748 1.13 bouyer printf("0x%x ", sense->cmd_spec_info[count]);
749 1.13 bouyer printf("\n");
750 1.13 bouyer }
751 1.38.4.1 nathanw #endif
752 1.38.4.1 nathanw
753 1.13 bouyer /*
754 1.38.4.1 nathanw * If the periph has it's own error handler, call it first.
755 1.13 bouyer * If it returns a legit error value, return that, otherwise
756 1.13 bouyer * it wants us to continue with normal error processing.
757 1.13 bouyer */
758 1.38.4.1 nathanw if (periph->periph_switch->psw_error != NULL) {
759 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
760 1.13 bouyer ("calling private err_handler()\n"));
761 1.38.4.1 nathanw error = (*periph->periph_switch->psw_error)(xs);
762 1.38.4.1 nathanw if (error != EJUSTRETURN)
763 1.38.4.1 nathanw return (error);
764 1.13 bouyer }
765 1.13 bouyer /* otherwise use the default */
766 1.13 bouyer switch (sense->error_code & SSD_ERRCODE) {
767 1.13 bouyer /*
768 1.13 bouyer * If it's code 70, use the extended stuff and
769 1.13 bouyer * interpret the key
770 1.13 bouyer */
771 1.13 bouyer case 0x71: /* delayed error */
772 1.38.4.1 nathanw scsipi_printaddr(periph);
773 1.13 bouyer key = sense->flags & SSD_KEY;
774 1.13 bouyer printf(" DEFERRED ERROR, key = 0x%x\n", key);
775 1.13 bouyer /* FALLTHROUGH */
776 1.13 bouyer case 0x70:
777 1.13 bouyer if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
778 1.13 bouyer info = _4btol(sense->info);
779 1.13 bouyer else
780 1.13 bouyer info = 0;
781 1.13 bouyer key = sense->flags & SSD_KEY;
782 1.13 bouyer
783 1.13 bouyer switch (key) {
784 1.13 bouyer case SKEY_NO_SENSE:
785 1.13 bouyer case SKEY_RECOVERED_ERROR:
786 1.13 bouyer if (xs->resid == xs->datalen && xs->datalen) {
787 1.13 bouyer /*
788 1.13 bouyer * Why is this here?
789 1.13 bouyer */
790 1.13 bouyer xs->resid = 0; /* not short read */
791 1.13 bouyer }
792 1.13 bouyer case SKEY_EQUAL:
793 1.13 bouyer error = 0;
794 1.13 bouyer break;
795 1.13 bouyer case SKEY_NOT_READY:
796 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
797 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
798 1.24 thorpej if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
799 1.13 bouyer return (0);
800 1.19 bouyer if (sense->add_sense_code == 0x3A &&
801 1.19 bouyer sense->add_sense_code_qual == 0x00)
802 1.19 bouyer error = ENODEV; /* Medium not present */
803 1.19 bouyer else
804 1.19 bouyer error = EIO;
805 1.24 thorpej if ((xs->xs_control & XS_CTL_SILENT) != 0)
806 1.19 bouyer return (error);
807 1.13 bouyer break;
808 1.13 bouyer case SKEY_ILLEGAL_REQUEST:
809 1.24 thorpej if ((xs->xs_control &
810 1.24 thorpej XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
811 1.13 bouyer return (0);
812 1.24 thorpej /*
813 1.24 thorpej * Handle the case where a device reports
814 1.24 thorpej * Logical Unit Not Supported during discovery.
815 1.24 thorpej */
816 1.24 thorpej if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
817 1.24 thorpej sense->add_sense_code == 0x25 &&
818 1.24 thorpej sense->add_sense_code_qual == 0x00)
819 1.24 thorpej return (EINVAL);
820 1.24 thorpej if ((xs->xs_control & XS_CTL_SILENT) != 0)
821 1.13 bouyer return (EIO);
822 1.13 bouyer error = EINVAL;
823 1.13 bouyer break;
824 1.13 bouyer case SKEY_UNIT_ATTENTION:
825 1.20 bouyer if (sense->add_sense_code == 0x29 &&
826 1.38.4.1 nathanw sense->add_sense_code_qual == 0x00) {
827 1.38.4.1 nathanw /* device or bus reset */
828 1.38.4.1 nathanw return (ERESTART);
829 1.38.4.1 nathanw }
830 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
831 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
832 1.24 thorpej if ((xs->xs_control &
833 1.24 thorpej XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
834 1.13 bouyer /* XXX Should reupload any transient state. */
835 1.38.4.1 nathanw (periph->periph_flags &
836 1.38.4.1 nathanw PERIPH_REMOVABLE) == 0) {
837 1.13 bouyer return (ERESTART);
838 1.38.4.1 nathanw }
839 1.24 thorpej if ((xs->xs_control & XS_CTL_SILENT) != 0)
840 1.13 bouyer return (EIO);
841 1.13 bouyer error = EIO;
842 1.13 bouyer break;
843 1.13 bouyer case SKEY_WRITE_PROTECT:
844 1.13 bouyer error = EROFS;
845 1.13 bouyer break;
846 1.13 bouyer case SKEY_BLANK_CHECK:
847 1.13 bouyer error = 0;
848 1.13 bouyer break;
849 1.13 bouyer case SKEY_ABORTED_COMMAND:
850 1.13 bouyer error = ERESTART;
851 1.13 bouyer break;
852 1.13 bouyer case SKEY_VOLUME_OVERFLOW:
853 1.13 bouyer error = ENOSPC;
854 1.13 bouyer break;
855 1.13 bouyer default:
856 1.13 bouyer error = EIO;
857 1.13 bouyer break;
858 1.13 bouyer }
859 1.13 bouyer
860 1.13 bouyer #ifdef SCSIVERBOSE
861 1.32 augustss if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
862 1.13 bouyer scsipi_print_sense(xs, 0);
863 1.13 bouyer #else
864 1.13 bouyer if (key) {
865 1.38.4.1 nathanw scsipi_printaddr(periph);
866 1.13 bouyer printf("%s", error_mes[key - 1]);
867 1.13 bouyer if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
868 1.13 bouyer switch (key) {
869 1.13 bouyer case SKEY_NOT_READY:
870 1.13 bouyer case SKEY_ILLEGAL_REQUEST:
871 1.13 bouyer case SKEY_UNIT_ATTENTION:
872 1.13 bouyer case SKEY_WRITE_PROTECT:
873 1.13 bouyer break;
874 1.13 bouyer case SKEY_BLANK_CHECK:
875 1.13 bouyer printf(", requested size: %d (decimal)",
876 1.13 bouyer info);
877 1.13 bouyer break;
878 1.13 bouyer case SKEY_ABORTED_COMMAND:
879 1.38.4.1 nathanw if (xs->xs_retries)
880 1.13 bouyer printf(", retrying");
881 1.13 bouyer printf(", cmd 0x%x, info 0x%x",
882 1.13 bouyer xs->cmd->opcode, info);
883 1.13 bouyer break;
884 1.13 bouyer default:
885 1.13 bouyer printf(", info = %d (decimal)", info);
886 1.13 bouyer }
887 1.13 bouyer }
888 1.13 bouyer if (sense->extra_len != 0) {
889 1.13 bouyer int n;
890 1.13 bouyer printf(", data =");
891 1.13 bouyer for (n = 0; n < sense->extra_len; n++)
892 1.13 bouyer printf(" %02x",
893 1.13 bouyer sense->cmd_spec_info[n]);
894 1.13 bouyer }
895 1.13 bouyer printf("\n");
896 1.13 bouyer }
897 1.13 bouyer #endif
898 1.13 bouyer return (error);
899 1.13 bouyer
900 1.13 bouyer /*
901 1.13 bouyer * Not code 70, just report it
902 1.13 bouyer */
903 1.13 bouyer default:
904 1.38.4.1 nathanw #if defined(SCSIDEBUG) || defined(DEBUG)
905 1.28 mjacob {
906 1.28 mjacob static char *uc = "undecodable sense error";
907 1.28 mjacob int i;
908 1.28 mjacob u_int8_t *cptr = (u_int8_t *) sense;
909 1.38.4.1 nathanw scsipi_printaddr(periph);
910 1.28 mjacob if (xs->cmd == &xs->cmdstore) {
911 1.28 mjacob printf("%s for opcode 0x%x, data=",
912 1.28 mjacob uc, xs->cmdstore.opcode);
913 1.28 mjacob } else {
914 1.28 mjacob printf("%s, data=", uc);
915 1.28 mjacob }
916 1.28 mjacob for (i = 0; i < sizeof (sense); i++)
917 1.28 mjacob printf(" 0x%02x", *(cptr++) & 0xff);
918 1.28 mjacob printf("\n");
919 1.28 mjacob }
920 1.28 mjacob #else
921 1.38.4.1 nathanw
922 1.38.4.1 nathanw scsipi_printaddr(periph);
923 1.17 mjacob printf("Sense Error Code 0x%x",
924 1.17 mjacob sense->error_code & SSD_ERRCODE);
925 1.13 bouyer if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
926 1.13 bouyer struct scsipi_sense_data_unextended *usense =
927 1.13 bouyer (struct scsipi_sense_data_unextended *)sense;
928 1.13 bouyer printf(" at block no. %d (decimal)",
929 1.13 bouyer _3btol(usense->block));
930 1.13 bouyer }
931 1.13 bouyer printf("\n");
932 1.28 mjacob #endif
933 1.13 bouyer return (EIO);
934 1.13 bouyer }
935 1.13 bouyer }
936 1.13 bouyer
937 1.13 bouyer /*
938 1.38.4.1 nathanw * scsipi_size:
939 1.38.4.1 nathanw *
940 1.38.4.1 nathanw * Find out from the device what its capacity is.
941 1.2 bouyer */
942 1.2 bouyer u_long
943 1.38.4.1 nathanw scsipi_size(periph, flags)
944 1.38.4.1 nathanw struct scsipi_periph *periph;
945 1.2 bouyer int flags;
946 1.2 bouyer {
947 1.2 bouyer struct scsipi_read_cap_data rdcap;
948 1.2 bouyer struct scsipi_read_capacity scsipi_cmd;
949 1.2 bouyer
950 1.2 bouyer bzero(&scsipi_cmd, sizeof(scsipi_cmd));
951 1.2 bouyer scsipi_cmd.opcode = READ_CAPACITY;
952 1.2 bouyer
953 1.2 bouyer /*
954 1.2 bouyer * If the command works, interpret the result as a 4 byte
955 1.2 bouyer * number of blocks
956 1.2 bouyer */
957 1.38.4.1 nathanw if (scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
958 1.3 enami sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
959 1.38 enami SCSIPIRETRIES, 20000, NULL,
960 1.38 enami flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK) != 0) {
961 1.38.4.1 nathanw scsipi_printaddr(periph);
962 1.2 bouyer printf("could not get size\n");
963 1.3 enami return (0);
964 1.2 bouyer }
965 1.2 bouyer
966 1.3 enami return (_4btol(rdcap.addr) + 1);
967 1.2 bouyer }
968 1.2 bouyer
969 1.2 bouyer /*
970 1.38.4.1 nathanw * scsipi_test_unit_ready:
971 1.38.4.1 nathanw *
972 1.38.4.1 nathanw * Issue a `test unit ready' request.
973 1.2 bouyer */
974 1.3 enami int
975 1.38.4.1 nathanw scsipi_test_unit_ready(periph, flags)
976 1.38.4.1 nathanw struct scsipi_periph *periph;
977 1.2 bouyer int flags;
978 1.2 bouyer {
979 1.2 bouyer struct scsipi_test_unit_ready scsipi_cmd;
980 1.2 bouyer
981 1.2 bouyer /* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
982 1.38.4.1 nathanw if (periph->periph_quirks & PQUIRK_NOTUR)
983 1.3 enami return (0);
984 1.2 bouyer
985 1.2 bouyer bzero(&scsipi_cmd, sizeof(scsipi_cmd));
986 1.2 bouyer scsipi_cmd.opcode = TEST_UNIT_READY;
987 1.2 bouyer
988 1.38.4.1 nathanw return (scsipi_command(periph,
989 1.3 enami (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
990 1.29 bouyer 0, 0, SCSIPIRETRIES, 10000, NULL, flags));
991 1.2 bouyer }
992 1.2 bouyer
993 1.2 bouyer /*
994 1.38.4.1 nathanw * scsipi_inquire:
995 1.38.4.1 nathanw *
996 1.38.4.1 nathanw * Ask the device about itself.
997 1.2 bouyer */
998 1.3 enami int
999 1.38.4.1 nathanw scsipi_inquire(periph, inqbuf, flags)
1000 1.38.4.1 nathanw struct scsipi_periph *periph;
1001 1.2 bouyer struct scsipi_inquiry_data *inqbuf;
1002 1.2 bouyer int flags;
1003 1.2 bouyer {
1004 1.2 bouyer struct scsipi_inquiry scsipi_cmd;
1005 1.2 bouyer
1006 1.2 bouyer bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1007 1.2 bouyer scsipi_cmd.opcode = INQUIRY;
1008 1.2 bouyer scsipi_cmd.length = sizeof(struct scsipi_inquiry_data);
1009 1.2 bouyer
1010 1.38.4.1 nathanw return (scsipi_command(periph,
1011 1.3 enami (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1012 1.3 enami (u_char *) inqbuf, sizeof(struct scsipi_inquiry_data),
1013 1.29 bouyer SCSIPIRETRIES, 10000, NULL, XS_CTL_DATA_IN | flags));
1014 1.2 bouyer }
1015 1.2 bouyer
1016 1.2 bouyer /*
1017 1.38.4.1 nathanw * scsipi_prevent:
1018 1.38.4.1 nathanw *
1019 1.38.4.1 nathanw * Prevent or allow the user to remove the media
1020 1.2 bouyer */
1021 1.3 enami int
1022 1.38.4.1 nathanw scsipi_prevent(periph, type, flags)
1023 1.38.4.1 nathanw struct scsipi_periph *periph;
1024 1.2 bouyer int type, flags;
1025 1.2 bouyer {
1026 1.2 bouyer struct scsipi_prevent scsipi_cmd;
1027 1.2 bouyer
1028 1.38.4.1 nathanw if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1029 1.3 enami return (0);
1030 1.2 bouyer
1031 1.2 bouyer bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1032 1.2 bouyer scsipi_cmd.opcode = PREVENT_ALLOW;
1033 1.2 bouyer scsipi_cmd.how = type;
1034 1.38.4.1 nathanw
1035 1.38.4.1 nathanw return (scsipi_command(periph,
1036 1.3 enami (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1037 1.29 bouyer 0, 0, SCSIPIRETRIES, 5000, NULL, flags));
1038 1.2 bouyer }
1039 1.2 bouyer
1040 1.2 bouyer /*
1041 1.38.4.1 nathanw * scsipi_start:
1042 1.38.4.1 nathanw *
1043 1.38.4.1 nathanw * Send a START UNIT.
1044 1.2 bouyer */
1045 1.3 enami int
1046 1.38.4.1 nathanw scsipi_start(periph, type, flags)
1047 1.38.4.1 nathanw struct scsipi_periph *periph;
1048 1.2 bouyer int type, flags;
1049 1.2 bouyer {
1050 1.2 bouyer struct scsipi_start_stop scsipi_cmd;
1051 1.18 bouyer
1052 1.38.4.1 nathanw if (periph->periph_quirks & PQUIRK_NOSTARTUNIT)
1053 1.18 bouyer return 0;
1054 1.2 bouyer
1055 1.2 bouyer bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1056 1.2 bouyer scsipi_cmd.opcode = START_STOP;
1057 1.2 bouyer scsipi_cmd.byte2 = 0x00;
1058 1.2 bouyer scsipi_cmd.how = type;
1059 1.38.4.1 nathanw
1060 1.38.4.1 nathanw return (scsipi_command(periph,
1061 1.3 enami (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1062 1.29 bouyer 0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
1063 1.29 bouyer NULL, flags));
1064 1.2 bouyer }
1065 1.2 bouyer
1066 1.2 bouyer /*
1067 1.38.4.1 nathanw * scsipi_mode_sense, scsipi_mode_sense_big:
1068 1.38.4.1 nathanw * get a sense page from a device
1069 1.2 bouyer */
1070 1.2 bouyer
1071 1.38.4.1 nathanw int
1072 1.38.4.1 nathanw scsipi_mode_sense(periph, byte2, page, data, len, flags, retries, timeout)
1073 1.38.4.1 nathanw struct scsipi_periph *periph;
1074 1.38.4.1 nathanw int byte2, page, len, flags, retries, timeout;
1075 1.38.4.1 nathanw struct scsipi_mode_header *data;
1076 1.38.4.1 nathanw {
1077 1.38.4.1 nathanw struct scsipi_mode_sense scsipi_cmd;
1078 1.38.4.1 nathanw int error;
1079 1.38.4.1 nathanw
1080 1.38.4.1 nathanw bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1081 1.38.4.1 nathanw scsipi_cmd.opcode = MODE_SENSE;
1082 1.38.4.1 nathanw scsipi_cmd.byte2 = byte2;
1083 1.38.4.1 nathanw scsipi_cmd.page = page;
1084 1.38.4.1 nathanw if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
1085 1.38.4.1 nathanw _lto2b(len, scsipi_cmd.u_len.atapi.length);
1086 1.38.4.1 nathanw else
1087 1.38.4.1 nathanw scsipi_cmd.u_len.scsi.length = len & 0xff;
1088 1.38.4.1 nathanw error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1089 1.38.4.1 nathanw sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1090 1.38.4.1 nathanw flags | XS_CTL_DATA_IN);
1091 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
1092 1.38.4.1 nathanw ("scsipi_mode_sense: error=%d\n", error));
1093 1.38.4.1 nathanw return (error);
1094 1.38.4.1 nathanw }
1095 1.38.4.1 nathanw
1096 1.38.4.1 nathanw int
1097 1.38.4.1 nathanw scsipi_mode_sense_big(periph, byte2, page, data, len, flags, retries, timeout)
1098 1.38.4.1 nathanw struct scsipi_periph *periph;
1099 1.38.4.1 nathanw int byte2, page, len, flags, retries, timeout;
1100 1.38.4.1 nathanw struct scsipi_mode_header_big *data;
1101 1.38.4.1 nathanw {
1102 1.38.4.1 nathanw struct scsipi_mode_sense_big scsipi_cmd;
1103 1.38.4.1 nathanw int error;
1104 1.38.4.1 nathanw
1105 1.38.4.1 nathanw bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1106 1.38.4.1 nathanw scsipi_cmd.opcode = MODE_SENSE_BIG;
1107 1.38.4.1 nathanw scsipi_cmd.byte2 = byte2;
1108 1.38.4.1 nathanw scsipi_cmd.page = page;
1109 1.38.4.1 nathanw _lto2b(len, scsipi_cmd.length);
1110 1.38.4.1 nathanw error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1111 1.38.4.1 nathanw sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1112 1.38.4.1 nathanw flags | XS_CTL_DATA_IN);
1113 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
1114 1.38.4.1 nathanw ("scsipi_mode_sense_big: error=%d\n", error));
1115 1.38.4.1 nathanw return (error);
1116 1.38.4.1 nathanw }
1117 1.38.4.1 nathanw
1118 1.38.4.1 nathanw int
1119 1.38.4.1 nathanw scsipi_mode_select(periph, byte2, data, len, flags, retries, timeout)
1120 1.38.4.1 nathanw struct scsipi_periph *periph;
1121 1.38.4.1 nathanw int byte2, len, flags, retries, timeout;
1122 1.38.4.1 nathanw struct scsipi_mode_header *data;
1123 1.38.4.1 nathanw {
1124 1.38.4.1 nathanw struct scsipi_mode_select scsipi_cmd;
1125 1.38.4.1 nathanw int error;
1126 1.38.4.1 nathanw
1127 1.38.4.1 nathanw bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1128 1.38.4.1 nathanw scsipi_cmd.opcode = MODE_SELECT;
1129 1.38.4.1 nathanw scsipi_cmd.byte2 = byte2;
1130 1.38.4.1 nathanw if (scsipi_periph_bustype(periph) == SCSIPI_BUSTYPE_ATAPI)
1131 1.38.4.1 nathanw _lto2b(len, scsipi_cmd.u_len.atapi.length);
1132 1.38.4.1 nathanw else
1133 1.38.4.1 nathanw scsipi_cmd.u_len.scsi.length = len & 0xff;
1134 1.38.4.1 nathanw error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1135 1.38.4.1 nathanw sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1136 1.38.4.1 nathanw flags | XS_CTL_DATA_OUT);
1137 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
1138 1.38.4.1 nathanw ("scsipi_mode_select: error=%d\n", error));
1139 1.38.4.1 nathanw return (error);
1140 1.38.4.1 nathanw }
1141 1.38.4.1 nathanw
1142 1.38.4.1 nathanw int
1143 1.38.4.1 nathanw scsipi_mode_select_big(periph, byte2, data, len, flags, retries, timeout)
1144 1.38.4.1 nathanw struct scsipi_periph *periph;
1145 1.38.4.1 nathanw int byte2, len, flags, retries, timeout;
1146 1.38.4.1 nathanw struct scsipi_mode_header_big *data;
1147 1.38.4.1 nathanw {
1148 1.38.4.1 nathanw struct scsipi_mode_select_big scsipi_cmd;
1149 1.38.4.1 nathanw int error;
1150 1.38.4.1 nathanw
1151 1.38.4.1 nathanw bzero(&scsipi_cmd, sizeof(scsipi_cmd));
1152 1.38.4.1 nathanw scsipi_cmd.opcode = MODE_SELECT_BIG;
1153 1.38.4.1 nathanw scsipi_cmd.byte2 = byte2;
1154 1.38.4.1 nathanw _lto2b(len, scsipi_cmd.length);
1155 1.38.4.1 nathanw error = scsipi_command(periph, (struct scsipi_generic *)&scsipi_cmd,
1156 1.38.4.1 nathanw sizeof(scsipi_cmd), (void *)data, len, retries, timeout, NULL,
1157 1.38.4.1 nathanw flags | XS_CTL_DATA_OUT);
1158 1.38.4.1 nathanw SC_DEBUG(periph, SCSIPI_DB2,
1159 1.38.4.1 nathanw ("scsipi_mode_select: error=%d\n", error));
1160 1.38.4.1 nathanw return (error);
1161 1.38.4.1 nathanw }
1162 1.38.4.1 nathanw
/*
 * scsipi_done:
 *
 *	This routine is called by an adapter's interrupt handler when
 *	an xfer is completed.  It releases the channel resource and any
 *	tag, freezes the periph's queue on error, and either completes
 *	the xfer inline (no-error async case), wakes a waiting thread,
 *	or hands the xfer to the channel's completion thread.
 *	May be called in interrupt context.
 */
void
scsipi_done(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int s, freezecnt;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
#ifdef SCSIPI_DEBUG
	if (periph->periph_dbflags & SCSIPI_DB1)
		show_scsipi_cmd(xs);
#endif

	s = splbio();
	/*
	 * The resource this command was using is now free.
	 */
	scsipi_put_resource(chan);
	xs->xs_periph->periph_sent--;

	/*
	 * If the command was tagged, free the tag.
	 */
	if (XS_CTL_TAGTYPE(xs) != 0)
		scsipi_put_tag(xs);
	else
		periph->periph_flags &= ~PERIPH_UNTAG;

	/* Mark the command as `done'. */
	xs->xs_status |= XS_STS_DONE;

#ifdef DIAGNOSTIC
	/* ASYNC and POLL are mutually exclusive completion modes. */
	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
	    (XS_CTL_ASYNC|XS_CTL_POLL))
		panic("scsipi_done: ASYNC and POLL");
#endif

	/*
	 * If the xfer had an error of any sort, freeze the
	 * periph's queue.  Freeze it again if we were requested
	 * to do so in the xfer.
	 */
	freezecnt = 0;
	if (xs->error != XS_NOERROR)
		freezecnt++;
	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
		freezecnt++;
	if (freezecnt != 0)
		scsipi_periph_freeze(periph, freezecnt);

	/*
	 * Record the xfer with a pending sense, in case a SCSI reset is
	 * received before the thread is woken up.
	 */
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		periph->periph_flags |= PERIPH_SENSE;
		periph->periph_xscheck = xs;
	}

	/*
	 * If this was an xfer that was not to complete asynchronously,
	 * let the requesting thread perform error checking/handling
	 * in its context.
	 */
	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
		splx(s);
		/*
		 * If it's a polling job, just return, to unwind the
		 * call graph.  We don't need to restart the queue,
		 * because polling jobs are treated specially, and
		 * are really only used during crash dumps anyway
		 * (XXX or during boot-time autoconfiguration of
		 * ATAPI devices).
		 */
		if (xs->xs_control & XS_CTL_POLL)
			return;
		wakeup(xs);
		goto out;
	}

	/*
	 * Catch the extremely common case of I/O completing
	 * without error; no use in taking a context switch
	 * if we can handle it in interrupt context.
	 */
	if (xs->error == XS_NOERROR) {
		splx(s);
		(void) scsipi_complete(xs);
		goto out;
	}

	/*
	 * There is an error on this xfer.  Put it on the channel's
	 * completion queue, and wake up the completion thread.
	 */
	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
	splx(s);
	wakeup(&chan->chan_complete);

 out:
	/*
	 * If there are more xfers on the channel's queue, attempt to
	 * run them.
	 */
	scsipi_run_queue(chan);
}
1276 1.2 bouyer
/*
 * scsipi_complete:
 *
 *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
 *
 *	NOTE: This routine MUST be called with valid thread context
 *	except for the case where the following two conditions are
 *	true:
 *
 *		xs->error == XS_NOERROR
 *		XS_CTL_ASYNC is set in xs->xs_control
 *
 *	The semantics of this routine can be tricky, so here is an
 *	explanation:
 *
 *		0		Xfer completed successfully.
 *
 *		ERESTART	Xfer had an error, but was restarted.
 *
 *		anything else	Xfer had an error, return value is Unix
 *				errno.
 *
 *	If the return value is anything but ERESTART:
 *
 *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
 *		  the pool.
 *		- If there is a buf associated with the xfer,
 *		  it has been biodone()'d.
 */
int
scsipi_complete(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	struct buf *bp;
	int error, s;

#ifdef DIAGNOSTIC
	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
#endif
	/*
	 * If command terminated with a CHECK CONDITION, we need to issue a
	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
	 * we'll have the real status.
	 * Must be processed at splbio() to avoid missing a SCSI bus reset
	 * for this command.
	 */
	s = splbio();
	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
		/* request sense for a request sense ? */
		if (xs->xs_control & XS_CTL_REQSENSE) {
			scsipi_printaddr(periph);
			/*
			 * NOTE(review): no message is printed after
			 * scsipi_printaddr() here -- a printf() describing
			 * the nested-REQUEST-SENSE failure appears to be
			 * missing; confirm against later revisions.
			 */
			/* XXX maybe we should reset the device ? */
			/* we've been frozen because xs->error != XS_NOERROR */
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return EINVAL;
		}
		scsipi_request_sense(xs);
	}
	splx(s);
	/*
	 * If it's a user level request, bypass all usual completion
	 * processing, let the user work it out..
	 */
	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
		if (xs->error != XS_NOERROR)
			scsipi_periph_thaw(periph, 1);
		scsipi_user_done(xs);
		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
		return 0;
	}


	/* Map the adapter's completion status to a Unix errno. */
	switch (xs->error) {
	case XS_NOERROR:
		error = 0;
		break;

	case XS_SENSE:
	case XS_SHORTSENSE:
		/* Let the bus-specific handler interpret the sense data. */
		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
		break;

	case XS_RESOURCE_SHORTAGE:
		/*
		 * XXX Should freeze channel's queue.
		 */
		scsipi_printaddr(periph);
		printf("adapter resource shortage\n");
		/* FALLTHROUGH */

	case XS_BUSY:
		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
			struct scsipi_max_openings mo;

			/*
			 * We set the openings to active - 1, assuming that
			 * the command that got us here is the first one that
			 * can't fit into the device's queue.  If that's not
			 * the case, I guess we'll find out soon enough.
			 */
			mo.mo_target = periph->periph_target;
			mo.mo_lun = periph->periph_lun;
			if (periph->periph_active < periph->periph_openings)
				mo.mo_openings = periph->periph_active - 1;
			else
				mo.mo_openings = periph->periph_openings - 1;
#ifdef DIAGNOSTIC
			if (mo.mo_openings < 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in < 0 openings\n");
				panic("scsipi_done");
			}
#endif
			if (mo.mo_openings == 0) {
				scsipi_printaddr(periph);
				printf("QUEUE FULL resulted in 0 openings\n");
				mo.mo_openings = 1;
			}
			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
			error = ERESTART;
		} else if (xs->xs_retries != 0) {
			xs->xs_retries--;
			/*
			 * Wait one second, and try again.
			 */
			if (xs->xs_control & XS_CTL_POLL)
				delay(1000000);
			else {
				/* Freeze the queue; a callout thaws it later. */
				scsipi_periph_freeze(periph, 1);
				callout_reset(&periph->periph_callout,
				    hz, scsipi_periph_timed_thaw, periph);
			}
			error = ERESTART;
		} else
			error = EBUSY;
		break;

	case XS_REQUEUE:
		error = ERESTART;
		break;

	case XS_TIMEOUT:
		if (xs->xs_retries != 0) {
			xs->xs_retries--;
			error = ERESTART;
		} else
			error = EIO;
		break;

	case XS_SELTIMEOUT:
		/* XXX Disable device? */
		error = EIO;
		break;

	case XS_RESET:
		if (xs->xs_control & XS_CTL_REQSENSE) {
			/*
			 * request sense interrupted by reset: signal it
			 * with EINTR return code.
			 */
			error = EINTR;
		} else {
			if (xs->xs_retries != 0) {
				xs->xs_retries--;
				error = ERESTART;
			} else
				error = EIO;
		}
		break;

	default:
		scsipi_printaddr(periph);
		printf("invalid return code from adapter: %d\n", xs->error);
		error = EIO;
		break;
	}

	s = splbio();
	if (error == ERESTART) {
		/*
		 * If we get here, the periph has been thawed and frozen
		 * again if we had to issue recovery commands.  Alternatively,
		 * it may have been frozen again and in a timed thaw.  In
		 * any case, we thaw the periph once we re-enqueue the
		 * command.  Once the periph is fully thawed, it will begin
		 * operation again.
		 */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->xs_status &= ~XS_STS_DONE;
		xs->xs_requeuecnt++;
		error = scsipi_enqueue(xs);
		if (error == 0) {
			scsipi_periph_thaw(periph, 1);
			splx(s);
			return (ERESTART);
		}
		/* enqueue failed: fall through to normal completion. */
	}

	/*
	 * scsipi_done() freezes the queue if not XS_NOERROR.
	 * Thaw it here.
	 */
	if (xs->error != XS_NOERROR)
		scsipi_periph_thaw(periph, 1);


	/* Give the periph driver a chance at post-processing. */
	if (periph->periph_switch->psw_done)
		periph->periph_switch->psw_done(xs);
	if ((bp = xs->bp) != NULL) {
		if (error) {
			/* Report the whole transfer as failed. */
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
		} else {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
		}
		biodone(bp);
	}

	/* Async xfers are owned by us; return them to the pool. */
	if (xs->xs_control & XS_CTL_ASYNC)
		scsipi_put_xs(xs);
	splx(s);

	return (error);
}
1509 1.2 bouyer
1510 1.14 thorpej /*
1511 1.38.4.1 nathanw * Issue a request sense for the given scsipi_xfer. Called when the xfer
1512 1.38.4.1 nathanw * returns with a CHECK_CONDITION status. Must be called in valid thread
1513 1.38.4.1 nathanw * context and at splbio().
1514 1.38.4.1 nathanw */
1515 1.38.4.1 nathanw
1516 1.38.4.1 nathanw void
1517 1.38.4.1 nathanw scsipi_request_sense(xs)
1518 1.38.4.1 nathanw struct scsipi_xfer *xs;
1519 1.38.4.1 nathanw {
1520 1.38.4.1 nathanw struct scsipi_periph *periph = xs->xs_periph;
1521 1.38.4.1 nathanw int flags, error;
1522 1.38.4.1 nathanw struct scsipi_sense cmd;
1523 1.38.4.1 nathanw
1524 1.38.4.1 nathanw periph->periph_flags |= PERIPH_SENSE;
1525 1.38.4.1 nathanw
1526 1.38.4.1 nathanw /* if command was polling, request sense will too */
1527 1.38.4.1 nathanw flags = xs->xs_control & XS_CTL_POLL;
1528 1.38.4.1 nathanw /* Polling commands can't sleep */
1529 1.38.4.1 nathanw if (flags)
1530 1.38.4.1 nathanw flags |= XS_CTL_NOSLEEP;
1531 1.38.4.1 nathanw
1532 1.38.4.1 nathanw flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1533 1.38.4.1 nathanw XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1534 1.38.4.1 nathanw
1535 1.38.4.1 nathanw bzero(&cmd, sizeof(cmd));
1536 1.38.4.1 nathanw cmd.opcode = REQUEST_SENSE;
1537 1.38.4.1 nathanw cmd.length = sizeof(struct scsipi_sense_data);
1538 1.38.4.1 nathanw
1539 1.38.4.1 nathanw error = scsipi_command(periph,
1540 1.38.4.1 nathanw (struct scsipi_generic *) &cmd, sizeof(cmd),
1541 1.38.4.1 nathanw (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
1542 1.38.4.1 nathanw 0, 1000, NULL, flags);
1543 1.38.4.1 nathanw periph->periph_flags &= ~PERIPH_SENSE;
1544 1.38.4.1 nathanw periph->periph_xscheck = NULL;
1545 1.38.4.1 nathanw switch(error) {
1546 1.38.4.1 nathanw case 0:
1547 1.38.4.1 nathanw /* we have a valid sense */
1548 1.38.4.1 nathanw xs->error = XS_SENSE;
1549 1.38.4.1 nathanw return;
1550 1.38.4.1 nathanw case EINTR:
1551 1.38.4.1 nathanw /* REQUEST_SENSE interrupted by bus reset. */
1552 1.38.4.1 nathanw xs->error = XS_RESET;
1553 1.38.4.1 nathanw return;
1554 1.38.4.1 nathanw case EIO:
1555 1.38.4.1 nathanw /* request sense coudn't be performed */
1556 1.38.4.1 nathanw /*
1557 1.38.4.1 nathanw * XXX this isn't quite rigth but we don't have anything
1558 1.38.4.1 nathanw * better for now
1559 1.38.4.1 nathanw */
1560 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1561 1.38.4.1 nathanw return;
1562 1.38.4.1 nathanw default:
1563 1.38.4.1 nathanw /* Notify that request sense failed. */
1564 1.38.4.1 nathanw xs->error = XS_DRIVER_STUFFUP;
1565 1.38.4.1 nathanw scsipi_printaddr(periph);
1566 1.38.4.1 nathanw printf("request sense failed with error %d\n", error);
1567 1.38.4.1 nathanw return;
1568 1.38.4.1 nathanw }
1569 1.38.4.1 nathanw }
1570 1.38.4.1 nathanw
/*
 * scsipi_enqueue:
 *
 *	Enqueue an xfer on a channel.
 *
 *	Returns 0 on success, or EAGAIN when the xfer is to be polled
 *	but the channel queue is not empty (a polling command cannot
 *	wait behind other work).  Takes splbio() internally.
 */
int
scsipi_enqueue(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
	struct scsipi_xfer *qxs;
	int s;

	s = splbio();

	/*
	 * If the xfer is to be polled, and there are already jobs on
	 * the queue, we can't proceed.
	 */
	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		return (EAGAIN);
	}

	/*
	 * If we have an URGENT xfer, it's an error recovery command
	 * and it should just go on the head of the channel's queue.
	 */
	if (xs->xs_control & XS_CTL_URGENT) {
		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
		goto out;
	}

	/*
	 * If this xfer has already been on the queue before, we
	 * need to reinsert it in the correct order.  That order is:
	 *
	 *	Immediately before the first xfer for this periph
	 *	with a requeuecnt less than xs->xs_requeuecnt.
	 *
	 * Failing that, at the end of the queue.  (We'll end up
	 * there naturally.)
	 */
	if (xs->xs_requeuecnt != 0) {
		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
		     qxs = TAILQ_NEXT(qxs, channel_q)) {
			if (qxs->xs_periph == xs->xs_periph &&
			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
				break;
		}
		if (qxs != NULL) {
			/* Found the insertion point; go in right there. */
			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
			    channel_q);
			goto out;
		}
	}
	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
out:
	/* The caller may have asked us to thaw the periph once enqueued. */
	if (xs->xs_control & XS_CTL_THAW_PERIPH)
		scsipi_periph_thaw(xs->xs_periph, 1);
	splx(s);
	return (0);
}
1636 1.14 thorpej
/*
 * scsipi_run_queue:
 *
 *	Start as many xfers as possible running on the channel.
 *	Loops until no more eligible work can be started; holds
 *	splbio() while scanning and drops it before handing each
 *	xfer to the adapter.
 */
void
scsipi_run_queue(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	int s;

	for (;;) {
		s = splbio();

		/*
		 * If the channel is frozen, we can't do any work right
		 * now.
		 */
		if (chan->chan_qfreeze != 0) {
			splx(s);
			return;
		}

		/*
		 * Look for work to do, and make sure we can do it.
		 */
		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
		     xs = TAILQ_NEXT(xs, channel_q)) {
			periph = xs->xs_periph;

			/*
			 * Skip periphs with no free openings, frozen
			 * periphs, and periphs that already have an
			 * untagged command outstanding.
			 */
			if ((periph->periph_sent >= periph->periph_openings) ||
			    periph->periph_qfreeze != 0 ||
			    (periph->periph_flags & PERIPH_UNTAG) != 0)
				continue;

			/*
			 * While the periph is recovering (or fetching
			 * sense), only URGENT xfers may be issued.
			 */
			if ((periph->periph_flags &
			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
			    (xs->xs_control & XS_CTL_URGENT) == 0)
				continue;

			/*
			 * We can issue this xfer!
			 */
			goto got_one;
		}

		/*
		 * Can't find any work to do right now.
		 */
		splx(s);
		return;

got_one:
		/*
		 * Have an xfer to run. Allocate a resource from
		 * the adapter to run it. If we can't allocate that
		 * resource, we don't dequeue the xfer.
		 */
		if (scsipi_get_resource(chan) == 0) {
			/*
			 * Adapter is out of resources. If the adapter
			 * supports it, attempt to grow them.
			 */
			if (scsipi_grow_resources(chan) == 0) {
				/*
				 * Wasn't able to grow resources,
				 * nothing more we can do.
				 */
				if (xs->xs_control & XS_CTL_POLL) {
					scsipi_printaddr(xs->xs_periph);
					printf("polling command but no "
					    "adapter resources");
					/* We'll panic shortly... */
				}
				splx(s);

				/*
				 * XXX: We should be able to note that
				 * XXX: that resources are needed here!
				 */
				return;
			}
			/*
			 * scsipi_grow_resources() allocated the resource
			 * for us.
			 */
		}

		/*
		 * We have a resource to run this xfer, do it!
		 */
		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);

		/*
		 * If the command is to be tagged, allocate a tag ID
		 * for it.  Otherwise mark the periph so no other
		 * command runs until this untagged one completes.
		 */
		if (XS_CTL_TAGTYPE(xs) != 0)
			scsipi_get_tag(xs);
		else
			periph->periph_flags |= PERIPH_UNTAG;
		periph->periph_sent++;
		splx(s);

		/* Hand the xfer to the adapter with splbio() dropped. */
		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
	}
#ifdef DIAGNOSTIC
	panic("scsipi_run_queue: impossible");
#endif
}
1749 1.38.4.1 nathanw
/*
 * scsipi_execute_xs:
 *
 *	Begin execution of an xfer, waiting for it to complete, if necessary.
 *
 *	Returns EJUSTRETURN for asynchronous xfers (completion is handled
 *	by the channel's completion thread); otherwise returns the result
 *	of scsipi_complete() once the command has finished.
 */
int
scsipi_execute_xs(xs)
	struct scsipi_xfer *xs;
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct scsipi_channel *chan = periph->periph_channel;
	int async, poll, retries, error, s;

	/* Reset the xfer's per-attempt state before (re)issuing it. */
	xs->xs_status &= ~XS_STS_DONE;
	xs->error = XS_NOERROR;
	xs->resid = xs->datalen;
	xs->status = SCSI_OK;

#ifdef SCSIPI_DEBUG
	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
		printf("scsipi_execute_xs: ");
		show_scsipi_xs(xs);
		printf("\n");
	}
#endif

	/*
	 * Deal with command tagging:
	 *
	 *	- If the device's current operating mode doesn't
	 *	  include tagged queueing, clear the tag mask.
	 *
	 *	- If the device's current operating mode *does*
	 *	  include tagged queueing, set the tag_type in
	 *	  the xfer to the appropriate byte for the tag
	 *	  message.
	 */
	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
	    (xs->xs_control & XS_CTL_REQSENSE)) {
		xs->xs_control &= ~XS_CTL_TAGMASK;
		xs->xs_tag_type = 0;
	} else {
		/*
		 * If the request doesn't specify a tag, give Head
		 * tags to URGENT operations and Ordered tags to
		 * everything else.
		 */
		if (XS_CTL_TAGTYPE(xs) == 0) {
			if (xs->xs_control & XS_CTL_URGENT)
				xs->xs_control |= XS_CTL_HEAD_TAG;
			else
				xs->xs_control |= XS_CTL_ORDERED_TAG;
		}

		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_ORDERED_TAG:
			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
			break;

		case XS_CTL_SIMPLE_TAG:
			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
			break;

		case XS_CTL_HEAD_TAG:
			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid tag mask 0x%08x\n",
			    XS_CTL_TAGTYPE(xs));
			panic("scsipi_execute_xs");
		}
	}

	/* If the adaptor wants us to poll, poll. */
	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
		xs->xs_control |= XS_CTL_POLL;

	/*
	 * If we don't yet have a completion thread, or we are to poll for
	 * completion, clear the ASYNC flag.
	 */
	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
		xs->xs_control &= ~XS_CTL_ASYNC;

	async = (xs->xs_control & XS_CTL_ASYNC);
	poll = (xs->xs_control & XS_CTL_POLL);
	retries = xs->xs_retries;	/* for polling commands */

#ifdef DIAGNOSTIC
	if (async != 0 && xs->bp == NULL)
		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
#endif

	/*
	 * Enqueue the transfer.  If we're not polling for completion, this
	 * should ALWAYS return `no error'.
	 */
try_again:
	error = scsipi_enqueue(xs);
	if (error) {
		if (poll == 0) {
			scsipi_printaddr(periph);
			printf("not polling, but enqueue failed with %d\n",
			    error);
			panic("scsipi_execute_xs");
		}

		/* Polling command; retry the enqueue a bounded number
		 * of times, pausing between attempts. */
		scsipi_printaddr(periph);
		printf("failed to enqueue polling command");
		if (retries != 0) {
			printf(", retrying...\n");
			delay(1000000);
			retries--;
			goto try_again;
		}
		printf("\n");
		goto free_xs;
	}

restarted:
	scsipi_run_queue(chan);

	/*
	 * The xfer is enqueued, and possibly running.  If it's to be
	 * completed asynchronously, just return now.
	 */
	if (async)
		return (EJUSTRETURN);

	/*
	 * Not an asynchronous command; wait for it to complete.
	 * A polling command must already be done by the time we get
	 * here, or something is wrong.
	 */
	s = splbio();
	while ((xs->xs_status & XS_STS_DONE) == 0) {
		if (poll) {
			scsipi_printaddr(periph);
			printf("polling command not done\n");
			panic("scsipi_execute_xs");
		}
		(void) tsleep(xs, PRIBIO, "xscmd", 0);
	}
	splx(s);

	/*
	 * Command is complete.  scsipi_done() has awakened us to perform
	 * the error handling.
	 */
	error = scsipi_complete(xs);
	if (error == ERESTART)
		goto restarted;

	/*
	 * Command completed successfully or fatal error occurred.  Fall
	 * into....
	 */
free_xs:
	s = splbio();
	scsipi_put_xs(xs);
	splx(s);

	/*
	 * Kick the queue, keep it running in case it stopped for some
	 * reason.
	 */
	scsipi_run_queue(chan);

	return (error);
}
1920 1.38.4.1 nathanw
/*
 * scsipi_completion_thread:
 *
 *	This is the completion thread.  We wait for errors on
 *	asynchronous xfers, and perform the error handling
 *	function, restarting the command, if necessary.
 */
void
scsipi_completion_thread(arg)
	void *arg;
{
	struct scsipi_channel *chan = arg;
	struct scsipi_xfer *xs;
	int s;

	for (;;) {
		s = splbio();
		xs = TAILQ_FIRST(&chan->chan_complete);
		if (xs == NULL &&
		    (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) == 0) {
			/* No work and not shutting down; wait for either. */
			(void) tsleep(&chan->chan_complete, PRIBIO,
			    "sccomp", 0);
			splx(s);
			continue;
		}
		if (chan->chan_flags & SCSIPI_CHAN_SHUTDOWN) {
			/* Shutdown requested; leave the loop and exit. */
			splx(s);
			break;
		}
		TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
		splx(s);

		/*
		 * Have an xfer with an error; process it.
		 */
		(void) scsipi_complete(xs);

		/*
		 * Kick the queue; keep it running if it was stopped
		 * for some reason.
		 */
		scsipi_run_queue(chan);
	}

	/* Mark the thread gone before waking anyone waiting on it. */
	chan->chan_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(&chan->chan_thread);

	kthread_exit(0);
}
1972 1.38.4.1 nathanw
1973 1.38.4.1 nathanw /*
1974 1.38.4.1 nathanw * scsipi_create_completion_thread:
1975 1.38.4.1 nathanw *
1976 1.38.4.1 nathanw * Callback to actually create the completion thread.
1977 1.38.4.1 nathanw */
1978 1.38.4.1 nathanw void
1979 1.38.4.1 nathanw scsipi_create_completion_thread(arg)
1980 1.38.4.1 nathanw void *arg;
1981 1.38.4.1 nathanw {
1982 1.38.4.1 nathanw struct scsipi_channel *chan = arg;
1983 1.38.4.1 nathanw struct scsipi_adapter *adapt = chan->chan_adapter;
1984 1.38.4.1 nathanw
1985 1.38.4.1 nathanw if (kthread_create1(scsipi_completion_thread, chan,
1986 1.38.4.1 nathanw &chan->chan_thread, "%s:%d", adapt->adapt_dev->dv_xname,
1987 1.38.4.1 nathanw chan->chan_channel)) {
1988 1.38.4.1 nathanw printf("%s: unable to create completion thread for "
1989 1.38.4.1 nathanw "channel %d\n", adapt->adapt_dev->dv_xname,
1990 1.38.4.1 nathanw chan->chan_channel);
1991 1.38.4.1 nathanw panic("scsipi_create_completion_thread");
1992 1.38.4.1 nathanw }
1993 1.38.4.1 nathanw }
1994 1.38.4.1 nathanw
1995 1.38.4.1 nathanw /*
1996 1.38.4.1 nathanw * scsipi_async_event:
1997 1.38.4.1 nathanw *
1998 1.38.4.1 nathanw * Handle an asynchronous event from an adapter.
1999 1.38.4.1 nathanw */
2000 1.38.4.1 nathanw void
2001 1.38.4.1 nathanw scsipi_async_event(chan, event, arg)
2002 1.38.4.1 nathanw struct scsipi_channel *chan;
2003 1.38.4.1 nathanw scsipi_async_event_t event;
2004 1.38.4.1 nathanw void *arg;
2005 1.38.4.1 nathanw {
2006 1.38.4.1 nathanw int s;
2007 1.38.4.1 nathanw
2008 1.38.4.1 nathanw s = splbio();
2009 1.38.4.1 nathanw switch (event) {
2010 1.38.4.1 nathanw case ASYNC_EVENT_MAX_OPENINGS:
2011 1.38.4.1 nathanw scsipi_async_event_max_openings(chan,
2012 1.38.4.1 nathanw (struct scsipi_max_openings *)arg);
2013 1.38.4.1 nathanw break;
2014 1.38.4.1 nathanw
2015 1.38.4.1 nathanw case ASYNC_EVENT_XFER_MODE:
2016 1.38.4.1 nathanw scsipi_async_event_xfer_mode(chan,
2017 1.38.4.1 nathanw (struct scsipi_xfer_mode *)arg);
2018 1.38.4.1 nathanw break;
2019 1.38.4.1 nathanw case ASYNC_EVENT_RESET:
2020 1.38.4.1 nathanw scsipi_async_event_channel_reset(chan);
2021 1.38.4.1 nathanw break;
2022 1.38.4.1 nathanw }
2023 1.38.4.1 nathanw splx(s);
2024 1.38.4.1 nathanw }
2025 1.38.4.1 nathanw
/*
 * scsipi_print_xfer_mode:
 *
 *	Print a periph's capabilities: sync/async, bus width, computed
 *	transfer rate, and tagged queueing support.
 */
void
scsipi_print_xfer_mode(periph)
	struct scsipi_periph *periph;
{
	int period, freq, speed, mbs;

	/* Nothing to report until a transfer mode has been negotiated. */
	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
		return;

	printf("%s: ", periph->periph_dev->dv_xname);
	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		/* period is in ns * 10 (see scsipi_syncparams table). */
		period = scsipi_sync_factor_to_period(periph->periph_period);
		printf("sync (%d.%dns offset %d)",
		    period / 10, period % 10, periph->periph_offset);
	} else
		printf("async");

	if (periph->periph_mode & PERIPH_CAP_WIDE32)
		printf(", 32-bit");
	else if (periph->periph_mode & PERIPH_CAP_WIDE16)
		printf(", 16-bit");
	else
		printf(", 8-bit");

	if (periph->periph_mode & PERIPH_CAP_SYNC) {
		/* Scale the base frequency by the bus width in bytes. */
		freq = scsipi_sync_factor_to_freq(periph->periph_period);
		speed = freq;
		if (periph->periph_mode & PERIPH_CAP_WIDE32)
			speed *= 4;
		else if (periph->periph_mode & PERIPH_CAP_WIDE16)
			speed *= 2;
		mbs = speed / 1000;
		if (mbs > 0)
			printf(" (%d.%03dMB/s)", mbs, speed % 1000);
		else
			printf(" (%dKB/s)", speed % 1000);
	}

	printf(" transfers");

	if (periph->periph_mode & PERIPH_CAP_TQING)
		printf(", tagged queueing");

	printf("\n");
}
2076 1.38.4.1 nathanw
2077 1.38.4.1 nathanw /*
2078 1.38.4.1 nathanw * scsipi_async_event_max_openings:
2079 1.38.4.1 nathanw *
2080 1.38.4.1 nathanw * Update the maximum number of outstanding commands a
2081 1.38.4.1 nathanw * device may have.
2082 1.38.4.1 nathanw */
2083 1.38.4.1 nathanw void
2084 1.38.4.1 nathanw scsipi_async_event_max_openings(chan, mo)
2085 1.38.4.1 nathanw struct scsipi_channel *chan;
2086 1.38.4.1 nathanw struct scsipi_max_openings *mo;
2087 1.38.4.1 nathanw {
2088 1.38.4.1 nathanw struct scsipi_periph *periph;
2089 1.38.4.1 nathanw int minlun, maxlun;
2090 1.38.4.1 nathanw
2091 1.38.4.1 nathanw if (mo->mo_lun == -1) {
2092 1.38.4.1 nathanw /*
2093 1.38.4.1 nathanw * Wildcarded; apply it to all LUNs.
2094 1.38.4.1 nathanw */
2095 1.38.4.1 nathanw minlun = 0;
2096 1.38.4.1 nathanw maxlun = chan->chan_nluns - 1;
2097 1.38.4.1 nathanw } else
2098 1.38.4.1 nathanw minlun = maxlun = mo->mo_lun;
2099 1.38.4.1 nathanw
2100 1.38.4.1 nathanw for (; minlun <= maxlun; minlun++) {
2101 1.38.4.1 nathanw periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2102 1.38.4.1 nathanw if (periph == NULL)
2103 1.38.4.1 nathanw continue;
2104 1.38.4.1 nathanw
2105 1.38.4.1 nathanw if (mo->mo_openings < periph->periph_openings)
2106 1.38.4.1 nathanw periph->periph_openings = mo->mo_openings;
2107 1.38.4.1 nathanw else if (mo->mo_openings > periph->periph_openings &&
2108 1.38.4.1 nathanw (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2109 1.38.4.1 nathanw periph->periph_openings = mo->mo_openings;
2110 1.38.4.1 nathanw }
2111 1.38.4.1 nathanw }
2112 1.38.4.1 nathanw
2113 1.38.4.1 nathanw /*
2114 1.38.4.1 nathanw * scsipi_async_event_xfer_mode:
2115 1.38.4.1 nathanw *
2116 1.38.4.1 nathanw * Update the xfer mode for all periphs sharing the
2117 1.38.4.1 nathanw * specified I_T Nexus.
2118 1.38.4.1 nathanw */
2119 1.38.4.1 nathanw void
2120 1.38.4.1 nathanw scsipi_async_event_xfer_mode(chan, xm)
2121 1.38.4.1 nathanw struct scsipi_channel *chan;
2122 1.38.4.1 nathanw struct scsipi_xfer_mode *xm;
2123 1.38.4.1 nathanw {
2124 1.38.4.1 nathanw struct scsipi_periph *periph;
2125 1.38.4.1 nathanw int lun, announce, mode, period, offset;
2126 1.38.4.1 nathanw
2127 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2128 1.38.4.1 nathanw periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2129 1.38.4.1 nathanw if (periph == NULL)
2130 1.38.4.1 nathanw continue;
2131 1.38.4.1 nathanw announce = 0;
2132 1.38.4.1 nathanw
2133 1.38.4.1 nathanw /*
2134 1.38.4.1 nathanw * Clamp the xfer mode down to this periph's capabilities.
2135 1.38.4.1 nathanw */
2136 1.38.4.1 nathanw mode = xm->xm_mode & periph->periph_cap;
2137 1.38.4.1 nathanw if (mode & PERIPH_CAP_SYNC) {
2138 1.38.4.1 nathanw period = xm->xm_period;
2139 1.38.4.1 nathanw offset = xm->xm_offset;
2140 1.38.4.1 nathanw } else {
2141 1.38.4.1 nathanw period = 0;
2142 1.38.4.1 nathanw offset = 0;
2143 1.38.4.1 nathanw }
2144 1.38.4.1 nathanw
2145 1.38.4.1 nathanw /*
2146 1.38.4.1 nathanw * If we do not have a valid xfer mode yet, or the parameters
2147 1.38.4.1 nathanw * are different, announce them.
2148 1.38.4.1 nathanw */
2149 1.38.4.1 nathanw if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2150 1.38.4.1 nathanw periph->periph_mode != mode ||
2151 1.38.4.1 nathanw periph->periph_period != period ||
2152 1.38.4.1 nathanw periph->periph_offset != offset)
2153 1.38.4.1 nathanw announce = 1;
2154 1.38.4.1 nathanw
2155 1.38.4.1 nathanw periph->periph_mode = mode;
2156 1.38.4.1 nathanw periph->periph_period = period;
2157 1.38.4.1 nathanw periph->periph_offset = offset;
2158 1.38.4.1 nathanw periph->periph_flags |= PERIPH_MODE_VALID;
2159 1.38.4.1 nathanw
2160 1.38.4.1 nathanw if (announce)
2161 1.38.4.1 nathanw scsipi_print_xfer_mode(periph);
2162 1.38.4.1 nathanw }
2163 1.38.4.1 nathanw }
2164 1.38.4.1 nathanw
2165 1.38.4.1 nathanw /*
2166 1.38.4.1 nathanw * scsipi_set_xfer_mode:
2167 1.38.4.1 nathanw *
2168 1.38.4.1 nathanw * Set the xfer mode for the specified I_T Nexus.
2169 1.38.4.1 nathanw */
2170 1.38.4.1 nathanw void
2171 1.38.4.1 nathanw scsipi_set_xfer_mode(chan, target, immed)
2172 1.38.4.1 nathanw struct scsipi_channel *chan;
2173 1.38.4.1 nathanw int target, immed;
2174 1.38.4.1 nathanw {
2175 1.38.4.1 nathanw struct scsipi_xfer_mode xm;
2176 1.38.4.1 nathanw struct scsipi_periph *itperiph;
2177 1.38.4.1 nathanw int lun, s;
2178 1.38.4.1 nathanw
2179 1.38.4.1 nathanw /*
2180 1.38.4.1 nathanw * Go to the minimal xfer mode.
2181 1.38.4.1 nathanw */
2182 1.38.4.1 nathanw xm.xm_target = target;
2183 1.38.4.1 nathanw xm.xm_mode = 0;
2184 1.38.4.1 nathanw xm.xm_period = 0; /* ignored */
2185 1.38.4.1 nathanw xm.xm_offset = 0; /* ignored */
2186 1.38.4.1 nathanw
2187 1.38.4.1 nathanw /*
2188 1.38.4.1 nathanw * Find the first LUN we know about on this I_T Nexus.
2189 1.38.4.1 nathanw */
2190 1.38.4.1 nathanw for (lun = 0; lun < chan->chan_nluns; lun++) {
2191 1.38.4.1 nathanw itperiph = scsipi_lookup_periph(chan, target, lun);
2192 1.38.4.1 nathanw if (itperiph != NULL)
2193 1.38.4.1 nathanw break;
2194 1.38.4.1 nathanw }
2195 1.38.4.1 nathanw if (itperiph != NULL)
2196 1.38.4.1 nathanw xm.xm_mode = itperiph->periph_cap;
2197 1.38.4.1 nathanw
2198 1.38.4.1 nathanw /*
2199 1.38.4.1 nathanw * Now issue the request to the adapter.
2200 1.38.4.1 nathanw */
2201 1.38.4.1 nathanw s = splbio();
2202 1.38.4.1 nathanw scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2203 1.38.4.1 nathanw splx(s);
2204 1.38.4.1 nathanw
2205 1.38.4.1 nathanw /*
2206 1.38.4.1 nathanw * If we want this to happen immediately, issue a dummy command,
2207 1.38.4.1 nathanw * since most adapters can't really negotiate unless they're
2208 1.38.4.1 nathanw * executing a job.
2209 1.38.4.1 nathanw */
2210 1.38.4.1 nathanw if (immed != 0 && itperiph != NULL) {
2211 1.38.4.1 nathanw (void) scsipi_test_unit_ready(itperiph,
2212 1.38.4.1 nathanw XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2213 1.38.4.1 nathanw XS_CTL_IGNORE_NOT_READY |
2214 1.38.4.1 nathanw XS_CTL_IGNORE_MEDIA_CHANGE);
2215 1.38.4.1 nathanw }
2216 1.38.4.1 nathanw }
2217 1.38.4.1 nathanw
/*
 * scsipi_async_event_channel_reset:
 *
 *	Handle a SCSI bus reset.
 *	Called at splbio().
 */
void
scsipi_async_event_channel_reset(chan)
	struct scsipi_channel *chan;
{
	struct scsipi_xfer *xs, *xs_next;
	struct scsipi_periph *periph;
	int target, lun;

	/*
	 * Channel has been reset.  Also mark as reset pending REQUEST_SENSE
	 * commands, as the sense data is not available any more.
	 * We can't call scsipi_done() from here, as the command has not been
	 * sent to the adapter yet (this would corrupt accounting).
	 */

	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
		/* Grab the next pointer first; xs may be unlinked below. */
		xs_next = TAILQ_NEXT(xs, channel_q);
		if (xs->xs_control & XS_CTL_REQSENSE) {
			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
			xs->error = XS_RESET;
			/* Async xfers are finished by the completion thread. */
			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
				    channel_q);
		}
	}
	/* Let the completion thread see the newly-queued xfers. */
	wakeup(&chan->chan_complete);
	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
	for (target = 0; target < chan->chan_ntargets; target++) {
		/* Skip the adapter's own ID. */
		if (target == chan->chan_id)
			continue;
		for (lun = 0; lun < chan->chan_nluns; lun++) {
			periph = chan->chan_periphs[target][lun];
			if (periph) {
				xs = periph->periph_xscheck;
				if (xs)
					xs->error = XS_RESET;
			}
		}
	}
}
2264 1.38.4.1 nathanw
2265 1.38.4.1 nathanw
2266 1.38.4.1 nathanw /*
2267 1.38.4.1 nathanw * scsipi_adapter_addref:
2268 1.38.4.1 nathanw *
2269 1.38.4.1 nathanw * Add a reference to the adapter pointed to by the provided
2270 1.38.4.1 nathanw * link, enabling the adapter if necessary.
2271 1.38.4.1 nathanw */
2272 1.38.4.1 nathanw int
2273 1.38.4.1 nathanw scsipi_adapter_addref(adapt)
2274 1.38.4.1 nathanw struct scsipi_adapter *adapt;
2275 1.38.4.1 nathanw {
2276 1.38.4.1 nathanw int s, error = 0;
2277 1.38.4.1 nathanw
2278 1.38.4.1 nathanw s = splbio();
2279 1.38.4.1 nathanw if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2280 1.38.4.1 nathanw error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2281 1.38.4.1 nathanw if (error)
2282 1.38.4.1 nathanw adapt->adapt_refcnt--;
2283 1.38.4.1 nathanw }
2284 1.38.4.1 nathanw splx(s);
2285 1.38.4.1 nathanw return (error);
2286 1.38.4.1 nathanw }
2287 1.38.4.1 nathanw
2288 1.38.4.1 nathanw /*
2289 1.38.4.1 nathanw * scsipi_adapter_delref:
2290 1.38.4.1 nathanw *
2291 1.38.4.1 nathanw * Delete a reference to the adapter pointed to by the provided
2292 1.38.4.1 nathanw * link, disabling the adapter if possible.
2293 1.38.4.1 nathanw */
2294 1.38.4.1 nathanw void
2295 1.38.4.1 nathanw scsipi_adapter_delref(adapt)
2296 1.38.4.1 nathanw struct scsipi_adapter *adapt;
2297 1.38.4.1 nathanw {
2298 1.38.4.1 nathanw int s;
2299 1.38.4.1 nathanw
2300 1.38.4.1 nathanw s = splbio();
2301 1.38.4.1 nathanw if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2302 1.38.4.1 nathanw (void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2303 1.38.4.1 nathanw splx(s);
2304 1.38.4.1 nathanw }
2305 1.38.4.1 nathanw
/*
 * Table mapping SCSI synchronous transfer period factors to the
 * transfer periods they encode.  Periods are stored in units of
 * ns * 10 so that non-integral periods (e.g. 30.3ns) fit in an int.
 * Factors not covered by the table use the standard encoding:
 * period(ns) = factor * 4.
 */
struct scsipi_syncparam {
	int	ss_factor;
	int	ss_period;	/* ns * 10 */
} scsipi_syncparams[] = {
	{ 0x0a,		250 },
	{ 0x0b,		303 },
	{ 0x0c,		500 },
};
const int scsipi_nsyncparams =
    sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);

/*
 * scsipi_sync_lookup_factor:
 *
 *	Return the table entry for the given factor, or NULL if the
 *	factor uses the standard (factor * 4ns) encoding.
 */
static const struct scsipi_syncparam *
scsipi_sync_lookup_factor(int factor)
{
	int idx;

	for (idx = 0; idx < scsipi_nsyncparams; idx++) {
		if (scsipi_syncparams[idx].ss_factor == factor)
			return (&scsipi_syncparams[idx]);
	}
	return (NULL);
}

/*
 * scsipi_sync_period_to_factor:
 *
 *	Convert a synchronous transfer period (ns * 10) to the
 *	corresponding period factor.  Periods at or below a table
 *	entry's period map to that entry's factor; slower periods
 *	fall back to the standard encoding.
 */
int
scsipi_sync_period_to_factor(int period)
{
	int idx;

	for (idx = 0; idx < scsipi_nsyncparams; idx++) {
		if (period <= scsipi_syncparams[idx].ss_period)
			return (scsipi_syncparams[idx].ss_factor);
	}

	/* Standard encoding: factor = period(ns) / 4. */
	return ((period / 10) / 4);
}

/*
 * scsipi_sync_factor_to_period:
 *
 *	Convert a period factor to a transfer period (ns * 10).
 */
int
scsipi_sync_factor_to_period(int factor)
{
	const struct scsipi_syncparam *sp;

	sp = scsipi_sync_lookup_factor(factor);
	if (sp != NULL)
		return (sp->ss_period);

	/* Standard encoding: period(ns) = factor * 4. */
	return ((factor * 4) * 10);
}

/*
 * scsipi_sync_factor_to_freq:
 *
 *	Convert a period factor to a transfer frequency (kHz),
 *	i.e. the reciprocal of the (ns * 10) period.
 */
int
scsipi_sync_factor_to_freq(int factor)
{
	const struct scsipi_syncparam *sp;

	sp = scsipi_sync_lookup_factor(factor);
	if (sp != NULL)
		return (10000000 / sp->ss_period);

	return (10000000 / ((factor * 4) * 10));
}
2358 1.14 thorpej
2359 1.38.4.1 nathanw #ifdef SCSIPI_DEBUG
2360 1.2 bouyer /*
2361 1.2 bouyer * Given a scsipi_xfer, dump the request, in all it's glory
2362 1.2 bouyer */
2363 1.2 bouyer void
2364 1.2 bouyer show_scsipi_xs(xs)
2365 1.2 bouyer struct scsipi_xfer *xs;
2366 1.2 bouyer {
2367 1.3 enami
2368 1.2 bouyer printf("xs(%p): ", xs);
2369 1.24 thorpej printf("xs_control(0x%08x)", xs->xs_control);
2370 1.24 thorpej printf("xs_status(0x%08x)", xs->xs_status);
2371 1.38.4.1 nathanw printf("periph(%p)", xs->xs_periph);
2372 1.38.4.1 nathanw printf("retr(0x%x)", xs->xs_retries);
2373 1.2 bouyer printf("timo(0x%x)", xs->timeout);
2374 1.2 bouyer printf("cmd(%p)", xs->cmd);
2375 1.2 bouyer printf("len(0x%x)", xs->cmdlen);
2376 1.2 bouyer printf("data(%p)", xs->data);
2377 1.2 bouyer printf("len(0x%x)", xs->datalen);
2378 1.2 bouyer printf("res(0x%x)", xs->resid);
2379 1.2 bouyer printf("err(0x%x)", xs->error);
2380 1.2 bouyer printf("bp(%p)", xs->bp);
2381 1.2 bouyer show_scsipi_cmd(xs);
2382 1.2 bouyer }
2383 1.2 bouyer
2384 1.2 bouyer void
2385 1.2 bouyer show_scsipi_cmd(xs)
2386 1.2 bouyer struct scsipi_xfer *xs;
2387 1.2 bouyer {
2388 1.2 bouyer u_char *b = (u_char *) xs->cmd;
2389 1.3 enami int i = 0;
2390 1.2 bouyer
2391 1.38.4.1 nathanw scsipi_printaddr(xs->xs_periph);
2392 1.38.4.1 nathanw printf(" command: ");
2393 1.2 bouyer
2394 1.24 thorpej if ((xs->xs_control & XS_CTL_RESET) == 0) {
2395 1.2 bouyer while (i < xs->cmdlen) {
2396 1.2 bouyer if (i)
2397 1.2 bouyer printf(",");
2398 1.2 bouyer printf("0x%x", b[i++]);
2399 1.2 bouyer }
2400 1.2 bouyer printf("-[%d bytes]\n", xs->datalen);
2401 1.2 bouyer if (xs->datalen)
2402 1.2 bouyer show_mem(xs->data, min(64, xs->datalen));
2403 1.2 bouyer } else
2404 1.2 bouyer printf("-RESET-\n");
2405 1.2 bouyer }
2406 1.2 bouyer
2407 1.2 bouyer void
2408 1.2 bouyer show_mem(address, num)
2409 1.2 bouyer u_char *address;
2410 1.2 bouyer int num;
2411 1.2 bouyer {
2412 1.2 bouyer int x;
2413 1.2 bouyer
2414 1.2 bouyer printf("------------------------------");
2415 1.2 bouyer for (x = 0; x < num; x++) {
2416 1.2 bouyer if ((x % 16) == 0)
2417 1.2 bouyer printf("\n%03d: ", x);
2418 1.2 bouyer printf("%02x ", *address++);
2419 1.2 bouyer }
2420 1.2 bouyer printf("\n------------------------------\n");
2421 1.2 bouyer }
2422 1.38.4.1 nathanw #endif /* SCSIPI_DEBUG */
2423