/*	$NetBSD: ata_subr.c,v 1.1 2017/10/10 17:19:38 jdolecek Exp $	*/

/*
 * Copyright (c) 1998, 2001 Manuel Bouyer.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ata_subr.c,v 1.1 2017/10/10 17:19:38 jdolecek Exp $");

#include "opt_ata.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/errno.h>
#include <sys/ataio.h>
#include <sys/kmem.h>
#include <sys/intr.h>
#include <sys/bus.h>
#include <sys/once.h>
#include <sys/bitops.h>

#define ATABUS_PRIVATE

#include <dev/ata/ataconf.h>
#include <dev/ata/atareg.h>
#include <dev/ata/atavar.h>
#include <dev/ic/wdcvar.h>	/* for PIOBM */

#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10
#define DEBUG_DETACH	0x20
#define DEBUG_XFERS	0x40
#ifdef ATADEBUG
extern int atadebug_mask;
#define ATADEBUG_PRINT(args, level) \
        if (atadebug_mask & (level)) \
                printf args
#else
#define ATADEBUG_PRINT(args, level)
#endif
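/*
 * Example use of ATADEBUG_PRINT() (see ata_get_xfer_ext() below): the
 * extra parentheses pass the whole printf() argument list through as a
 * single macro argument.
 *
 *	ATADEBUG_PRINT(("%s: channel %d\n", __func__, chp->ch_channel),
 *	    DEBUG_XFERS);
 */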
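/*
 * Reset a queue to its pristine state: no queued or active xfers, queue
 * unfrozen, and every slot available.  For example, with
 * queue_openings == 4, queue_xfers_avail becomes __BIT(4) - 1 == 0xf,
 * i.e. one bit set per free slot.
 */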
void
ata_queue_reset(struct ata_queue *chq)
{
        /* make sure that we can use polled commands */
        TAILQ_INIT(&chq->queue_xfer);
        TAILQ_INIT(&chq->active_xfers);
        chq->queue_freeze = 0;
        chq->queue_active = 0;
        chq->active_xfers_used = 0;
        chq->queue_xfers_avail = __BIT(chq->queue_openings) - 1;
}

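/*
 * Look up the active xfer occupying the given hardware slot, e.g. the
 * NCQ tag reported by the controller on command completion.  The slot
 * must actually be active; this is asserted below.
 */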
struct ata_xfer *
ata_queue_hwslot_to_xfer(struct ata_channel *chp, int hwslot)
{
        struct ata_queue *chq = chp->ch_queue;
        struct ata_xfer *xfer = NULL;

        ata_channel_lock(chp);

        KASSERTMSG(hwslot < chq->queue_openings, "hwslot %d >= openings %d",
            hwslot, chq->queue_openings);
        KASSERTMSG((chq->active_xfers_used & __BIT(hwslot)) != 0,
            "hwslot %d not active", hwslot);

        /* Usually the first entry will be the one we are looking for */
        TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
                if (xfer->c_slot == hwslot)
                        break;
        }

        ata_channel_unlock(chp);

        KASSERTMSG((xfer != NULL),
            "%s: xfer with slot %d not found (active %x)", __func__,
            hwslot, chq->active_xfers_used);

        return xfer;
}

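/*
 * Return the first active non-NCQ xfer, or NULL if there is none or the
 * head of the active list is an NCQ command.  The caller must hold the
 * channel lock.
 */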
struct ata_xfer *
ata_queue_get_active_xfer_locked(struct ata_channel *chp)
{
        struct ata_xfer *xfer;

        KASSERT(mutex_owned(&chp->ch_lock));
        xfer = TAILQ_FIRST(&chp->ch_queue->active_xfers);

        if (xfer && ISSET(xfer->c_flags, C_NCQ)) {
                /* Spurious call, never return NCQ xfer from this interface */
                xfer = NULL;
        }

        return xfer;
}

/*
 * This interface is supposed to be used only when there is exactly
 * one outstanding command and there is no information about the slot
 * that triggered the command.  The ata_queue_hwslot_to_xfer()
 * interface is preferred in all NCQ cases.
 */
struct ata_xfer *
ata_queue_get_active_xfer(struct ata_channel *chp)
{
        struct ata_xfer *xfer = NULL;

        ata_channel_lock(chp);
        xfer = ata_queue_get_active_xfer_locked(chp);
        ata_channel_unlock(chp);

        return xfer;
}

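/*
 * Return the active xfer for the given drive.  The caller must know the
 * drive has an outstanding command; an xfer is always expected to be
 * found, as the KASSERT() below documents.
 */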
struct ata_xfer *
ata_queue_drive_active_xfer(struct ata_channel *chp, int drive)
{
        struct ata_xfer *xfer = NULL;

        ata_channel_lock(chp);

        TAILQ_FOREACH(xfer, &chp->ch_queue->active_xfers, c_activechain) {
                if (xfer->c_drive == drive)
                        break;
        }
        KASSERT(xfer != NULL);

        ata_channel_unlock(chp);

        return xfer;
}

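/*
 * Set up and tear down the per-slot xfer structures embedded in the
 * queue: each xfer remembers its own slot number and owns two condvars
 * and two callouts (command timeout and retry).
 */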
static void
ata_xfer_init(struct ata_xfer *xfer, uint8_t slot)
{
        memset(xfer, 0, sizeof(*xfer));

        xfer->c_slot = slot;

        cv_init(&xfer->c_active, "ataact");
        cv_init(&xfer->c_finish, "atafin");
        callout_init(&xfer->c_timo_callout, 0);		/* XXX MPSAFE */
        callout_init(&xfer->c_retry_callout, 0);	/* XXX MPSAFE */
}

static void
ata_xfer_destroy(struct ata_xfer *xfer)
{
        callout_halt(&xfer->c_timo_callout, NULL);	/* XXX MPSAFE */
        callout_destroy(&xfer->c_timo_callout);
        callout_halt(&xfer->c_retry_callout, NULL);	/* XXX MPSAFE */
        callout_destroy(&xfer->c_retry_callout);
        cv_destroy(&xfer->c_active);
        cv_destroy(&xfer->c_finish);
}

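/*
 * Allocate a queue together with its trailing queue_xfers[] array as a
 * single allocation.  offsetof(struct ata_queue, queue_xfers[openings])
 * evaluates to offsetof(struct ata_queue, queue_xfers) plus
 * openings * sizeof(struct ata_xfer), i.e. the size of a queue with
 * exactly 'openings' trailing xfer slots.
 */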
struct ata_queue *
ata_queue_alloc(uint8_t openings)
{
        if (openings == 0)
                openings = 1;

        if (openings > ATA_MAX_OPENINGS)
                openings = ATA_MAX_OPENINGS;

        struct ata_queue *chq = malloc(offsetof(struct ata_queue, queue_xfers[openings]),
            M_DEVBUF, M_WAITOK | M_ZERO);

        chq->queue_openings = openings;
        ata_queue_reset(chq);

        cv_init(&chq->queue_busy, "ataqbusy");
        cv_init(&chq->queue_drain, "atdrn");
        cv_init(&chq->queue_idle, "qidl");

        for (uint8_t i = 0; i < openings; i++)
                ata_xfer_init(&chq->queue_xfers[i], i);

        return chq;
}

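/*
 * Counterpart of ata_queue_alloc(): destroy the per-xfer resources and
 * the queue condvars, then free the single allocation.
 */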
void
ata_queue_free(struct ata_queue *chq)
{
        for (uint8_t i = 0; i < chq->queue_openings; i++)
                ata_xfer_destroy(&chq->queue_xfers[i]);

        cv_destroy(&chq->queue_busy);
        cv_destroy(&chq->queue_drain);
        cv_destroy(&chq->queue_idle);

        free(chq, M_DEVBUF);
}

void
ata_channel_init(struct ata_channel *chp)
{
        mutex_init(&chp->ch_lock, MUTEX_DEFAULT, IPL_BIO);
        cv_init(&chp->ch_thr_idle, "atath");
}

void
ata_channel_destroy(struct ata_channel *chp)
{
        mutex_destroy(&chp->ch_lock);
        cv_destroy(&chp->ch_thr_idle);
}

/*
 * Does its own locking, does not require splbio().
 * flags - whether to block waiting for a free xfer
 * openings - limit of openings supported by the device; <= 0 means the tag
 *	is not relevant, and any available xfer can be returned
 */
struct ata_xfer *
ata_get_xfer_ext(struct ata_channel *chp, int flags, uint8_t openings)
{
        struct ata_queue *chq = chp->ch_queue;
        struct ata_xfer *xfer = NULL;
        uint32_t avail, slot, mask;
        int error;

        ATADEBUG_PRINT(("%s: channel %d flags %x openings %d\n",
            __func__, chp->ch_channel, flags, openings),
            DEBUG_XFERS);

        ata_channel_lock(chp);

        /*
         * When openings is just 1, can't reserve anything for
         * recovery.  KASSERT() here is to catch code which naively
         * relies on C_RECOVERY to work under this condition.
         */
        KASSERT((flags & C_RECOVERY) == 0 || chq->queue_openings > 1);

        if (flags & C_RECOVERY) {
                mask = UINT32_MAX;
        } else {
                if (openings <= 0 || openings > chq->queue_openings)
                        openings = chq->queue_openings;

                if (openings > 1) {
                        mask = __BIT(openings - 1) - 1;
                } else {
                        mask = UINT32_MAX;
                }
        }
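        /*
         * The mask just computed keeps the highest usable slot out of
         * reach of regular xfers, e.g. with openings == 32 it is
         * __BIT(31) - 1 == 0x7fffffff, so slot 31 stays reserved for
         * C_RECOVERY requests, which search all slots (UINT32_MAX).
         */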

retry:
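        /*
         * ffs32() returns the 1-based index of the lowest set bit, or 0
         * when no bit is set, hence the avail - 1 below to convert back
         * to a 0-based slot number.
         */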
        avail = ffs32(chq->queue_xfers_avail & mask);
        if (avail == 0) {
                /*
                 * Catch code which tries to get another recovery xfer while
                 * already holding one (wrong recursion).
                 */
                KASSERTMSG((flags & C_RECOVERY) == 0,
                    "recovery xfer busy openings %d mask %x avail %x",
                    openings, mask, chq->queue_xfers_avail);

                if (flags & C_WAIT) {
                        chq->queue_flags |= QF_NEED_XFER;
                        error = cv_wait_sig(&chq->queue_busy, &chp->ch_lock);
                        if (error == 0)
                                goto retry;
                }

                goto out;
        }

        slot = avail - 1;
        xfer = &chq->queue_xfers[slot];
        chq->queue_xfers_avail &= ~__BIT(slot);

        KASSERT((chq->active_xfers_used & __BIT(slot)) == 0);

        /* zero everything after the callout member */
        memset(&xfer->c_startzero, 0,
            sizeof(struct ata_xfer) - offsetof(struct ata_xfer, c_startzero));

out:
        ata_channel_unlock(chp);
        return xfer;
}
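
/*
 * Typical call (a sketch, not code from this file): a caller willing to
 * sleep until a slot is free would do
 *
 *	xfer = ata_get_xfer_ext(chp, C_WAIT, 0);
 *
 * and must still handle a NULL return, since cv_wait_sig() aborts the
 * wait when a signal is delivered.  Passing openings == 0 lets any
 * available slot be used.
 */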

/*
 * ata_deactivate_xfer() must always be called prior to ata_free_xfer()
 */
void
ata_free_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
        struct ata_queue *chq = chp->ch_queue;

        ata_channel_lock(chp);

        if (xfer->c_flags & (C_WAITACT|C_WAITTIMO)) {
                /* Someone is waiting for this xfer, so we can't free now */
                xfer->c_flags |= C_FREE;
                cv_signal(&xfer->c_active);
                goto out;
        }

#if NATA_PIOBM		/* XXX wdc dependent code */
        if (xfer->c_flags & C_PIOBM) {
                struct wdc_softc *wdc = CHAN_TO_WDC(chp);

                /* finish the busmastering PIO */
                (*wdc->piobm_done)(wdc->dma_arg,
                    chp->ch_channel, xfer->c_drive);
                chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_PIOBM_WAIT);
        }
#endif

        if (chp->ch_atac->atac_free_hw)
                chp->ch_atac->atac_free_hw(chp);

        KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0);
        KASSERT((chq->queue_xfers_avail & __BIT(xfer->c_slot)) == 0);
        chq->queue_xfers_avail |= __BIT(xfer->c_slot);

out:
        if (chq->queue_flags & QF_NEED_XFER) {
                chq->queue_flags &= ~QF_NEED_XFER;
                cv_broadcast(&chq->queue_busy);
        }

        ata_channel_unlock(chp);
}

/*
 * Must be called without any locks, i.e. with both drive and channel locks
 * released.
 */
void
ata_channel_start(struct ata_channel *chp, int drive)
{
        int i, s;
        struct ata_drive_datas *drvp;

        s = splbio();

        KASSERT(chp->ch_ndrives > 0);

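        /*
         * Helper: kick the per-drive start routine for one drive of a
         * known type.  The continue jumps to the do { } while (0)
         * condition, which is always false, so it merely skips the rest
         * of the macro body; it does not continue the enclosing for
         * loop below.
         */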
#define ATA_DRIVE_START(chp, drive) \
        do { \
                KASSERT(drive < chp->ch_ndrives); \
                drvp = &chp->ch_drive[drive]; \
                \
                if (drvp->drive_type != ATA_DRIVET_ATA && \
                    drvp->drive_type != ATA_DRIVET_ATAPI && \
                    drvp->drive_type != ATA_DRIVET_OLD) \
                        continue; \
                \
                if (drvp->drv_start != NULL) \
                        (*drvp->drv_start)(drvp->drv_softc); \
        } while (0)

        /*
         * Process drives in round-robin fashion, starting with the one
         * after the drive which finished the transfer, so that no single
         * drive can completely starve the others on the same channel.
         * This loop processes all but the current drive, so it does
         * nothing if there is only one drive on the channel.
         */
        for (i = (drive + 1) % chp->ch_ndrives; i != drive;
            i = (i + 1) % chp->ch_ndrives) {
                ATA_DRIVE_START(chp, i);
        }

        /* Now try to kick off xfers on the current drive */
        ATA_DRIVE_START(chp, drive);

        splx(s);
#undef ATA_DRIVE_START
}

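/*
 * Thin wrappers around the channel mutex, so that users of the channel
 * need not manipulate chp->ch_lock directly.
 */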
void
ata_channel_lock(struct ata_channel *chp)
{
        mutex_enter(&chp->ch_lock);
}

void
ata_channel_unlock(struct ata_channel *chp)
{
        mutex_exit(&chp->ch_lock);
}

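/*
 * Assert that the caller holds the channel lock.  This is purely a
 * diagnostic aid; in kernels compiled without DIAGNOSTIC the KASSERT()
 * expands to nothing.
 */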
void
ata_channel_lock_owned(struct ata_channel *chp)
{
        KASSERT(mutex_owned(&chp->ch_lock));
}