/*	$NetBSD: ata_subr.c,v 1.4 2017/10/20 07:06:07 jdolecek Exp $	*/

/*
 * Copyright (c) 1998, 2001 Manuel Bouyer.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ata_subr.c,v 1.4 2017/10/20 07:06:07 jdolecek Exp $");

#include "opt_ata.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/errno.h>
#include <sys/ataio.h>
#include <sys/kmem.h>
#include <sys/intr.h>
#include <sys/bus.h>
#include <sys/once.h>
#include <sys/bitops.h>

#define ATABUS_PRIVATE

#include <dev/ata/ataconf.h>
#include <dev/ata/atareg.h>
#include <dev/ata/atavar.h>
#include <dev/ic/wdcvar.h>	/* for PIOBM */

#define DEBUG_FUNCS  0x08
#define DEBUG_PROBE  0x10
#define DEBUG_DETACH 0x20
#define DEBUG_XFERS  0x40
#ifdef ATADEBUG
extern int atadebug_mask;
#define ATADEBUG_PRINT(args, level) \
	if (atadebug_mask & (level)) \
		printf args
#else
#define ATADEBUG_PRINT(args, level)
#endif

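/*
 * Reset a queue to its pristine state: no queued or active xfers,
 * queue unfrozen, and every slot marked available.
 */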
void
ata_queue_reset(struct ata_queue *chq)
{
	/* make sure that we can use polled commands */
	TAILQ_INIT(&chq->queue_xfer);
	TAILQ_INIT(&chq->active_xfers);
	chq->queue_freeze = 0;
	chq->queue_active = 0;
	chq->active_xfers_used = 0;
	chq->queue_xfers_avail = __BIT(chq->queue_openings) - 1;
}

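/*
 * Look up the active xfer occupying the given hardware slot (e.g. an
 * NCQ tag).  The slot must be valid and marked active; this is checked
 * by the KASSERTMSG()s under DIAGNOSTIC kernels.
 */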
struct ata_xfer *
ata_queue_hwslot_to_xfer(struct ata_channel *chp, int hwslot)
{
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer = NULL;

	ata_channel_lock(chp);

	KASSERTMSG(hwslot < chq->queue_openings, "hwslot %d >= openings %d",
	    hwslot, chq->queue_openings);
	KASSERTMSG((chq->active_xfers_used & __BIT(hwslot)) != 0,
	    "hwslot %d not active", hwslot);

	/* Usually the first entry will be the one */
	TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
		if (xfer->c_slot == hwslot)
			break;
	}

	ata_channel_unlock(chp);

	KASSERTMSG((xfer != NULL),
	    "%s: xfer with slot %d not found (active %x)", __func__,
	    hwslot, chq->active_xfers_used);

	return xfer;
}

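/*
 * Return the first active non-NCQ xfer, or NULL if there is none.
 * The caller must hold the channel lock.
 */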
struct ata_xfer *
ata_queue_get_active_xfer_locked(struct ata_channel *chp)
{
	struct ata_xfer *xfer;

	KASSERT(mutex_owned(&chp->ch_lock));
	xfer = TAILQ_FIRST(&chp->ch_queue->active_xfers);

	if (xfer && ISSET(xfer->c_flags, C_NCQ)) {
		/* Spurious call, never return NCQ xfer from this interface */
		xfer = NULL;
	}

	return xfer;
}

/*
 * This interface is meant to be used only when there is exactly one
 * outstanding command and no information about which slot triggered
 * the command is available.  ata_queue_hwslot_to_xfer() is preferred
 * in all NCQ cases.
 */
struct ata_xfer *
ata_queue_get_active_xfer(struct ata_channel *chp)
{
	struct ata_xfer *xfer = NULL;

	ata_channel_lock(chp);
	xfer = ata_queue_get_active_xfer_locked(chp);
	ata_channel_unlock(chp);

	return xfer;
}

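/*
 * Return an active xfer belonging to the given drive.  At least one
 * such xfer must exist; the KASSERT() catches callers violating this.
 */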
struct ata_xfer *
ata_queue_drive_active_xfer(struct ata_channel *chp, int drive)
{
	struct ata_xfer *xfer = NULL;

	ata_channel_lock(chp);

	TAILQ_FOREACH(xfer, &chp->ch_queue->active_xfers, c_activechain) {
		if (xfer->c_drive == drive)
			break;
	}
	KASSERT(xfer != NULL);

	ata_channel_unlock(chp);

	return xfer;
}

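/*
 * One-time initialization of a slot's xfer: zero it, record the slot
 * number and set up the condvars and callouts.  ata_xfer_destroy()
 * undoes this.
 */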
static void
ata_xfer_init(struct ata_xfer *xfer, uint8_t slot)
{
	memset(xfer, 0, sizeof(*xfer));

	xfer->c_slot = slot;

	cv_init(&xfer->c_active, "ataact");
	cv_init(&xfer->c_finish, "atafin");
	callout_init(&xfer->c_timo_callout, 0);		/* XXX MPSAFE */
	callout_init(&xfer->c_retry_callout, 0);	/* XXX MPSAFE */
}

static void
ata_xfer_destroy(struct ata_xfer *xfer)
{
	callout_halt(&xfer->c_timo_callout, NULL);	/* XXX MPSAFE */
	callout_destroy(&xfer->c_timo_callout);
	callout_halt(&xfer->c_retry_callout, NULL);	/* XXX MPSAFE */
	callout_destroy(&xfer->c_retry_callout);
	cv_destroy(&xfer->c_active);
	cv_destroy(&xfer->c_finish);
}

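/*
 * Allocate a queue together with its embedded array of xfers, clamping
 * the number of openings to the range [1, ATA_MAX_OPENINGS].
 */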
struct ata_queue *
ata_queue_alloc(uint8_t openings)
{
	if (openings == 0)
		openings = 1;

	if (openings > ATA_MAX_OPENINGS)
		openings = ATA_MAX_OPENINGS;

	struct ata_queue *chq = malloc(offsetof(struct ata_queue, queue_xfers[openings]),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	chq->queue_openings = openings;
	ata_queue_reset(chq);

	cv_init(&chq->queue_busy, "ataqbusy");
	cv_init(&chq->queue_drain, "atdrn");
	cv_init(&chq->queue_idle, "qidl");

	for (uint8_t i = 0; i < openings; i++)
		ata_xfer_init(&chq->queue_xfers[i], i);

	return chq;
}

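/*
 * Tear down and free a queue previously allocated by ata_queue_alloc().
 */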
void
ata_queue_free(struct ata_queue *chq)
{
	for (uint8_t i = 0; i < chq->queue_openings; i++)
		ata_xfer_destroy(&chq->queue_xfers[i]);

	cv_destroy(&chq->queue_busy);
	cv_destroy(&chq->queue_drain);
	cv_destroy(&chq->queue_idle);

	free(chq, M_DEVBUF);
}

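/*
 * Initialize the channel lock and condvar, and allocate a default
 * single-opening queue if the caller did not supply one.
 */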
void
ata_channel_init(struct ata_channel *chp)
{
	mutex_init(&chp->ch_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&chp->ch_thr_idle, "atath");

	/* Optionally setup the queue, too */
	if (chp->ch_queue == NULL) {
		chp->ch_queue = ata_queue_alloc(1);
	}
}

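/*
 * Release the resources set up by ata_channel_init().
 */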
void
ata_channel_destroy(struct ata_channel *chp)
{
	if (chp->ch_queue != NULL) {
		ata_queue_free(chp->ch_queue);
		chp->ch_queue = NULL;
	}

	mutex_destroy(&chp->ch_lock);
	cv_destroy(&chp->ch_thr_idle);
}

/*
 * Does its own locking, does not require splbio().
 * flags - whether to block waiting for a free xfer
 * openings - limit of openings supported by the device; <= 0 means the
 *	tag is not relevant, and any available xfer can be returned
 */
struct ata_xfer *
ata_get_xfer_ext(struct ata_channel *chp, int flags, uint8_t openings)
{
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer = NULL;
	uint32_t avail, slot, mask;
	int error;

	ATADEBUG_PRINT(("%s: channel %d fl 0x%x op %d qavail 0x%x qact %d",
	    __func__, chp->ch_channel, flags, openings,
	    chq->queue_xfers_avail, chq->queue_active),
	    DEBUG_XFERS);

	ata_channel_lock(chp);

	/*
	 * When openings is just 1, can't reserve anything for
	 * recovery. KASSERT() here is to catch code which naively
	 * relies on C_RECOVERY to work under this condition.
	 */
	KASSERT((flags & C_RECOVERY) == 0 || chq->queue_openings > 1);

	if (flags & C_RECOVERY) {
		mask = UINT32_MAX;
	} else {
		if (openings <= 0 || openings > chq->queue_openings)
			openings = chq->queue_openings;

		if (openings > 1) {
			mask = __BIT(openings - 1) - 1;
		} else {
			mask = UINT32_MAX;
		}
	}

retry:
	avail = ffs32(chq->queue_xfers_avail & mask);
	if (avail == 0) {
		/*
		 * Catch code which tries to get another recovery xfer while
		 * already holding one (wrong recursion).
		 */
		KASSERTMSG((flags & C_RECOVERY) == 0,
		    "recovery xfer busy openings %d mask %x avail %x",
		    openings, mask, chq->queue_xfers_avail);

		if (flags & C_WAIT) {
			chq->queue_flags |= QF_NEED_XFER;
			error = cv_wait_sig(&chq->queue_busy, &chp->ch_lock);
			if (error == 0)
				goto retry;
		}

		goto out;
	}

	slot = avail - 1;
	xfer = &chq->queue_xfers[slot];
	chq->queue_xfers_avail &= ~__BIT(slot);

	KASSERT((chq->active_xfers_used & __BIT(slot)) == 0);

	/* zero everything after the callout member */
	memset(&xfer->c_startzero, 0,
	    sizeof(struct ata_xfer) - offsetof(struct ata_xfer, c_startzero));

out:
	ata_channel_unlock(chp);

	ATADEBUG_PRINT((" xfer %p\n", xfer), DEBUG_XFERS);
	return xfer;
}
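
/*
 * Illustrative sketch (not code from this file): a typical consumer
 * allocates an xfer, fills it in and submits it via ata_exec_xfer()
 * (defined in dev/ata/ata.c), then deactivates and frees it on the
 * completion path.  Fill-in details are omitted.
 *
 *	struct ata_xfer *xfer;
 *
 *	xfer = ata_get_xfer_ext(chp, C_WAIT, 0);
 *	if (xfer == NULL)
 *		return;		// wait for a free xfer was interrupted
 *	// ... fill in xfer->c_drive, buffer, callbacks ...
 *	ata_exec_xfer(chp, xfer);
 *	// later, on the completion path:
 *	//	ata_deactivate_xfer(chp, xfer);
 *	//	ata_free_xfer(chp, xfer);
 */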

/*
 * ata_deactivate_xfer() must always be called prior to ata_free_xfer().
 */
void
ata_free_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue *chq = chp->ch_queue;

	ata_channel_lock(chp);

	if (xfer->c_flags & (C_WAITACT|C_WAITTIMO)) {
		/* Someone is waiting for this xfer, so we can't free now */
		xfer->c_flags |= C_FREE;
		cv_signal(&xfer->c_active);
		goto out;
	}

#if NATA_PIOBM		/* XXX wdc dependent code */
	if (xfer->c_flags & C_PIOBM) {
		struct wdc_softc *wdc = CHAN_TO_WDC(chp);

		/* finish the busmastering PIO */
		(*wdc->piobm_done)(wdc->dma_arg,
		    chp->ch_channel, xfer->c_drive);
		chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_PIOBM_WAIT | ATACH_IRQ_WAIT);
	}
#endif

	if (chp->ch_atac->atac_free_hw)
		chp->ch_atac->atac_free_hw(chp);

	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0);
	KASSERT((chq->queue_xfers_avail & __BIT(xfer->c_slot)) == 0);
	chq->queue_xfers_avail |= __BIT(xfer->c_slot);

out:
	if (chq->queue_flags & QF_NEED_XFER) {
		chq->queue_flags &= ~QF_NEED_XFER;
		cv_broadcast(&chq->queue_busy);
	}

	ata_channel_unlock(chp);

	ATADEBUG_PRINT(("%s: channel %d xfer %p qavail 0x%x qact %d\n",
	    __func__, chp->ch_channel, xfer, chq->queue_xfers_avail,
	    chq->queue_active),
	    DEBUG_XFERS);
}

374
375 /*
376 * Must be called without any locks, i.e. with both drive and channel locks
377 * released.
378 */
379 void
380 ata_channel_start(struct ata_channel *chp, int drive)
381 {
382 int i, s;
383 struct ata_drive_datas *drvp;
384
385 s = splbio();
386
387 KASSERT(chp->ch_ndrives > 0);
388
389 #define ATA_DRIVE_START(chp, drive) \
390 do { \
391 KASSERT(drive < chp->ch_ndrives); \
392 drvp = &chp->ch_drive[drive]; \
393 \
394 if (drvp->drive_type != ATA_DRIVET_ATA && \
395 drvp->drive_type != ATA_DRIVET_ATAPI && \
396 drvp->drive_type != ATA_DRIVET_OLD) \
397 continue; \
398 \
399 if (drvp->drv_start != NULL) \
400 (*drvp->drv_start)(drvp->drv_softc); \
401 } while (0)
402
	/*
	 * Process drives in round-robin fashion, starting with the one
	 * after the drive which finished the last transfer, so that no
	 * single drive can completely starve the other drives on the
	 * same channel.  This loop processes all but the current drive,
	 * so it does nothing if there is only one drive on the channel.
	 */
	for (i = (drive + 1) % chp->ch_ndrives; i != drive;
	    i = (i + 1) % chp->ch_ndrives) {
		ATA_DRIVE_START(chp, i);
	}

	/* Now try to kick off xfers on the current drive */
	ATA_DRIVE_START(chp, drive);

	splx(s);
#undef ATA_DRIVE_START
}

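/*
 * Trivial wrappers around the channel mutex, so that other modules do
 * not have to reach into struct ata_channel directly.
 * ata_channel_lock_owned() merely asserts that the lock is held.
 */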
void
ata_channel_lock(struct ata_channel *chp)
{
	mutex_enter(&chp->ch_lock);
}

void
ata_channel_unlock(struct ata_channel *chp)
{
	mutex_exit(&chp->ch_lock);
}

void
ata_channel_lock_owned(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
}