/*	$NetBSD: ata_subr.c,v 1.13 2020/12/23 08:17:01 skrll Exp $	*/

/*
 * Copyright (c) 1998, 2001 Manuel Bouyer.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ata_subr.c,v 1.13 2020/12/23 08:17:01 skrll Exp $");

#include "opt_ata.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/errno.h>
#include <sys/ataio.h>
#include <sys/kmem.h>
#include <sys/intr.h>
#include <sys/bus.h>
#include <sys/once.h>
#include <sys/bitops.h>

#define ATABUS_PRIVATE

#include <dev/ata/ataconf.h>
#include <dev/ata/atareg.h>
#include <dev/ata/atavar.h>
#include <dev/ic/wdcvar.h>	/* for PIOBM */

#define DEBUG_FUNCS  0x08
#define DEBUG_PROBE  0x10
#define DEBUG_DETACH 0x20
#define	DEBUG_XFERS  0x40
#ifdef ATADEBUG
extern int atadebug_mask;
#define ATADEBUG_PRINT(args, level) \
	if (atadebug_mask & (level)) \
		printf args
#else
#define ATADEBUG_PRINT(args, level)
#endif

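/*
 * Reset a transfer queue to its pristine state: no queued or active
 * transfers, freeze and active counts cleared, and every opening marked
 * as available.
 */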
static void
ata_queue_reset(struct ata_queue *chq)
{
	/* make sure that we can use polled commands */
	SIMPLEQ_INIT(&chq->queue_xfer);
	TAILQ_INIT(&chq->active_xfers);
	chq->queue_freeze = 0;
	chq->queue_active = 0;
	chq->active_xfers_used = 0;
	chq->queue_xfers_avail = __BIT(chq->queue_openings) - 1;
}

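/*
 * Look up the active xfer occupying hardware slot 'hwslot'.  The slot
 * must be marked active; the routine asserts both that and that a
 * matching xfer is found.
 */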
struct ata_xfer *
ata_queue_hwslot_to_xfer(struct ata_channel *chp, int hwslot)
{
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer = NULL;

	ata_channel_lock(chp);

	KASSERTMSG(hwslot < chq->queue_openings, "hwslot %d >= openings %d",
	    hwslot, chq->queue_openings);
	KASSERTMSG((chq->active_xfers_used & __BIT(hwslot)) != 0,
	    "hwslot %d not active", hwslot);

	/* Usually the first entry will be the one */
	TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
		if (xfer->c_slot == hwslot)
			break;
	}

	ata_channel_unlock(chp);

	KASSERTMSG((xfer != NULL),
	    "%s: xfer with slot %d not found (active %x)", __func__,
	    hwslot, chq->active_xfers_used);

	return xfer;
}

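/*
 * Return the first active non-NCQ xfer, or NULL if the head of the
 * active list is an NCQ command.  The caller must hold the channel lock.
 */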
struct ata_xfer *
ata_queue_get_active_xfer_locked(struct ata_channel *chp)
{
	struct ata_xfer *xfer;

	KASSERT(mutex_owned(&chp->ch_lock));
	xfer = TAILQ_FIRST(&chp->ch_queue->active_xfers);

	if (xfer && ISSET(xfer->c_flags, C_NCQ)) {
		/* Spurious call, never return NCQ xfer from this interface */
		xfer = NULL;
	}

	return xfer;
}

/*
 * This interface is only supposed to be used when there is exactly
 * one outstanding command and there is no information about which slot
 * triggered the command.  The ata_queue_hwslot_to_xfer() interface
 * is preferred in all NCQ cases.
 */
struct ata_xfer *
ata_queue_get_active_xfer(struct ata_channel *chp)
{
	struct ata_xfer *xfer = NULL;

	ata_channel_lock(chp);
	xfer = ata_queue_get_active_xfer_locked(chp);
	ata_channel_unlock(chp);

	return xfer;
}

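/*
 * Illustrative sketch only (mydrv_intr is a hypothetical interrupt
 * handler): for a controller that never has more than one command in
 * flight, the completed xfer can be fetched without knowing its slot,
 * along the lines of
 *
 *	void
 *	mydrv_intr(void *arg)
 *	{
 *		struct ata_channel *chp = arg;
 *		struct ata_xfer *xfer = ata_queue_get_active_xfer(chp);
 *
 *		if (xfer == NULL)
 *			return;
 *		xfer->ops->c_intr(chp, xfer, 1);
 *	}
 */

/*
 * Return the active xfer issued for the given drive.  There must be one;
 * the routine asserts if none is found.
 */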
struct ata_xfer *
ata_queue_drive_active_xfer(struct ata_channel *chp, int drive)
{
	struct ata_xfer *xfer = NULL;

	ata_channel_lock(chp);

	TAILQ_FOREACH(xfer, &chp->ch_queue->active_xfers, c_activechain) {
		if (xfer->c_drive == drive)
			break;
	}
	KASSERT(xfer != NULL);

	ata_channel_unlock(chp);

	return xfer;
}

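/*
 * Allocate and initialize a transfer queue with the requested number of
 * openings, clamped to the range [1, ATA_MAX_OPENINGS].
 */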
struct ata_queue *
ata_queue_alloc(uint8_t openings)
{
	if (openings == 0)
		openings = 1;

	if (openings > ATA_MAX_OPENINGS)
		openings = ATA_MAX_OPENINGS;

	struct ata_queue *chq = kmem_zalloc(sizeof(*chq), KM_SLEEP);

	chq->queue_openings = openings;
	ata_queue_reset(chq);

	cv_init(&chq->queue_drain, "atdrn");
	cv_init(&chq->queue_idle, "qidl");

	cv_init(&chq->c_active, "ataact");
	cv_init(&chq->c_cmd_finish, "atafin");

	return chq;
}

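/*
 * Release a transfer queue previously allocated with ata_queue_alloc().
 */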
void
ata_queue_free(struct ata_queue *chq)
{
	cv_destroy(&chq->queue_drain);
	cv_destroy(&chq->queue_idle);

	cv_destroy(&chq->c_active);
	cv_destroy(&chq->c_cmd_finish);

	kmem_free(chq, sizeof(*chq));
}

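/*
 * Initialize the channel lock, condition variable and timeout callout.
 * A single-opening queue is allocated if the attachment did not provide
 * one of its own.
 */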
void
ata_channel_init(struct ata_channel *chp)
{
	mutex_init(&chp->ch_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&chp->ch_thr_idle, "atath");

	callout_init(&chp->c_timo_callout, 0); 	/* XXX MPSAFE */

	/* Optionally setup the queue, too */
	if (chp->ch_queue == NULL) {
		chp->ch_queue = ata_queue_alloc(1);
	}
}

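/*
 * Tear down what ata_channel_init() set up: free the queue, then halt
 * and destroy the timeout callout before destroying the lock and the
 * condition variable.
 */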
void
ata_channel_destroy(struct ata_channel *chp)
{
	if (chp->ch_queue != NULL) {
		ata_queue_free(chp->ch_queue);
		chp->ch_queue = NULL;
	}

	mutex_enter(&chp->ch_lock);
	callout_halt(&chp->c_timo_callout, &chp->ch_lock);
	callout_destroy(&chp->c_timo_callout);
	mutex_exit(&chp->ch_lock);

	mutex_destroy(&chp->ch_lock);
	cv_destroy(&chp->ch_thr_idle);
}

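/*
 * Illustrative sketch only (the mydrv names are hypothetical): a
 * controller front-end typically pairs these calls in its attach and
 * detach paths, along the lines of
 *
 *	(attach)
 *	chp->ch_queue = ata_queue_alloc(MYDRV_OPENINGS);	(optional)
 *	ata_channel_init(chp);
 *
 *	(detach)
 *	ata_channel_destroy(chp);	(also frees chp->ch_queue)
 */

/*
 * Channel timeout handler, scheduled through c_timo_callout: unless the
 * channel is recovering, every active xfer is marked C_TIMEOU and its
 * completion handler is called.
 */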
void
ata_timeout(void *v)
{
	struct ata_channel *chp = v;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer, *nxfer;
	int s;

	s = splbio();				/* XXX MPSAFE */

	callout_ack(&chp->c_timo_callout);

	if (chp->ch_flags & ATACH_RECOVERING) {
		/* Do nothing, recovery will requeue the xfers */
		goto done;
	}

	/*
	 * If there is a timeout, it means the last enqueued command
	 * timed out, and thus all active commands have timed out.
	 * XXX locking
	 */
	TAILQ_FOREACH_SAFE(xfer, &chq->active_xfers, c_activechain, nxfer) {
		ATADEBUG_PRINT(("%s: slot %d\n", __func__, xfer->c_slot),
		    DEBUG_FUNCS|DEBUG_XFERS);

		if (ata_timo_xfer_check(xfer)) {
			/* Already logged */
			continue;
		}

		/* Mark as timed out. Do not print anything, wd(4) will. */
		xfer->c_flags |= C_TIMEOU;
		xfer->ops->c_intr(xfer->c_chp, xfer, 0);
	}

done:
	splx(s);
}

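/*
 * Helpers to take and release the per-channel lock, and to assert that
 * the calling thread owns it.
 */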
void
ata_channel_lock(struct ata_channel *chp)
{
	mutex_enter(&chp->ch_lock);
}

void
ata_channel_unlock(struct ata_channel *chp)
{
	mutex_exit(&chp->ch_lock);
}

void
ata_channel_lock_owned(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
}

#ifdef ATADEBUG
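/*
 * Dump the channel and queue state to the console for debugging.
 */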
void
atachannel_debug(struct ata_channel *chp)
{
	struct ata_queue *chq = chp->ch_queue;

	printf("  ch %s flags 0x%x ndrives %d\n",
	    device_xname(chp->atabus), chp->ch_flags, chp->ch_ndrives);
	printf("  que: flags 0x%x avail 0x%x used 0x%x\n",
	    chq->queue_flags, chq->queue_xfers_avail, chq->active_xfers_used);
	printf("        act %d freez %d open %u\n",
	    chq->queue_active, chq->queue_freeze, chq->queue_openings);
}
#endif /* ATADEBUG */

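/*
 * Allocate a free command slot, constrained by both the queue openings
 * and the per-drive openings.  Returns false if no suitable slot is
 * currently available.
 */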
bool
ata_queue_alloc_slot(struct ata_channel *chp, uint8_t *c_slot,
    uint8_t drv_openings)
{
	struct ata_queue *chq = chp->ch_queue;
	uint32_t avail, mask;

	KASSERT(mutex_owned(&chp->ch_lock));
	KASSERT(chq->queue_active < chq->queue_openings);

	ATADEBUG_PRINT(("%s: channel %d qavail 0x%x qact %d\n",
	    __func__, chp->ch_channel,
	    chq->queue_xfers_avail, chq->queue_active),
	    DEBUG_XFERS);

	mask = __BIT(MIN(chq->queue_openings, drv_openings)) - 1;

	avail = ffs32(chq->queue_xfers_avail & mask);
	if (avail == 0)
		return false;

	KASSERT(avail > 0);
	KASSERT(avail <= drv_openings);

	*c_slot = avail - 1;
	chq->queue_xfers_avail &= ~__BIT(*c_slot);

	KASSERT((chq->active_xfers_used & __BIT(*c_slot)) == 0);
	return true;
}

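/*
 * Return a command slot previously obtained from ata_queue_alloc_slot().
 * The slot must no longer be marked active.
 */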
void
ata_queue_free_slot(struct ata_channel *chp, uint8_t c_slot)
{
	struct ata_queue *chq = chp->ch_queue;

	KASSERT(mutex_owned(&chp->ch_lock));

	KASSERT((chq->active_xfers_used & __BIT(c_slot)) == 0);
	KASSERT((chq->queue_xfers_avail & __BIT(c_slot)) == 0);

	chq->queue_xfers_avail |= __BIT(c_slot);
}

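/*
 * Illustrative sketch only of the slot protocol the two helpers above
 * expect; the drv_openings value and the surrounding driver code are
 * hypothetical.  Both calls must be made with the channel lock held:
 *
 *	(start path)
 *	uint8_t slot;
 *	if (!ata_queue_alloc_slot(chp, &slot, drv_openings))
 *		return;		(no opening free, retry later)
 *	... issue the command using "slot" ...
 *
 *	(completion path, once the slot is no longer marked active)
 *	ata_queue_free_slot(chp, slot);
 */

/*
 * Temporarily set aside all active xfers: remember them in queue_hold
 * and clear the active mask.
 */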
    344 ata_queue_hold(struct ata_channel *chp)
    345 {
    346 	struct ata_queue *chq = chp->ch_queue;
    347 
    348 	KASSERT(mutex_owned(&chp->ch_lock));
    349 
    350 	chq->queue_hold |= chq->active_xfers_used;
    351 	chq->active_xfers_used = 0;
    352 }
    353 
    354 void
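/*
 * Undo ata_queue_hold(): merge the held xfers back into the active mask.
 */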
    355 ata_queue_unhold(struct ata_channel *chp)
    356 {
    357 	struct ata_queue *chq = chp->ch_queue;
    358 
    359 	KASSERT(mutex_owned(&chp->ch_lock));
    360 
    361 	chq->active_xfers_used |= chq->queue_hold;
    362 	chq->queue_hold = 0;
    363 }
    364 
    365 /*
    366  * Must be called with interrupts blocked.
    367  */
    368 uint32_t
    369 ata_queue_active(struct ata_channel *chp)
    370 {
    371 	struct ata_queue *chq = chp->ch_queue;
    372 
    373 	if (chp->ch_flags & ATACH_DETACHED)
    374 		return 0;
    375 
    376 	return chq->active_xfers_used;
    377 }
    378 
    379 uint8_t
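/*
 * Return the number of openings (command slots) of the channel's queue.
 */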
    380 ata_queue_openings(struct ata_channel *chp)
    381 {
    382 	return chp->ch_queue->queue_openings;
    383 }
    384