Home | History | Annotate | Line # | Download | only in kern
subr_disk.c revision 1.45
      1 /*	$NetBSD: subr_disk.c,v 1.45 2002/11/01 11:32:01 mrg Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996, 1997, 1999, 2000 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the NetBSD
     22  *	Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 /*
     41  * Copyright (c) 1982, 1986, 1988, 1993
     42  *	The Regents of the University of California.  All rights reserved.
     43  * (c) UNIX System Laboratories, Inc.
     44  * All or some portions of this file are derived from material licensed
     45  * to the University of California by American Telephone and Telegraph
     46  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     47  * the permission of UNIX System Laboratories, Inc.
     48  *
     49  * Redistribution and use in source and binary forms, with or without
     50  * modification, are permitted provided that the following conditions
     51  * are met:
     52  * 1. Redistributions of source code must retain the above copyright
     53  *    notice, this list of conditions and the following disclaimer.
     54  * 2. Redistributions in binary form must reproduce the above copyright
     55  *    notice, this list of conditions and the following disclaimer in the
     56  *    documentation and/or other materials provided with the distribution.
     57  * 3. All advertising materials mentioning features or use of this software
     58  *    must display the following acknowledgement:
     59  *	This product includes software developed by the University of
     60  *	California, Berkeley and its contributors.
     61  * 4. Neither the name of the University nor the names of its contributors
     62  *    may be used to endorse or promote products derived from this software
     63  *    without specific prior written permission.
     64  *
     65  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     66  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     67  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     68  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     69  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     70  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     71  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     72  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     73  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     74  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     75  * SUCH DAMAGE.
     76  *
     77  *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
     78  */
     79 
     80 #include <sys/cdefs.h>
     81 __KERNEL_RCSID(0, "$NetBSD: subr_disk.c,v 1.45 2002/11/01 11:32:01 mrg Exp $");
     82 
     83 #include <sys/param.h>
     84 #include <sys/kernel.h>
     85 #include <sys/malloc.h>
     86 #include <sys/buf.h>
     87 #include <sys/syslog.h>
     88 #include <sys/disklabel.h>
     89 #include <sys/disk.h>
     90 #include <sys/sysctl.h>
     91 
     92 /*
     93  * A global list of all disks attached to the system.  May grow or
     94  * shrink over time.
     95  */
     96 struct	disklist_head disklist;	/* TAILQ_HEAD */
     97 int	disk_count;		/* number of drives in global disklist */
     98 struct simplelock disklist_slock = SIMPLELOCK_INITIALIZER;
     99 
    100 /*
    101  * Compute checksum for disk label.
    102  */
    103 u_int
    104 dkcksum(struct disklabel *lp)
    105 {
    106 	u_short *start, *end;
    107 	u_short sum = 0;
    108 
    109 	start = (u_short *)lp;
    110 	end = (u_short *)&lp->d_partitions[lp->d_npartitions];
    111 	while (start < end)
    112 		sum ^= *start++;
    113 	return (sum);
    114 }
    115 
    116 /*
    117  * Disk error is the preface to plaintive error messages
    118  * about failing disk transfers.  It prints messages of the form
    119 
    120 hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)
    121 
    122  * if the offset of the error in the transfer and a disk label
    123  * are both available.  blkdone should be -1 if the position of the error
    124  * is unknown; the disklabel pointer may be null from drivers that have not
    125  * been converted to use them.  The message is printed with printf
    126  * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
    127  * The message should be completed (with at least a newline) with printf
    128  * or addlog, respectively.  There is no trailing space.
    129  */
void
diskerr(const struct buf *bp, const char *dname, const char *what, int pri,
    int blkdone, const struct disklabel *lp)
{
	int unit = DISKUNIT(bp->b_dev), part = DISKPART(bp->b_dev);
	void (*pr)(const char *, ...);
	char partname = 'a' + part;
	int sn;

	/*
	 * Select the output routine: plain printf for LOG_PRINTF,
	 * otherwise open a log record at priority `pri' and append
	 * the rest of the message with addlog.
	 */
	if (pri != LOG_PRINTF) {
		static const char fmt[] = "";
		log(pri, fmt);
		pr = addlog;
	} else
		pr = printf;
	(*pr)("%s%d%c: %s %sing fsbn ", dname, unit, partname, what,
	    bp->b_flags & B_READ ? "read" : "writ");
	sn = bp->b_blkno;
	if (bp->b_bcount <= DEV_BSIZE)
		(*pr)("%d", sn);
	else {
		/*
		 * Multi-block transfer: print the failing block when
		 * known (blkdone >= 0), then the full transfer range.
		 */
		if (blkdone >= 0) {
			sn += blkdone;
			(*pr)("%d of ", sn);
		}
		(*pr)("%d-%d", bp->b_blkno,
		    bp->b_blkno + (bp->b_bcount - 1) / DEV_BSIZE);
	}
	if (lp && (blkdone >= 0 || bp->b_bcount <= lp->d_secsize)) {
		/*
		 * Translate to an absolute sector via the partition
		 * offset and decompose into cylinder/track/sector
		 * using the label's geometry.
		 */
		sn += lp->d_partitions[part].p_offset;
		(*pr)(" (%s%d bn %d; cn %d", dname, unit, sn,
		    sn / lp->d_secpercyl);
		sn %= lp->d_secpercyl;
		(*pr)(" tn %d sn %d)", sn / lp->d_nsectors,
		    sn % lp->d_nsectors);
	}
}
    167 
    168 /*
    169  * Initialize the disklist.  Called by main() before autoconfiguration.
    170  */
    171 void
    172 disk_init(void)
    173 {
    174 
    175 	TAILQ_INIT(&disklist);
    176 	disk_count = 0;
    177 }
    178 
    179 /*
    180  * Searches the disklist for the disk corresponding to the
    181  * name provided.
    182  */
    183 struct disk *
    184 disk_find(char *name)
    185 {
    186 	struct disk *diskp;
    187 
    188 	if ((name == NULL) || (disk_count <= 0))
    189 		return (NULL);
    190 
    191 	simple_lock(&disklist_slock);
    192 	for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
    193 	    diskp = TAILQ_NEXT(diskp, dk_link))
    194 		if (strcmp(diskp->dk_name, name) == 0) {
    195 			simple_unlock(&disklist_slock);
    196 			return (diskp);
    197 		}
    198 	simple_unlock(&disklist_slock);
    199 
    200 	return (NULL);
    201 }
    202 
    203 /*
    204  * Attach a disk.
    205  */
    206 void
    207 disk_attach(struct disk *diskp)
    208 {
    209 	int s;
    210 
    211 	/*
    212 	 * Allocate and initialize the disklabel structures.  Note that
    213 	 * it's not safe to sleep here, since we're probably going to be
    214 	 * called during autoconfiguration.
    215 	 */
    216 	diskp->dk_label = malloc(sizeof(struct disklabel), M_DEVBUF, M_NOWAIT);
    217 	diskp->dk_cpulabel = malloc(sizeof(struct cpu_disklabel), M_DEVBUF,
    218 	    M_NOWAIT);
    219 	if ((diskp->dk_label == NULL) || (diskp->dk_cpulabel == NULL))
    220 		panic("disk_attach: can't allocate storage for disklabel");
    221 
    222 	memset(diskp->dk_label, 0, sizeof(struct disklabel));
    223 	memset(diskp->dk_cpulabel, 0, sizeof(struct cpu_disklabel));
    224 
    225 	/*
    226 	 * Set the attached timestamp.
    227 	 */
    228 	s = splclock();
    229 	diskp->dk_attachtime = mono_time;
    230 	splx(s);
    231 
    232 	/*
    233 	 * Link into the disklist.
    234 	 */
    235 	simple_lock(&disklist_slock);
    236 	TAILQ_INSERT_TAIL(&disklist, diskp, dk_link);
    237 	simple_unlock(&disklist_slock);
    238 	++disk_count;
    239 }
    240 
    241 /*
    242  * Detach a disk.
    243  */
    244 void
    245 disk_detach(struct disk *diskp)
    246 {
    247 
    248 	/*
    249 	 * Remove from the disklist.
    250 	 */
    251 	if (--disk_count < 0)
    252 		panic("disk_detach: disk_count < 0");
    253 	simple_lock(&disklist_slock);
    254 	TAILQ_REMOVE(&disklist, diskp, dk_link);
    255 	simple_unlock(&disklist_slock);
    256 
    257 	/*
    258 	 * Free the space used by the disklabel structures.
    259 	 */
    260 	free(diskp->dk_label, M_DEVBUF);
    261 	free(diskp->dk_cpulabel, M_DEVBUF);
    262 }
    263 
    264 /*
    265  * Increment a disk's busy counter.  If the counter is going from
    266  * 0 to 1, set the timestamp.
    267  */
    268 void
    269 disk_busy(struct disk *diskp)
    270 {
    271 	int s;
    272 
    273 	/*
    274 	 * XXX We'd like to use something as accurate as microtime(),
    275 	 * but that doesn't depend on the system TOD clock.
    276 	 */
    277 	if (diskp->dk_busy++ == 0) {
    278 		s = splclock();
    279 		diskp->dk_timestamp = mono_time;
    280 		splx(s);
    281 	}
    282 }
    283 
    284 /*
    285  * Decrement a disk's busy counter, increment the byte count, total busy
    286  * time, and reset the timestamp.
    287  */
    288 void
    289 disk_unbusy(struct disk *diskp, long bcount, int read)
    290 {
    291 	int s;
    292 	struct timeval dv_time, diff_time;
    293 
    294 	if (diskp->dk_busy-- == 0) {
    295 		printf("%s: dk_busy < 0\n", diskp->dk_name);
    296 		panic("disk_unbusy");
    297 	}
    298 
    299 	s = splclock();
    300 	dv_time = mono_time;
    301 	splx(s);
    302 
    303 	timersub(&dv_time, &diskp->dk_timestamp, &diff_time);
    304 	timeradd(&diskp->dk_time, &diff_time, &diskp->dk_time);
    305 
    306 	diskp->dk_timestamp = dv_time;
    307 	if (bcount > 0) {
    308 		if (read) {
    309 			diskp->dk_rbytes += bcount;
    310 			diskp->dk_rxfer++;
    311 		} else {
    312 			diskp->dk_wbytes += bcount;
    313 			diskp->dk_wxfer++;
    314 		}
    315 	}
    316 }
    317 
    318 /*
    319  * Reset the metrics counters on the given disk.  Note that we cannot
    320  * reset the busy counter, as it may case a panic in disk_unbusy().
    321  * We also must avoid playing with the timestamp information, as it
    322  * may skew any pending transfer results.
    323  */
    324 void
    325 disk_resetstat(struct disk *diskp)
    326 {
    327 	int s = splbio(), t;
    328 
    329 	diskp->dk_rxfer = 0;
    330 	diskp->dk_rbytes = 0;
    331 	diskp->dk_wxfer = 0;
    332 	diskp->dk_wbytes = 0;
    333 
    334 	t = splclock();
    335 	diskp->dk_attachtime = mono_time;
    336 	splx(t);
    337 
    338 	timerclear(&diskp->dk_time);
    339 
    340 	splx(s);
    341 }
    342 
/*
 * Return a space-separated list of attached disk names via sysctl.
 * A NULL `vwhere' is a sizing probe: only the length needed is
 * computed into *sizep.  Otherwise each name is copied out as one
 * NUL-terminated chunk; the NUL is overwritten by the next chunk's
 * leading space, so the user buffer always ends in '\0'.
 */
int
sysctl_disknames(void *vwhere, size_t *sizep)
{
	char buf[DK_DISKNAMELEN + 1];
	char *where = vwhere;
	struct disk *diskp;
	size_t needed, left, slen;
	int error, first;

	first = 1;		/* no leading space before the first name */
	error = 0;
	needed = 0;
	left = *sizep;

	simple_lock(&disklist_slock);
	for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
	    diskp = TAILQ_NEXT(diskp, dk_link)) {
		if (where == NULL)
			needed += strlen(diskp->dk_name) + 1;
		else {
			memset(buf, 0, sizeof(buf));
			if (first) {
				strncpy(buf, diskp->dk_name, sizeof(buf));
				first = 0;
			} else {
				/* Subsequent names get a ' ' separator. */
				buf[0] = ' ';
				strncpy(buf + 1, diskp->dk_name,
				    sizeof(buf) - 1);
			}
			/* strncpy() may not terminate; force it. */
			buf[DK_DISKNAMELEN] = '\0';
			slen = strlen(buf);
			if (left < slen + 1)
				break;
			/* +1 to copy out the trailing NUL byte */
			error = copyout(buf, where, slen + 1);
			if (error)
				break;
			/*
			 * Advance by slen only: the next chunk's first
			 * byte lands on the NUL just written.
			 */
			where += slen;
			needed += slen;
			left -= slen;
		}
	}
	simple_unlock(&disklist_slock);
	*sizep = needed;
	return (error);
}
    389 
    390 int
    391 sysctl_diskstats(int *name, u_int namelen, void *vwhere, size_t *sizep)
    392 {
    393 	struct disk_sysctl sdisk;
    394 	struct disk *diskp;
    395 	char *where = vwhere;
    396 	size_t tocopy, left;
    397 	int error;
    398 
    399 	if (where == NULL) {
    400 		*sizep = disk_count * sizeof(sdisk);
    401 		return (0);
    402 	}
    403 
    404 	if (namelen == 0)
    405 		tocopy = sizeof(sdisk);
    406 	else
    407 		tocopy = name[0];
    408 
    409 	error = 0;
    410 	left = *sizep;
    411 	memset(&sdisk, 0, sizeof(sdisk));
    412 	*sizep = 0;
    413 
    414 	simple_lock(&disklist_slock);
    415 	TAILQ_FOREACH(diskp, &disklist, dk_link) {
    416 		if (left < sizeof(struct disk_sysctl))
    417 			break;
    418 		strncpy(sdisk.dk_name, diskp->dk_name, sizeof(sdisk.dk_name));
    419 		sdisk.dk_xfer = diskp->dk_rxfer + diskp->dk_wxfer;
    420 		sdisk.dk_rxfer = diskp->dk_rxfer;
    421 		sdisk.dk_wxfer = diskp->dk_wxfer;
    422 		sdisk.dk_seek = diskp->dk_seek;
    423 		sdisk.dk_bytes = diskp->dk_rbytes + diskp->dk_wbytes;
    424 		sdisk.dk_rbytes = diskp->dk_rbytes;
    425 		sdisk.dk_wbytes = diskp->dk_wbytes;
    426 		sdisk.dk_attachtime_sec = diskp->dk_attachtime.tv_sec;
    427 		sdisk.dk_attachtime_usec = diskp->dk_attachtime.tv_usec;
    428 		sdisk.dk_timestamp_sec = diskp->dk_timestamp.tv_sec;
    429 		sdisk.dk_timestamp_usec = diskp->dk_timestamp.tv_usec;
    430 		sdisk.dk_time_sec = diskp->dk_time.tv_sec;
    431 		sdisk.dk_time_usec = diskp->dk_time.tv_usec;
    432 		sdisk.dk_busy = diskp->dk_busy;
    433 
    434 		error = copyout(&sdisk, where, min(tocopy, sizeof(sdisk)));
    435 		if (error)
    436 			break;
    437 		where += tocopy;
    438 		*sizep += tocopy;
    439 		left -= tocopy;
    440 	}
    441 	simple_unlock(&disklist_slock);
    442 	return (error);
    443 }
    444 
/* First-come first-served discipline: a single FIFO of buffers. */
struct bufq_fcfs {
	TAILQ_HEAD(, buf) bq_head;	/* actual list of buffers */
};

/*
 * One-way elevator (disksort) discipline: one list holding two
 * ascending runs back to back; see bufq_disksort_put().
 */
struct bufq_disksort {
	TAILQ_HEAD(, buf) bq_head;	/* actual list of buffers */
};

/*
 * Read-priority tunables: serve up to PRIO_READ_BURST consecutive
 * reads, then PRIO_WRITE_REQ writes, when both queues are non-empty.
 */
#define PRIO_READ_BURST		48
#define PRIO_WRITE_REQ		16

struct bufq_prio {
	TAILQ_HEAD(, buf) bq_read, bq_write; /* actual list of buffers */
	struct buf *bq_write_next;	/* next request in bq_write */
	struct buf *bq_next;		/* current request */
	int bq_read_burst;		/* # of consecutive reads */
};
    462 
    463 
    464 /*
    465  * Check if two buf's are in ascending order.
    466  */
    467 static __inline int
    468 buf_inorder(struct buf *bp, struct buf *bq, int sortby)
    469 {
    470 	int r;
    471 
    472 	if (bp == NULL || bq == NULL)
    473 		return (bq == NULL);
    474 
    475 	if (sortby == BUFQ_SORT_CYLINDER)
    476 		r = bp->b_cylinder - bq->b_cylinder;
    477 	else
    478 		r = 0;
    479 
    480 	if (r == 0)
    481 		r = bp->b_rawblkno - bq->b_rawblkno;
    482 
    483 	return (r <= 0);
    484 }
    485 
    486 
    487 /*
    488  * First-come first-served sort for disks.
    489  *
    490  * Requests are appended to the queue without any reordering.
    491  */
    492 static void
    493 bufq_fcfs_put(struct bufq_state *bufq, struct buf *bp)
    494 {
    495 	struct bufq_fcfs *fcfs = bufq->bq_private;
    496 
    497 	TAILQ_INSERT_TAIL(&fcfs->bq_head, bp, b_actq);
    498 }
    499 
    500 static struct buf *
    501 bufq_fcfs_get(struct bufq_state *bufq, int remove)
    502 {
    503 	struct bufq_fcfs *fcfs = bufq->bq_private;
    504 	struct buf *bp;
    505 
    506 	bp = TAILQ_FIRST(&fcfs->bq_head);
    507 
    508 	if (bp != NULL && remove)
    509 		TAILQ_REMOVE(&fcfs->bq_head, bp, b_actq);
    510 
    511 	return (bp);
    512 }
    513 
    514 
    515 /*
    516  * Seek sort for disks.
    517  *
    518  * There are actually two queues, sorted in ascendening order.  The first
    519  * queue holds those requests which are positioned after the current block;
    520  * the second holds requests which came in after their position was passed.
    521  * Thus we implement a one-way scan, retracting after reaching the end of
    522  * the drive to the first request on the second queue, at which time it
    523  * becomes the first queue.
    524  *
    525  * A one-way scan is natural because of the way UNIX read-ahead blocks are
    526  * allocated.
    527  */
static void
bufq_disksort_put(struct bufq_state *bufq, struct buf *bp)
{
	struct bufq_disksort *disksort = bufq->bq_private;
	struct buf *bq, *nbq;
	int sortby;

	sortby = bufq->bq_flags & BUFQ_SORT_MASK;

	bq = TAILQ_FIRST(&disksort->bq_head);

	/*
	 * If the queue is empty it's easy; we just go on the end.
	 */
	if (bq == NULL) {
		TAILQ_INSERT_TAIL(&disksort->bq_head, bp, b_actq);
		return;
	}

	/*
	 * If we lie before the currently active request, then we
	 * must locate the second request list and add ourselves to it.
	 */
	if (buf_inorder(bp, bq, sortby)) {
		while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
			/*
			 * Check for an ``inversion'' in the normally ascending
			 * block numbers, indicating the start of the second
			 * request list.
			 */
			if (buf_inorder(nbq, bq, sortby)) {
				/*
				 * Search the second request list for the first
				 * request at a larger block number.  We go
				 * after that; if there is no such request, we
				 * go at the end.
				 */
				do {
					if (buf_inorder(bp, nbq, sortby))
						goto insert;
					bq = nbq;
				} while ((nbq =
				    TAILQ_NEXT(bq, b_actq)) != NULL);
				goto insert;		/* after last */
			}
			bq = nbq;
		}
		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}
	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
		/*
		 * We want to go after the current request if there is an
		 * inversion after it (i.e. it is the end of the first
		 * request list), or if the next request is a larger cylinder
		 * than our request.
		 */
		if (buf_inorder(nbq, bq, sortby) ||
		    buf_inorder(bp, nbq, sortby))
			goto insert;
		bq = nbq;
	}
	/*
	 * Neither a second list nor a larger request... we go at the end of
	 * the first list, which is the same as the end of the whole schebang.
	 */
	/* All paths arrive here with bq set to the request to insert after. */
insert:	TAILQ_INSERT_AFTER(&disksort->bq_head, bq, bp, b_actq);
}
    603 
    604 static struct buf *
    605 bufq_disksort_get(struct bufq_state *bufq, int remove)
    606 {
    607 	struct bufq_disksort *disksort = bufq->bq_private;
    608 	struct buf *bp;
    609 
    610 	bp = TAILQ_FIRST(&disksort->bq_head);
    611 
    612 	if (bp != NULL && remove)
    613 		TAILQ_REMOVE(&disksort->bq_head, bp, b_actq);
    614 
    615 	return (bp);
    616 }
    617 
    618 
    619 /*
    620  * Seek sort for disks.
    621  *
    622  * There are two queues.  The first queue holds read requests; the second
    623  * holds write requests.  The read queue is first-come first-served; the
    624  * write queue is sorted in ascendening block order.
    625  * The read queue is processed first.  After PRIO_READ_BURST consecutive
    626  * read requests with non-empty write queue PRIO_WRITE_REQ requests from
    627  * the write queue will be processed.
    628  */
static void
bufq_prio_put(struct bufq_state *bufq, struct buf *bp)
{
	struct bufq_prio *prio = bufq->bq_private;
	struct buf *bq;
	int sortby;

	sortby = bufq->bq_flags & BUFQ_SORT_MASK;

	/*
	 * If it's a read request append it to the list.
	 * The read queue is plain FCFS; only writes are sorted.
	 */
	if ((bp->b_flags & B_READ) == B_READ) {
		TAILQ_INSERT_TAIL(&prio->bq_read, bp, b_actq);
		return;
	}

	bq = TAILQ_FIRST(&prio->bq_write);

	/*
	 * If the write list is empty, simply append it to the list.
	 * The new request also becomes the next write to service.
	 */
	if (bq == NULL) {
		TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
		prio->bq_write_next = bp;
		return;
	}

	/*
	 * If we lie after the next request, insert after this request.
	 * (Scanning can start at bq_write_next instead of the head.)
	 */
	if (buf_inorder(prio->bq_write_next, bp, sortby))
		bq = prio->bq_write_next;

	/*
	 * Search for the first request at a larger block number.
	 * We go before this request if it exists.
	 */
	while (bq != NULL && buf_inorder(bq, bp, sortby))
		bq = TAILQ_NEXT(bq, b_actq);

	/*
	 * NOTE(review): bq_write_next is deliberately left pointing at
	 * the same buf even when bp is inserted ahead of it; the get
	 * side advances it.  Confirm this matches the intended scan
	 * order before changing.
	 */
	if (bq != NULL)
		TAILQ_INSERT_BEFORE(bq, bp, b_actq);
	else
		TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
}
    675 
static struct buf *
bufq_prio_get(struct bufq_state *bufq, int remove)
{
	struct bufq_prio *prio = bufq->bq_private;
	struct buf *bp;

	/*
	 * If no current request, get next from the lists.
	 * bq_next caches the selection so repeated peeks (remove == 0)
	 * return the same request until it is dequeued.
	 */
	if (prio->bq_next == NULL) {
		/*
		 * If at least one list is empty, select the other.
		 */
		if (TAILQ_FIRST(&prio->bq_read) == NULL) {
			prio->bq_next = prio->bq_write_next;
			prio->bq_read_burst = 0;
		} else if (prio->bq_write_next == NULL) {
			prio->bq_next = TAILQ_FIRST(&prio->bq_read);
			prio->bq_read_burst = 0;
		} else {
			/*
			 * Both list have requests.  Select the read list up
			 * to PRIO_READ_BURST times, then select the write
			 * list PRIO_WRITE_REQ times.
			 */
			if (prio->bq_read_burst++ < PRIO_READ_BURST)
				prio->bq_next = TAILQ_FIRST(&prio->bq_read);
			else if (prio->bq_read_burst <
			    PRIO_READ_BURST + PRIO_WRITE_REQ)
				prio->bq_next = prio->bq_write_next;
			else {
				/* Burst cycle complete; start over. */
				prio->bq_next = TAILQ_FIRST(&prio->bq_read);
				prio->bq_read_burst = 0;
			}
		}
	}

	bp = prio->bq_next;

	if (bp != NULL && remove) {
		if ((bp->b_flags & B_READ) == B_READ)
			TAILQ_REMOVE(&prio->bq_read, bp, b_actq);
		else {
			/*
			 * Advance the write pointer before removing
			 * bp since it is actually prio->bq_write_next.
			 */
			prio->bq_write_next =
			    TAILQ_NEXT(prio->bq_write_next, b_actq);
			TAILQ_REMOVE(&prio->bq_write, bp, b_actq);
			/* Passed the end of the write list: wrap around. */
			if (prio->bq_write_next == NULL)
				prio->bq_write_next =
				    TAILQ_FIRST(&prio->bq_write);
		}

		/* Force a fresh selection on the next call. */
		prio->bq_next = NULL;
	}

	return (bp);
}
    736 
    737 /*
    738  * Create a device buffer queue.
    739  */
/*
 * Create a device buffer queue: validate the sort and method bits in
 * `flags', install the matching get/put handlers, and allocate the
 * per-method private state.
 */
void
bufq_alloc(struct bufq_state *bufq, int flags)
{
	struct bufq_fcfs *fcfs;
	struct bufq_disksort *disksort;
	struct bufq_prio *prio;

	bufq->bq_flags = flags;

	/* Only FCFS may be used without a sort key. */
	switch (flags & BUFQ_SORT_MASK) {
	case BUFQ_SORT_RAWBLOCK:
	case BUFQ_SORT_CYLINDER:
		break;
	case 0:
		if ((flags & BUFQ_METHOD_MASK) == BUFQ_FCFS)
			break;
		/* FALLTHROUGH */
	default:
		panic("bufq_alloc: sort out of range");
	}

	/*
	 * NOTE(review): MALLOC is called with M_ZERO only, i.e. a
	 * sleeping allocation; presumably callers never invoke this
	 * from interrupt context -- confirm before reusing elsewhere.
	 */
	switch (flags & BUFQ_METHOD_MASK) {
	case BUFQ_FCFS:
		bufq->bq_get = bufq_fcfs_get;
		bufq->bq_put = bufq_fcfs_put;
		MALLOC(bufq->bq_private, struct bufq_fcfs *,
		    sizeof(struct bufq_fcfs), M_DEVBUF, M_ZERO);
		fcfs = (struct bufq_fcfs *)bufq->bq_private;
		TAILQ_INIT(&fcfs->bq_head);
		break;
	case BUFQ_DISKSORT:
		bufq->bq_get = bufq_disksort_get;
		bufq->bq_put = bufq_disksort_put;
		MALLOC(bufq->bq_private, struct bufq_disksort *,
		    sizeof(struct bufq_disksort), M_DEVBUF, M_ZERO);
		disksort = (struct bufq_disksort *)bufq->bq_private;
		TAILQ_INIT(&disksort->bq_head);
		break;
	case BUFQ_READ_PRIO:
		bufq->bq_get = bufq_prio_get;
		bufq->bq_put = bufq_prio_put;
		MALLOC(bufq->bq_private, struct bufq_prio *,
		    sizeof(struct bufq_prio), M_DEVBUF, M_ZERO);
		prio = (struct bufq_prio *)bufq->bq_private;
		TAILQ_INIT(&prio->bq_read);
		TAILQ_INIT(&prio->bq_write);
		break;
	default:
		panic("bufq_alloc: method out of range");
	}
}
    791 
    792 /*
    793  * Destroy a device buffer queue.
    794  */
    795 void
    796 bufq_free(struct bufq_state *bufq)
    797 {
    798 
    799 	KASSERT(bufq->bq_private != NULL);
    800 	KASSERT(BUFQ_PEEK(bufq) == NULL);
    801 
    802 	FREE(bufq->bq_private, M_DEVBUF);
    803 	bufq->bq_get = NULL;
    804 	bufq->bq_put = NULL;
    805 }
    806