Home | History | Annotate | Line # | Download | only in kern
subr_disk.c revision 1.47
      1 /*	$NetBSD: subr_disk.c,v 1.47 2002/11/04 03:50:07 mrg Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996, 1997, 1999, 2000 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  * 3. All advertising materials mentioning features or use of this software
     20  *    must display the following acknowledgement:
     21  *	This product includes software developed by the NetBSD
     22  *	Foundation, Inc. and its contributors.
     23  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  *    contributors may be used to endorse or promote products derived
     25  *    from this software without specific prior written permission.
     26  *
     27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  * POSSIBILITY OF SUCH DAMAGE.
     38  */
     39 
     40 /*
     41  * Copyright (c) 1982, 1986, 1988, 1993
     42  *	The Regents of the University of California.  All rights reserved.
     43  * (c) UNIX System Laboratories, Inc.
     44  * All or some portions of this file are derived from material licensed
     45  * to the University of California by American Telephone and Telegraph
     46  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     47  * the permission of UNIX System Laboratories, Inc.
     48  *
     49  * Redistribution and use in source and binary forms, with or without
     50  * modification, are permitted provided that the following conditions
     51  * are met:
     52  * 1. Redistributions of source code must retain the above copyright
     53  *    notice, this list of conditions and the following disclaimer.
     54  * 2. Redistributions in binary form must reproduce the above copyright
     55  *    notice, this list of conditions and the following disclaimer in the
     56  *    documentation and/or other materials provided with the distribution.
     57  * 3. All advertising materials mentioning features or use of this software
     58  *    must display the following acknowledgement:
     59  *	This product includes software developed by the University of
     60  *	California, Berkeley and its contributors.
     61  * 4. Neither the name of the University nor the names of its contributors
     62  *    may be used to endorse or promote products derived from this software
     63  *    without specific prior written permission.
     64  *
     65  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     66  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     67  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     68  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     69  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     70  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     71  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     72  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     73  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     74  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     75  * SUCH DAMAGE.
     76  *
     77  *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
     78  */
     79 
     80 #include <sys/cdefs.h>
     81 __KERNEL_RCSID(0, "$NetBSD: subr_disk.c,v 1.47 2002/11/04 03:50:07 mrg Exp $");
     82 
     83 #include <sys/param.h>
     84 #include <sys/kernel.h>
     85 #include <sys/malloc.h>
     86 #include <sys/buf.h>
     87 #include <sys/syslog.h>
     88 #include <sys/disklabel.h>
     89 #include <sys/disk.h>
     90 #include <sys/sysctl.h>
     91 #include <lib/libkern/libkern.h>
     92 
      93 /*
      94  * A global list of all disks attached to the system.  May grow or
      95  * shrink over time.
      96  */
      97 struct	disklist_head disklist;	/* TAILQ_HEAD */
      98 int	disk_count;		/* number of drives in global disklist */
/* Guards disklist traversal/modification.  NOTE(review): disk_count is
 * updated outside this lock in attach/detach paths — confirm intended. */
      99 struct simplelock disklist_slock = SIMPLELOCK_INITIALIZER;
    100 
    101 /*
    102  * Compute checksum for disk label.
    103  */
    104 u_int
    105 dkcksum(struct disklabel *lp)
    106 {
    107 	u_short *start, *end;
    108 	u_short sum = 0;
    109 
    110 	start = (u_short *)lp;
    111 	end = (u_short *)&lp->d_partitions[lp->d_npartitions];
    112 	while (start < end)
    113 		sum ^= *start++;
    114 	return (sum);
    115 }
    116 
    117 /*
    118  * Disk error is the preface to plaintive error messages
    119  * about failing disk transfers.  It prints messages of the form
    120 
    121 hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)
    122 
    123  * if the offset of the error in the transfer and a disk label
    124  * are both available.  blkdone should be -1 if the position of the error
    125  * is unknown; the disklabel pointer may be null from drivers that have not
    126  * been converted to use them.  The message is printed with printf
    127  * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
    128  * The message should be completed (with at least a newline) with printf
    129  * or addlog, respectively.  There is no trailing space.
    130  */
     131 void
     132 diskerr(const struct buf *bp, const char *dname, const char *what, int pri,
     133     int blkdone, const struct disklabel *lp)
     134 {
     135 	int unit = DISKUNIT(bp->b_dev), part = DISKPART(bp->b_dev);
     136 	void (*pr)(const char *, ...);
     137 	char partname = 'a' + part;
     138 	int sn;
     139 
	/*
	 * Choose the output function.  For syslog priorities we first
	 * emit an empty log() record at `pri' so that the following
	 * addlog() calls are attached to a record of that priority;
	 * LOG_PRINTF sends everything straight to the console.
	 */
     140 	if (pri != LOG_PRINTF) {
     141 		static const char fmt[] = "";
     142 		log(pri, fmt);
     143 		pr = addlog;
     144 	} else
     145 		pr = printf;
     146 	(*pr)("%s%d%c: %s %sing fsbn ", dname, unit, partname, what,
     147 	    bp->b_flags & B_READ ? "read" : "writ");
	/* sn accumulates the filesystem block number of the failure. */
     148 	sn = bp->b_blkno;
     149 	if (bp->b_bcount <= DEV_BSIZE)
     150 		(*pr)("%d", sn);
     151 	else {
     152 		if (blkdone >= 0) {
     153 			sn += blkdone;
     154 			(*pr)("%d of ", sn);
     155 		}
		/* Print the full first-last block range of the transfer. */
     156 		(*pr)("%d-%d", bp->b_blkno,
     157 		    bp->b_blkno + (bp->b_bcount - 1) / DEV_BSIZE);
     158 	}
	/*
	 * If a label is available and sn identifies a single sector,
	 * also report the absolute block number and its cylinder/track/
	 * sector decomposition on the raw disk.
	 */
     159 	if (lp && (blkdone >= 0 || bp->b_bcount <= lp->d_secsize)) {
     160 		sn += lp->d_partitions[part].p_offset;
     161 		(*pr)(" (%s%d bn %d; cn %d", dname, unit, sn,
     162 		    sn / lp->d_secpercyl);
     163 		sn %= lp->d_secpercyl;
     164 		(*pr)(" tn %d sn %d)", sn / lp->d_nsectors,
     165 		    sn % lp->d_nsectors);
     166 	}
     167 }
    168 
    169 /*
    170  * Initialize the disklist.  Called by main() before autoconfiguration.
    171  */
    172 void
    173 disk_init(void)
    174 {
    175 
    176 	TAILQ_INIT(&disklist);
    177 	disk_count = 0;
    178 }
    179 
    180 /*
    181  * Searches the disklist for the disk corresponding to the
    182  * name provided.
    183  */
    184 struct disk *
    185 disk_find(char *name)
    186 {
    187 	struct disk *diskp;
    188 
    189 	if ((name == NULL) || (disk_count <= 0))
    190 		return (NULL);
    191 
    192 	simple_lock(&disklist_slock);
    193 	for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
    194 	    diskp = TAILQ_NEXT(diskp, dk_link))
    195 		if (strcmp(diskp->dk_name, name) == 0) {
    196 			simple_unlock(&disklist_slock);
    197 			return (diskp);
    198 		}
    199 	simple_unlock(&disklist_slock);
    200 
    201 	return (NULL);
    202 }
    203 
    204 /*
    205  * Attach a disk.
    206  */
    207 void
    208 disk_attach(struct disk *diskp)
    209 {
    210 	int s;
    211 
    212 	/*
    213 	 * Allocate and initialize the disklabel structures.  Note that
    214 	 * it's not safe to sleep here, since we're probably going to be
    215 	 * called during autoconfiguration.
    216 	 */
    217 	diskp->dk_label = malloc(sizeof(struct disklabel), M_DEVBUF, M_NOWAIT);
    218 	diskp->dk_cpulabel = malloc(sizeof(struct cpu_disklabel), M_DEVBUF,
    219 	    M_NOWAIT);
    220 	if ((diskp->dk_label == NULL) || (diskp->dk_cpulabel == NULL))
    221 		panic("disk_attach: can't allocate storage for disklabel");
    222 
    223 	memset(diskp->dk_label, 0, sizeof(struct disklabel));
    224 	memset(diskp->dk_cpulabel, 0, sizeof(struct cpu_disklabel));
    225 
    226 	/*
    227 	 * Set the attached timestamp.
    228 	 */
    229 	s = splclock();
    230 	diskp->dk_attachtime = mono_time;
    231 	splx(s);
    232 
    233 	/*
    234 	 * Link into the disklist.
    235 	 */
    236 	simple_lock(&disklist_slock);
    237 	TAILQ_INSERT_TAIL(&disklist, diskp, dk_link);
    238 	simple_unlock(&disklist_slock);
    239 	++disk_count;
    240 }
    241 
    242 /*
    243  * Detach a disk.
    244  */
    245 void
    246 disk_detach(struct disk *diskp)
    247 {
    248 
    249 	/*
    250 	 * Remove from the disklist.
    251 	 */
    252 	if (--disk_count < 0)
    253 		panic("disk_detach: disk_count < 0");
    254 	simple_lock(&disklist_slock);
    255 	TAILQ_REMOVE(&disklist, diskp, dk_link);
    256 	simple_unlock(&disklist_slock);
    257 
    258 	/*
    259 	 * Free the space used by the disklabel structures.
    260 	 */
    261 	free(diskp->dk_label, M_DEVBUF);
    262 	free(diskp->dk_cpulabel, M_DEVBUF);
    263 }
    264 
    265 /*
    266  * Increment a disk's busy counter.  If the counter is going from
    267  * 0 to 1, set the timestamp.
    268  */
    269 void
    270 disk_busy(struct disk *diskp)
    271 {
    272 	int s;
    273 
    274 	/*
    275 	 * XXX We'd like to use something as accurate as microtime(),
    276 	 * but that doesn't depend on the system TOD clock.
    277 	 */
    278 	if (diskp->dk_busy++ == 0) {
    279 		s = splclock();
    280 		diskp->dk_timestamp = mono_time;
    281 		splx(s);
    282 	}
    283 }
    284 
    285 /*
    286  * Decrement a disk's busy counter, increment the byte count, total busy
    287  * time, and reset the timestamp.
    288  */
    289 void
    290 disk_unbusy(struct disk *diskp, long bcount, int read)
    291 {
    292 	int s;
    293 	struct timeval dv_time, diff_time;
    294 
    295 	if (diskp->dk_busy-- == 0) {
    296 		printf("%s: dk_busy < 0\n", diskp->dk_name);
    297 		panic("disk_unbusy");
    298 	}
    299 
    300 	s = splclock();
    301 	dv_time = mono_time;
    302 	splx(s);
    303 
    304 	timersub(&dv_time, &diskp->dk_timestamp, &diff_time);
    305 	timeradd(&diskp->dk_time, &diff_time, &diskp->dk_time);
    306 
    307 	diskp->dk_timestamp = dv_time;
    308 	if (bcount > 0) {
    309 		if (read) {
    310 			diskp->dk_rbytes += bcount;
    311 			diskp->dk_rxfer++;
    312 		} else {
    313 			diskp->dk_wbytes += bcount;
    314 			diskp->dk_wxfer++;
    315 		}
    316 	}
    317 }
    318 
    319 /*
    320  * Reset the metrics counters on the given disk.  Note that we cannot
    321  * reset the busy counter, as it may case a panic in disk_unbusy().
    322  * We also must avoid playing with the timestamp information, as it
    323  * may skew any pending transfer results.
    324  */
    325 void
    326 disk_resetstat(struct disk *diskp)
    327 {
    328 	int s = splbio(), t;
    329 
    330 	diskp->dk_rxfer = 0;
    331 	diskp->dk_rbytes = 0;
    332 	diskp->dk_wxfer = 0;
    333 	diskp->dk_wbytes = 0;
    334 
    335 	t = splclock();
    336 	diskp->dk_attachtime = mono_time;
    337 	splx(t);
    338 
    339 	timerclear(&diskp->dk_time);
    340 
    341 	splx(s);
    342 }
    343 
/*
 * sysctl helper: copy out the names of all attached disks as one
 * space-separated, NUL-terminated string, or, when vwhere is NULL,
 * report in *sizep the buffer size userland must supply.  Returns 0
 * or a copyout() error.
 */
     344 int
     345 sysctl_disknames(void *vwhere, size_t *sizep)
     346 {
     347 	char buf[DK_DISKNAMELEN + 1];
     348 	char *where = vwhere;
     349 	struct disk *diskp;
     350 	size_t needed, left, slen;
     351 	int error, first;
     352 
     353 	first = 1;
     354 	error = 0;
     355 	needed = 0;
     356 	left = *sizep;
     357 
     358 	simple_lock(&disklist_slock);
     359 	for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
     360 	    diskp = TAILQ_NEXT(diskp, dk_link)) {
     361 		if (where == NULL)
     362 			needed += strlen(diskp->dk_name) + 1;
     363 		else {
			/*
			 * Build " name" (or just "name" for the first
			 * entry) in a bounded scratch buffer.
			 */
     364 			memset(buf, 0, sizeof(buf));
     365 			if (first) {
     366 				strncpy(buf, diskp->dk_name, sizeof(buf));
     367 				first = 0;
     368 			} else {
     369 				buf[0] = ' ';
     370 				strncpy(buf + 1, diskp->dk_name,
     371 				    sizeof(buf) - 1);
     372 			}
     373 			buf[DK_DISKNAMELEN] = '\0';
     374 			slen = strlen(buf);
     375 			if (left < slen + 1)
     376 				break;
     377 			/* +1 to copy out the trailing NUL byte */
			/*
			 * NOTE(review): copyout() can fault/sleep and is
			 * called while disklist_slock is held — confirm
			 * this is acceptable for simplelocks here.
			 */
     378 			error = copyout(buf, where, slen + 1);
     379 			if (error)
     380 				break;
			/*
			 * Advance by slen only: the NUL just written is
			 * overwritten by the next name's separator, so a
			 * terminator survives only after the final name.
			 */
     381 			where += slen;
     382 			needed += slen;
     383 			left -= slen;
     384 		}
     385 	}
     386 	simple_unlock(&disklist_slock);
     387 	*sizep = needed;
     388 	return (error);
     389 }
    390 
/*
 * sysctl helper: copy out one struct disk_sysctl per attached disk.
 * name[0], when supplied, is userland's notion of sizeof(struct
 * disk_sysctl) and becomes the per-entry copy stride, allowing older
 * binaries with a shorter struct to keep working.  With vwhere NULL,
 * only the required buffer size is reported.
 */
     391 int
     392 sysctl_diskstats(int *name, u_int namelen, void *vwhere, size_t *sizep)
     393 {
     394 	struct disk_sysctl sdisk;
     395 	struct disk *diskp;
     396 	char *where = vwhere;
     397 	size_t tocopy, left;
     398 	int error;
     399 
	/* Size probe: report how much space a full dump would take. */
     400 	if (where == NULL) {
     401 		if (namelen == 0)
     402 			*sizep = disk_count * sizeof(sdisk);
     403 		else
     404 			*sizep = disk_count * name[0];
     405 		return (0);
     406 	}
     407 
     408 	if (namelen == 0)
     409 		/*
     410 		 * The original hw.diskstats call was broken and did not
     411 		 * require the userland to pass in it's size of struct
     412 		 * disk_sysctl.  This was fixed after NetBSD 1.6 was
     413 		 * released, and any applications that do not pass in
     414 		 * the size are given an error only, unless we care about
     415 		 * 1.6 compatibility.
     416 		 */
     417 #ifdef COMPAT_16
     418 		tocopy = offsetof(struct disk_sysctl, dk_rxfer);
     419 #else
     420 		return (EINVAL);
     421 #endif
     422 	else
		/*
		 * NOTE(review): name[0] comes from userland and is used
		 * unvalidated as the copy stride — a value of 0 or one
		 * larger than sizeof(sdisk) gives odd results; confirm
		 * the sysctl caller clamps it.
		 */
     423 		tocopy = name[0];
     424 
     425 	error = 0;
     426 	left = *sizep;
     427 	memset(&sdisk, 0, sizeof(sdisk));
     428 	*sizep = 0;
     429 
     430 	simple_lock(&disklist_slock);
     431 	TAILQ_FOREACH(diskp, &disklist, dk_link) {
     432 		if (left < tocopy)
     433 			break;
		/* Snapshot this disk's counters into the export struct. */
     434 		strncpy(sdisk.dk_name, diskp->dk_name, sizeof(sdisk.dk_name));
     435 		sdisk.dk_xfer = diskp->dk_rxfer + diskp->dk_wxfer;
     436 		sdisk.dk_rxfer = diskp->dk_rxfer;
     437 		sdisk.dk_wxfer = diskp->dk_wxfer;
     438 		sdisk.dk_seek = diskp->dk_seek;
     439 		sdisk.dk_bytes = diskp->dk_rbytes + diskp->dk_wbytes;
     440 		sdisk.dk_rbytes = diskp->dk_rbytes;
     441 		sdisk.dk_wbytes = diskp->dk_wbytes;
     442 		sdisk.dk_attachtime_sec = diskp->dk_attachtime.tv_sec;
     443 		sdisk.dk_attachtime_usec = diskp->dk_attachtime.tv_usec;
     444 		sdisk.dk_timestamp_sec = diskp->dk_timestamp.tv_sec;
     445 		sdisk.dk_timestamp_usec = diskp->dk_timestamp.tv_usec;
     446 		sdisk.dk_time_sec = diskp->dk_time.tv_sec;
     447 		sdisk.dk_time_usec = diskp->dk_time.tv_usec;
     448 		sdisk.dk_busy = diskp->dk_busy;
     449 
		/*
		 * NOTE(review): copyout() may fault/sleep while
		 * disklist_slock is held — see sysctl_disknames.
		 */
     450 		error = copyout(&sdisk, where, min(tocopy, sizeof(sdisk)));
     451 		if (error)
     452 			break;
     453 		where += tocopy;
     454 		*sizep += tocopy;
     455 		left -= tocopy;
     456 	}
     457 	simple_unlock(&disklist_slock);
     458 	return (error);
     459 }
    460 
/*
 * Per-strategy private state for the buffer queues created by
 * bufq_alloc(), hung off bufq_state::bq_private.
 */
     461 struct bufq_fcfs {
     462 	TAILQ_HEAD(, buf) bq_head;	/* actual list of buffers */
     463 };
     464 
     465 struct bufq_disksort {
     466 	TAILQ_HEAD(, buf) bq_head;	/* actual list of buffers */
     467 };
     468 
/*
 * BUFQ_READ_PRIO tuning: serve up to PRIO_READ_BURST consecutive reads
 * before letting PRIO_WRITE_REQ queued writes through.
 */
     469 #define PRIO_READ_BURST		48
     470 #define PRIO_WRITE_REQ		16
     471 
     472 struct bufq_prio {
     473 	TAILQ_HEAD(, buf) bq_read, bq_write; /* actual list of buffers */
     474 	struct buf *bq_write_next;	/* next request in bq_write */
     475 	struct buf *bq_next;		/* current request */
     476 	int bq_read_burst;		/* # of consecutive reads */
     477 };
    478 
    479 
    480 /*
    481  * Check if two buf's are in ascending order.
    482  */
    483 static __inline int
    484 buf_inorder(struct buf *bp, struct buf *bq, int sortby)
    485 {
    486 	int r;
    487 
    488 	if (bp == NULL || bq == NULL)
    489 		return (bq == NULL);
    490 
    491 	if (sortby == BUFQ_SORT_CYLINDER)
    492 		r = bp->b_cylinder - bq->b_cylinder;
    493 	else
    494 		r = 0;
    495 
    496 	if (r == 0)
    497 		r = bp->b_rawblkno - bq->b_rawblkno;
    498 
    499 	return (r <= 0);
    500 }
    501 
    502 
    503 /*
    504  * First-come first-served sort for disks.
    505  *
    506  * Requests are appended to the queue without any reordering.
    507  */
    508 static void
    509 bufq_fcfs_put(struct bufq_state *bufq, struct buf *bp)
    510 {
    511 	struct bufq_fcfs *fcfs = bufq->bq_private;
    512 
    513 	TAILQ_INSERT_TAIL(&fcfs->bq_head, bp, b_actq);
    514 }
    515 
    516 static struct buf *
    517 bufq_fcfs_get(struct bufq_state *bufq, int remove)
    518 {
    519 	struct bufq_fcfs *fcfs = bufq->bq_private;
    520 	struct buf *bp;
    521 
    522 	bp = TAILQ_FIRST(&fcfs->bq_head);
    523 
    524 	if (bp != NULL && remove)
    525 		TAILQ_REMOVE(&fcfs->bq_head, bp, b_actq);
    526 
    527 	return (bp);
    528 }
    529 
    530 
    531 /*
    532  * Seek sort for disks.
    533  *
    534  * There are actually two queues, sorted in ascendening order.  The first
    535  * queue holds those requests which are positioned after the current block;
    536  * the second holds requests which came in after their position was passed.
    537  * Thus we implement a one-way scan, retracting after reaching the end of
    538  * the drive to the first request on the second queue, at which time it
    539  * becomes the first queue.
    540  *
    541  * A one-way scan is natural because of the way UNIX read-ahead blocks are
    542  * allocated.
    543  */
     544 static void
     545 bufq_disksort_put(struct bufq_state *bufq, struct buf *bp)
     546 {
     547 	struct bufq_disksort *disksort = bufq->bq_private;
     548 	struct buf *bq, *nbq;
     549 	int sortby;
     550 
     551 	sortby = bufq->bq_flags & BUFQ_SORT_MASK;
     552 
     553 	bq = TAILQ_FIRST(&disksort->bq_head);
     554 
     555 	/*
     556 	 * If the queue is empty it's easy; we just go on the end.
     557 	 */
     558 	if (bq == NULL) {
     559 		TAILQ_INSERT_TAIL(&disksort->bq_head, bp, b_actq);
     560 		return;
     561 	}
     562 
     563 	/*
     564 	 * If we lie before the currently active request, then we
     565 	 * must locate the second request list and add ourselves to it.
     566 	 */
     567 	if (buf_inorder(bp, bq, sortby)) {
     568 		while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
     569 			/*
     570 			 * Check for an ``inversion'' in the normally ascending
     571 			 * block numbers, indicating the start of the second
     572 			 * request list.
     573 			 */
     574 			if (buf_inorder(nbq, bq, sortby)) {
     575 				/*
     576 				 * Search the second request list for the first
     577 				 * request at a larger block number.  We go
     578 				 * after that; if there is no such request, we
     579 				 * go at the end.
     580 				 */
     581 				do {
     582 					if (buf_inorder(bp, nbq, sortby))
     583 						goto insert;
     584 					bq = nbq;
     585 				} while ((nbq =
     586 				    TAILQ_NEXT(bq, b_actq)) != NULL);
     587 				goto insert;		/* after last */
     588 			}
     589 			bq = nbq;
     590 		}
     591 		/*
     592 		 * No inversions... we will go after the last, and
     593 		 * be the first request in the second request list.
     594 		 */
     595 		goto insert;
     596 	}
     597 	/*
     598 	 * Request is at/after the current request...
     599 	 * sort in the first request list.
     600 	 */
     601 	while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
     602 		/*
     603 		 * We want to go after the current request if there is an
     604 		 * inversion after it (i.e. it is the end of the first
     605 		 * request list), or if the next request is a larger cylinder
     606 		 * than our request.
     607 		 */
     608 		if (buf_inorder(nbq, bq, sortby) ||
     609 		    buf_inorder(bp, nbq, sortby))
     610 			goto insert;
     611 		bq = nbq;
     612 	}
     613 	/*
     614 	 * Neither a second list nor a larger request... we go at the end of
     615 	 * the first list, which is the same as the end of the whole schebang.
     616 	 */
	/* Every path arrives here with bq = the request we insert after. */
     617 insert:	TAILQ_INSERT_AFTER(&disksort->bq_head, bq, bp, b_actq);
     618 }
    619 
    620 static struct buf *
    621 bufq_disksort_get(struct bufq_state *bufq, int remove)
    622 {
    623 	struct bufq_disksort *disksort = bufq->bq_private;
    624 	struct buf *bp;
    625 
    626 	bp = TAILQ_FIRST(&disksort->bq_head);
    627 
    628 	if (bp != NULL && remove)
    629 		TAILQ_REMOVE(&disksort->bq_head, bp, b_actq);
    630 
    631 	return (bp);
    632 }
    633 
    634 
    635 /*
    636  * Seek sort for disks.
    637  *
    638  * There are two queues.  The first queue holds read requests; the second
    639  * holds write requests.  The read queue is first-come first-served; the
    640  * write queue is sorted in ascendening block order.
    641  * The read queue is processed first.  After PRIO_READ_BURST consecutive
    642  * read requests with non-empty write queue PRIO_WRITE_REQ requests from
    643  * the write queue will be processed.
    644  */
     645 static void
     646 bufq_prio_put(struct bufq_state *bufq, struct buf *bp)
     647 {
     648 	struct bufq_prio *prio = bufq->bq_private;
     649 	struct buf *bq;
     650 	int sortby;
     651 
     652 	sortby = bufq->bq_flags & BUFQ_SORT_MASK;
     653 
     654 	/*
     655 	 * If it's a read request append it to the list.
     656 	 */
     657 	if ((bp->b_flags & B_READ) == B_READ) {
     658 		TAILQ_INSERT_TAIL(&prio->bq_read, bp, b_actq);
     659 		return;
     660 	}
     661 
     662 	bq = TAILQ_FIRST(&prio->bq_write);
     663 
     664 	/*
     665 	 * If the write list is empty, simply append it to the list.
	 * bq_write_next must be (re)established here: it tracks the
	 * next write to be serviced and the get side relies on it.
     666 	 */
     667 	if (bq == NULL) {
     668 		TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
     669 		prio->bq_write_next = bp;
     670 		return;
     671 	}
     672 
     673 	/*
     674 	 * If we lie after the next request, insert after this request.
	 * (Starting the scan at bq_write_next keeps the one-way sweep:
	 * requests behind the service point wait for the next pass.)
     675 	 */
     676 	if (buf_inorder(prio->bq_write_next, bp, sortby))
     677 		bq = prio->bq_write_next;
     678 
     679 	/*
     680 	 * Search for the first request at a larger block number.
     681 	 * We go before this request if it exists.
     682 	 */
     683 	while (bq != NULL && buf_inorder(bq, bp, sortby))
     684 		bq = TAILQ_NEXT(bq, b_actq);
     685 
     686 	if (bq != NULL)
     687 		TAILQ_INSERT_BEFORE(bq, bp, b_actq);
     688 	else
     689 		TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
     690 }
    691 
     692 static struct buf *
     693 bufq_prio_get(struct bufq_state *bufq, int remove)
     694 {
     695 	struct bufq_prio *prio = bufq->bq_private;
     696 	struct buf *bp;
     697 
     698 	/*
     699 	 * If no current request, get next from the lists.
     700 	 */
     701 	if (prio->bq_next == NULL) {
     702 		/*
     703 		 * If at least one list is empty, select the other.
     704 		 */
     705 		if (TAILQ_FIRST(&prio->bq_read) == NULL) {
     706 			prio->bq_next = prio->bq_write_next;
     707 			prio->bq_read_burst = 0;
     708 		} else if (prio->bq_write_next == NULL) {
     709 			prio->bq_next = TAILQ_FIRST(&prio->bq_read);
     710 			prio->bq_read_burst = 0;
     711 		} else {
     712 			/*
     713 			 * Both list have requests.  Select the read list up
     714 			 * to PRIO_READ_BURST times, then select the write
     715 			 * list PRIO_WRITE_REQ times.
     716 			 */
     717 			if (prio->bq_read_burst++ < PRIO_READ_BURST)
     718 				prio->bq_next = TAILQ_FIRST(&prio->bq_read);
     719 			else if (prio->bq_read_burst <
     720 			    PRIO_READ_BURST + PRIO_WRITE_REQ)
     721 				prio->bq_next = prio->bq_write_next;
     722 			else {
				/* Burst window exhausted; restart with reads. */
     723 				prio->bq_next = TAILQ_FIRST(&prio->bq_read);
     724 				prio->bq_read_burst = 0;
     725 			}
     726 		}
     727 	}
     728 
     729 	bp = prio->bq_next;
     730 
     731 	if (bp != NULL && remove) {
     732 		if ((bp->b_flags & B_READ) == B_READ)
     733 			TAILQ_REMOVE(&prio->bq_read, bp, b_actq);
     734 		else {
     735 			/*
     736 			 * Advance the write pointer before removing
     737 			 * bp since it is actually prio->bq_write_next.
     738 			 */
     739 			prio->bq_write_next =
     740 			    TAILQ_NEXT(prio->bq_write_next, b_actq);
     741 			TAILQ_REMOVE(&prio->bq_write, bp, b_actq);
			/* Reached the end of the sweep: wrap to the head. */
     742 			if (prio->bq_write_next == NULL)
     743 				prio->bq_write_next =
     744 				    TAILQ_FIRST(&prio->bq_write);
     745 		}
     746 
		/* Force a fresh selection on the next call. */
     747 		prio->bq_next = NULL;
     748 	}
     749 
     750 	return (bp);
     751 }
    752 
    753 /*
    754  * Create a device buffer queue.
    755  */
     756 void
     757 bufq_alloc(struct bufq_state *bufq, int flags)
     758 {
     759 	struct bufq_fcfs *fcfs;
     760 	struct bufq_disksort *disksort;
     761 	struct bufq_prio *prio;
     762 
     763 	bufq->bq_flags = flags;
     764 
	/*
	 * Validate the sort key first: only FCFS may omit one, since
	 * it never compares requests.
	 */
     765 	switch (flags & BUFQ_SORT_MASK) {
     766 	case BUFQ_SORT_RAWBLOCK:
     767 	case BUFQ_SORT_CYLINDER:
     768 		break;
     769 	case 0:
     770 		if ((flags & BUFQ_METHOD_MASK) == BUFQ_FCFS)
     771 			break;
     772 		/* FALLTHROUGH */
     773 	default:
     774 		panic("bufq_alloc: sort out of range");
     775 	}
     776 
	/*
	 * Wire up the get/put methods and allocate the private state.
	 * MALLOC with M_ZERO and no M_NOWAIT: the allocation may sleep
	 * and returns zeroed memory.
	 */
     777 	switch (flags & BUFQ_METHOD_MASK) {
     778 	case BUFQ_FCFS:
     779 		bufq->bq_get = bufq_fcfs_get;
     780 		bufq->bq_put = bufq_fcfs_put;
     781 		MALLOC(bufq->bq_private, struct bufq_fcfs *,
     782 		    sizeof(struct bufq_fcfs), M_DEVBUF, M_ZERO);
     783 		fcfs = (struct bufq_fcfs *)bufq->bq_private;
     784 		TAILQ_INIT(&fcfs->bq_head);
     785 		break;
     786 	case BUFQ_DISKSORT:
     787 		bufq->bq_get = bufq_disksort_get;
     788 		bufq->bq_put = bufq_disksort_put;
     789 		MALLOC(bufq->bq_private, struct bufq_disksort *,
     790 		    sizeof(struct bufq_disksort), M_DEVBUF, M_ZERO);
     791 		disksort = (struct bufq_disksort *)bufq->bq_private;
     792 		TAILQ_INIT(&disksort->bq_head);
     793 		break;
     794 	case BUFQ_READ_PRIO:
     795 		bufq->bq_get = bufq_prio_get;
     796 		bufq->bq_put = bufq_prio_put;
     797 		MALLOC(bufq->bq_private, struct bufq_prio *,
     798 		    sizeof(struct bufq_prio), M_DEVBUF, M_ZERO);
     799 		prio = (struct bufq_prio *)bufq->bq_private;
     800 		TAILQ_INIT(&prio->bq_read);
     801 		TAILQ_INIT(&prio->bq_write);
     802 		break;
     803 	default:
     804 		panic("bufq_alloc: method out of range");
     805 	}
     806 }
    807 
    808 /*
    809  * Destroy a device buffer queue.
    810  */
     811 void
     812 bufq_free(struct bufq_state *bufq)
     813 {
     814 
	/*
	 * The queue must have been allocated and fully drained by the
	 * caller before it can be destroyed.
	 */
     815 	KASSERT(bufq->bq_private != NULL);
     816 	KASSERT(BUFQ_PEEK(bufq) == NULL);
     817 
	/* Release the private state and defang the method pointers. */
     818 	FREE(bufq->bq_private, M_DEVBUF);
     819 	bufq->bq_get = NULL;
     820 	bufq->bq_put = NULL;
     821 }
    822