subr_disk.c revision 1.41
/*	$NetBSD: subr_disk.c,v 1.41 2002/07/23 14:00:16 hannken Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_disk.c,v 1.41 2002/07/23 14:00:16 hannken Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/syslog.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/sysctl.h>

/*
 * A global list of all disks attached to the system.  May grow or
 * shrink over time.
 */
struct	disklist_head disklist;	/* TAILQ_HEAD */
int	disk_count;		/* number of drives in global disklist */
struct simplelock disklist_slock = SIMPLELOCK_INITIALIZER;

/*
 * XXX This interface will be removed in the near future!
 *
 * Seek sort for disks.  We depend on the driver which calls us using b_resid
 * as the current cylinder number.
 *
 * The argument bufq is an I/O queue for the device, on which there are
 * actually two queues, sorted in ascending cylinder order.  The first
 * queue holds those requests which are positioned after the current
 * cylinder (in the first request); the second holds requests which came
 * in after their cylinder number was passed.  Thus we implement a one-way
 * scan, retracting after reaching the end of the drive to the first request
 * on the second queue, at which time it becomes the first queue.
 *
 * A one-way scan is natural because of the way UNIX read-ahead blocks are
 * allocated.
 *
 * This is further adjusted by any `barriers' which may exist in the queue.
 * bufq->bq_barrier points to the last such ordered request.
 */
void
disksort_cylinder(struct buf_queue *bufq, struct buf *bp)
{
	struct buf *bq, *nbq;

	/*
	 * If there are ordered requests on the queue, we must start
	 * the elevator sort after the last of these.
	 */
	if ((bq = bufq->bq_barrier) == NULL)
		bq = BUFQ_FIRST(bufq);

	/*
	 * If the queue is empty, or if it's an ordered request,
	 * it's easy; we just go on the end.
	 */
	if (bq == NULL || (bp->b_flags & B_ORDERED) != 0) {
		BUFQ_INSERT_TAIL(bufq, bp);
		return;
	}

	/*
	 * If we lie after the first (currently active) request, then we
	 * must locate the second request list and add ourselves to it.
	 */
	if (bp->b_cylinder < bq->b_cylinder ||
	    (bp->b_cylinder == bq->b_cylinder &&
	     bp->b_rawblkno < bq->b_rawblkno)) {
		while ((nbq = BUFQ_NEXT(bq)) != NULL) {
			/*
			 * Check for an ``inversion'' in the normally ascending
			 * cylinder numbers, indicating the start of the second
			 * request list.
			 */
			if (nbq->b_cylinder < bq->b_cylinder) {
				/*
				 * Search the second request list for the first
				 * request at a larger cylinder number.  We go
				 * before that; if there is no such request, we
				 * go at the end.
				 */
				do {
					if (bp->b_cylinder < nbq->b_cylinder)
						goto insert;
					if (bp->b_cylinder == nbq->b_cylinder &&
					    bp->b_rawblkno < nbq->b_rawblkno)
						goto insert;
					bq = nbq;
				} while ((nbq = BUFQ_NEXT(bq)) != NULL);
				goto insert;		/* after last */
			}
			bq = nbq;
		}
		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}
	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while ((nbq = BUFQ_NEXT(bq)) != NULL) {
		/*
		 * We want to go after the current request if there is an
		 * inversion after it (i.e. it is the end of the first
		 * request list), or if the next request is at a larger
		 * cylinder than our request.
		 */
		if (nbq->b_cylinder < bq->b_cylinder ||
		    bp->b_cylinder < nbq->b_cylinder ||
		    (bp->b_cylinder == nbq->b_cylinder &&
		     bp->b_rawblkno < nbq->b_rawblkno))
			goto insert;
		bq = nbq;
	}
	/*
	 * Neither a second list nor a larger request... we go at the end of
	 * the first list, which is the same as the end of the whole shebang.
	 */
insert:	BUFQ_INSERT_AFTER(bufq, bq, bp);
}

/*
 * Seek sort for disks.  This version sorts based on b_rawblkno, the
 * raw block number.
 *
 * As before, there are actually two queues, sorted in ascending block
 * order.  The first queue holds those requests which are positioned after
 * the current block (in the first request); the second holds requests which
 * came in after their block number was passed.  Thus we implement a one-way
 * scan, retracting after reaching the end of the drive to the first request
 * on the second queue, at which time it becomes the first queue.
 *
 * A one-way scan is natural because of the way UNIX read-ahead blocks are
 * allocated.
 *
 * This is further adjusted by any `barriers' which may exist in the queue.
 * bufq->bq_barrier points to the last such ordered request.
 */
void
disksort_blkno(struct buf_queue *bufq, struct buf *bp)
{
	struct buf *bq, *nbq;

	/*
	 * If there are ordered requests on the queue, we must start
	 * the elevator sort after the last of these.
	 */
	if ((bq = bufq->bq_barrier) == NULL)
		bq = BUFQ_FIRST(bufq);

	/*
	 * If the queue is empty, or if it's an ordered request,
	 * it's easy; we just go on the end.
	 */
	if (bq == NULL || (bp->b_flags & B_ORDERED) != 0) {
		BUFQ_INSERT_TAIL(bufq, bp);
		return;
	}

	/*
	 * If we lie after the first (currently active) request, then we
	 * must locate the second request list and add ourselves to it.
	 */
	if (bp->b_rawblkno < bq->b_rawblkno) {
		while ((nbq = BUFQ_NEXT(bq)) != NULL) {
			/*
			 * Check for an ``inversion'' in the normally ascending
			 * block numbers, indicating the start of the second
			 * request list.
			 */
			if (nbq->b_rawblkno < bq->b_rawblkno) {
				/*
				 * Search the second request list for the first
				 * request at a larger block number.  We go
				 * before that; if there is no such request, we
				 * go at the end.
				 */
				do {
					if (bp->b_rawblkno < nbq->b_rawblkno)
						goto insert;
					bq = nbq;
				} while ((nbq = BUFQ_NEXT(bq)) != NULL);
				goto insert;		/* after last */
			}
			bq = nbq;
		}
		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}
	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while ((nbq = BUFQ_NEXT(bq)) != NULL) {
		/*
		 * We want to go after the current request if there is an
		 * inversion after it (i.e. it is the end of the first
		 * request list), or if the next request is at a larger
		 * block number than our request.
		 */
		if (nbq->b_rawblkno < bq->b_rawblkno ||
		    bp->b_rawblkno < nbq->b_rawblkno)
			goto insert;
		bq = nbq;
	}
	/*
	 * Neither a second list nor a larger request... we go at the end of
	 * the first list, which is the same as the end of the whole shebang.
	 */
insert:	BUFQ_INSERT_AFTER(bufq, bq, bp);
}

/*
 * Seek non-sort for disks.  This version simply inserts requests at
 * the tail of the queue.
 */
void
disksort_tail(struct buf_queue *bufq, struct buf *bp)
{

	BUFQ_INSERT_TAIL(bufq, bp);
}

/*
 * XXX End of to be removed interface!
 */
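
/*
 * A sketch of how a driver's strategy routine would use this old
 * interface ("sc", its members, and xxstart() are hypothetical, and
 * error/bounds checking is omitted):
 *
 *	s = splbio();
 *	disksort_blkno(&sc->sc_q, bp);
 *	if (!sc->sc_active)
 *		xxstart(sc);
 *	splx(s);
 */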

/*
 * Compute checksum for disk label.
 */
u_int
dkcksum(struct disklabel *lp)
{
	u_short *start, *end;
	u_short sum = 0;

	start = (u_short *)lp;
	end = (u_short *)&lp->d_partitions[lp->d_npartitions];
	while (start < end)
		sum ^= *start++;
	return (sum);
}
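
/*
 * The checksum is the XOR of every 16-bit word in the label up to and
 * including the last defined partition.  Since d_checksum itself lies
 * inside that range, a writer zeroes it before computing, and a reader
 * only has to check that the whole label XORs to zero.  A minimal
 * sketch:
 *
 *	lp->d_checksum = 0;
 *	lp->d_checksum = dkcksum(lp);		(writing a label)
 *
 *	valid = (dkcksum(lp) == 0);		(validating one)
 */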

/*
 * Disk error is the preface to plaintive error messages
 * about failing disk transfers.  It prints messages of the form

hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)

 * if the offset of the error in the transfer and a disk label
 * are both available.  blkdone should be -1 if the position of the error
 * is unknown; the disklabel pointer may be NULL for drivers that have
 * not been converted to use disklabels.  The message is printed with
 * printf if pri is LOG_PRINTF, otherwise it uses log at the specified
 * priority.  The message should be completed (with at least a newline)
 * with printf or addlog, respectively.  There is no trailing space.
 */
void
diskerr(const struct buf *bp, const char *dname, const char *what, int pri,
    int blkdone, const struct disklabel *lp)
{
	int unit = DISKUNIT(bp->b_dev), part = DISKPART(bp->b_dev);
	void (*pr)(const char *, ...);
	char partname = 'a' + part;
	int sn;

	if (pri != LOG_PRINTF) {
		/* The indirection avoids an empty-format-string warning. */
		static const char fmt[] = "";
		log(pri, fmt);
		pr = addlog;
	} else
		pr = printf;
	(*pr)("%s%d%c: %s %sing fsbn ", dname, unit, partname, what,
	    bp->b_flags & B_READ ? "read" : "writ");
	sn = bp->b_blkno;
	if (bp->b_bcount <= DEV_BSIZE)
		(*pr)("%d", sn);
	else {
		if (blkdone >= 0) {
			sn += blkdone;
			(*pr)("%d of ", sn);
		}
		(*pr)("%d-%d", bp->b_blkno,
		    bp->b_blkno + (bp->b_bcount - 1) / DEV_BSIZE);
	}
	if (lp && (blkdone >= 0 || bp->b_bcount <= lp->d_secsize)) {
		sn += lp->d_partitions[part].p_offset;
		(*pr)(" (%s%d bn %d; cn %d", dname, unit, sn,
		    sn / lp->d_secpercyl);
		sn %= lp->d_secpercyl;
		(*pr)(" tn %d sn %d)", sn / lp->d_nsectors,
		    sn % lp->d_nsectors);
	}
}
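
/*
 * A hedged usage sketch (the "wd"/"sc" names are hypothetical); note
 * that the caller supplies the terminating newline:
 *
 *	diskerr(bp, "wd", "hard error", LOG_PRINTF, -1,
 *	    sc->sc_dk.dk_label);
 *	printf("\n");
 */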

/*
 * Initialize the disklist.  Called by main() before autoconfiguration.
 */
void
disk_init(void)
{

	TAILQ_INIT(&disklist);
	disk_count = 0;
}

/*
 * Searches the disklist for the disk corresponding to the
 * name provided.
 */
struct disk *
disk_find(char *name)
{
	struct disk *diskp;

	if ((name == NULL) || (disk_count <= 0))
		return (NULL);

	simple_lock(&disklist_slock);
	for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
	    diskp = TAILQ_NEXT(diskp, dk_link))
		if (strcmp(diskp->dk_name, name) == 0) {
			simple_unlock(&disklist_slock);
			return (diskp);
		}
	simple_unlock(&disklist_slock);

	return (NULL);
}

/*
 * Attach a disk.
 */
void
disk_attach(struct disk *diskp)
{
	int s;

	/*
	 * Allocate and initialize the disklabel structures.  Note that
	 * it's not safe to sleep here, since we're probably going to be
	 * called during autoconfiguration.
	 */
	diskp->dk_label = malloc(sizeof(struct disklabel), M_DEVBUF, M_NOWAIT);
	diskp->dk_cpulabel = malloc(sizeof(struct cpu_disklabel), M_DEVBUF,
	    M_NOWAIT);
	if ((diskp->dk_label == NULL) || (diskp->dk_cpulabel == NULL))
		panic("disk_attach: can't allocate storage for disklabel");

	memset(diskp->dk_label, 0, sizeof(struct disklabel));
	memset(diskp->dk_cpulabel, 0, sizeof(struct cpu_disklabel));

	/*
	 * Set the attached timestamp.
	 */
	s = splclock();
	diskp->dk_attachtime = mono_time;
	splx(s);

	/*
	 * Link into the disklist.
	 */
	simple_lock(&disklist_slock);
	TAILQ_INSERT_TAIL(&disklist, diskp, dk_link);
	simple_unlock(&disklist_slock);
	++disk_count;
}

/*
 * Detach a disk.
 */
void
disk_detach(struct disk *diskp)
{

	/*
	 * Remove from the disklist.
	 */
	if (--disk_count < 0)
		panic("disk_detach: disk_count < 0");
	simple_lock(&disklist_slock);
	TAILQ_REMOVE(&disklist, diskp, dk_link);
	simple_unlock(&disklist_slock);

	/*
	 * Free the space used by the disklabel structures.
	 */
	free(diskp->dk_label, M_DEVBUF);
	free(diskp->dk_cpulabel, M_DEVBUF);
}
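
/*
 * A sketch of the expected attach-time usage from a driver ("sc" and
 * xxdkdriver are hypothetical):
 *
 *	sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
 *	sc->sc_dk.dk_driver = &xxdkdriver;
 *	disk_attach(&sc->sc_dk);
 *
 * with a matching disk_detach(&sc->sc_dk) when the device goes away.
 */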

/*
 * Increment a disk's busy counter.  If the counter is going from
 * 0 to 1, set the timestamp.
 */
void
disk_busy(struct disk *diskp)
{
	int s;

	/*
	 * XXX We'd like something as accurate as microtime() that
	 * does not depend on the system TOD clock.
	 */
	if (diskp->dk_busy++ == 0) {
		s = splclock();
		diskp->dk_timestamp = mono_time;
		splx(s);
	}
}

/*
 * Decrement a disk's busy counter, add to the byte count and total
 * busy time, and reset the timestamp.
 */
void
disk_unbusy(struct disk *diskp, long bcount)
{
	int s;
	struct timeval dv_time, diff_time;

	if (diskp->dk_busy-- == 0) {
		printf("%s: dk_busy < 0\n", diskp->dk_name);
		panic("disk_unbusy");
	}

	s = splclock();
	dv_time = mono_time;
	splx(s);

	timersub(&dv_time, &diskp->dk_timestamp, &diff_time);
	timeradd(&diskp->dk_time, &diff_time, &diskp->dk_time);

	diskp->dk_timestamp = dv_time;
	if (bcount > 0) {
		diskp->dk_bytes += bcount;
		diskp->dk_xfer++;
	}
}
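
/*
 * A sketch of the accounting pattern drivers are expected to follow
 * (hypothetical "sc"; the second argument is the number of bytes that
 * actually transferred):
 *
 *	disk_busy(&sc->sc_dk);		(just before starting the I/O)
 *	...
 *	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid);
 *	biodone(bp);
 */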

/*
 * Reset the metrics counters on the given disk.  Note that we cannot
 * reset the busy counter, as that may cause a panic in disk_unbusy().
 * We also must avoid playing with the timestamp information, as it
 * may skew any pending transfer results.
 */
void
disk_resetstat(struct disk *diskp)
{
	int s = splbio(), t;

	diskp->dk_xfer = 0;
	diskp->dk_bytes = 0;

	t = splclock();
	diskp->dk_attachtime = mono_time;
	splx(t);

	timerclear(&diskp->dk_time);

	splx(s);
}

/*
 * sysctl helper: copy out the space-separated names of all attached
 * disks, e.g. "wd0 cd0".
 */
int
sysctl_disknames(void *vwhere, size_t *sizep)
{
	char buf[DK_DISKNAMELEN + 1];
	char *where = vwhere;
	struct disk *diskp;
	size_t needed, left, slen;
	int error, first;

	first = 1;
	error = 0;
	needed = 0;
	left = *sizep;

	simple_lock(&disklist_slock);
	for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
	    diskp = TAILQ_NEXT(diskp, dk_link)) {
		if (where == NULL)
			needed += strlen(diskp->dk_name) + 1;
		else {
			memset(buf, 0, sizeof(buf));
			if (first) {
				strncpy(buf, diskp->dk_name, sizeof(buf));
				first = 0;
			} else {
				buf[0] = ' ';
				strncpy(buf + 1, diskp->dk_name,
				    sizeof(buf) - 1);
			}
			buf[DK_DISKNAMELEN] = '\0';
			slen = strlen(buf);
			if (left < slen + 1)
				break;
			/* +1 to copy out the trailing NUL byte */
			error = copyout(buf, where, slen + 1);
			if (error)
				break;
			where += slen;
			needed += slen;
			left -= slen;
		}
	}
	simple_unlock(&disklist_slock);
	*sizep = needed;
	return (error);
}
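
/*
 * From userland the list is reached through sysctl(3); a minimal
 * sketch, assuming the usual CTL_HW/HW_DISKNAMES wiring:
 *
 *	int mib[2] = { CTL_HW, HW_DISKNAMES };
 *	size_t len;
 *
 *	sysctl(mib, 2, NULL, &len, NULL, 0);	(size the buffer)
 *	buf = malloc(len);
 *	sysctl(mib, 2, buf, &len, NULL, 0);	(e.g. "wd0 cd0 fd0")
 */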

/*
 * sysctl helper: copy out an array of disk_sysctl statistics records,
 * one per attached disk.
 */
int
sysctl_diskstats(int *name, u_int namelen, void *vwhere, size_t *sizep)
{
	struct disk_sysctl sdisk;
	struct disk *diskp;
	char *where = vwhere;
	size_t tocopy, left;
	int error;

	if (where == NULL) {
		*sizep = disk_count * sizeof(struct disk_sysctl);
		return (0);
	}

	if (namelen == 0)
		tocopy = sizeof(sdisk);
	else
		tocopy = name[0];

	error = 0;
	left = *sizep;
	memset(&sdisk, 0, sizeof(sdisk));
	*sizep = 0;

	simple_lock(&disklist_slock);
	TAILQ_FOREACH(diskp, &disklist, dk_link) {
		if (left < sizeof(struct disk_sysctl))
			break;
		strncpy(sdisk.dk_name, diskp->dk_name, sizeof(sdisk.dk_name));
		sdisk.dk_xfer = diskp->dk_xfer;
		sdisk.dk_seek = diskp->dk_seek;
		sdisk.dk_bytes = diskp->dk_bytes;
		sdisk.dk_attachtime_sec = diskp->dk_attachtime.tv_sec;
		sdisk.dk_attachtime_usec = diskp->dk_attachtime.tv_usec;
		sdisk.dk_timestamp_sec = diskp->dk_timestamp.tv_sec;
		sdisk.dk_timestamp_usec = diskp->dk_timestamp.tv_usec;
		sdisk.dk_time_sec = diskp->dk_time.tv_sec;
		sdisk.dk_time_usec = diskp->dk_time.tv_usec;
		sdisk.dk_busy = diskp->dk_busy;

		error = copyout(&sdisk, where, min(tocopy, sizeof(sdisk)));
		if (error)
			break;
		where += tocopy;
		*sizep += tocopy;
		left -= tocopy;
	}
	simple_unlock(&disklist_slock);
	return (error);
}
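
/*
 * A corresponding userland sketch, assuming the CTL_HW/HW_DISKSTATS
 * wiring; name[0] (the third MIB element here) optionally passes the
 * per-record size so old binaries keep working as the struct grows:
 *
 *	int mib[3] = { CTL_HW, HW_DISKSTATS, sizeof(struct disk_sysctl) };
 *	size_t len;
 *
 *	sysctl(mib, 3, NULL, &len, NULL, 0);
 *	sdisks = malloc(len);
 *	sysctl(mib, 3, sdisks, &len, NULL, 0);
 */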


struct bufq_fcfs {
	TAILQ_HEAD(, buf) bq_head;	/* actual list of buffers */
};

struct bufq_disksort {
	TAILQ_HEAD(, buf) bq_head;	/* actual list of buffers */
};

#define PRIO_READ_BURST		48
#define PRIO_WRITE_REQ		16

struct bufq_prio {
	TAILQ_HEAD(, buf) bq_read, bq_write; /* actual list of buffers */
	struct buf *bq_write_next;	/* next request in bq_write */
	struct buf *bq_next;		/* current request */
	int bq_read_burst;		/* # of consecutive reads */
};


/*
 * Check if two bufs are in ascending order, i.e. whether bp sorts at
 * or before bq.  A NULL bq sorts after everything; a NULL bp (with a
 * non-NULL bq) does not sort before it.
 */
static __inline int
buf_inorder(struct buf *bp, struct buf *bq, int sortby)
{
	int r;

	if (bp == NULL || bq == NULL)
		return (bq == NULL);

	if (sortby == BUFQ_SORT_CYLINDER)
		r = bp->b_cylinder - bq->b_cylinder;
	else
		r = 0;

	if (r == 0)
		r = bp->b_rawblkno - bq->b_rawblkno;

	return (r <= 0);
}
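
/*
 * For example, with BUFQ_SORT_CYLINDER, a request at (cylinder 3,
 * rawblkno 100) is "in order" with one at (cylinder 3, rawblkno 200),
 * but not with one at (cylinder 2, rawblkno 500): the cylinder keys
 * are compared first and b_rawblkno only breaks ties.
 */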


/*
 * First-come first-served sort for disks.
 *
 * Requests are appended to the queue without any reordering.
 */
static void
bufq_fcfs_put(struct bufq_state *bufq, struct buf *bp)
{
	struct bufq_fcfs *fcfs = bufq->bq_private;

	TAILQ_INSERT_TAIL(&fcfs->bq_head, bp, b_actq);
}

static struct buf *
bufq_fcfs_get(struct bufq_state *bufq, int remove)
{
	struct bufq_fcfs *fcfs = bufq->bq_private;
	struct buf *bp;

	bp = TAILQ_FIRST(&fcfs->bq_head);

	if (bp != NULL && remove)
		TAILQ_REMOVE(&fcfs->bq_head, bp, b_actq);

	return (bp);
}


/*
 * Seek sort for disks.
 *
 * There are actually two queues, sorted in ascending order.  The first
 * queue holds those requests which are positioned after the current block;
 * the second holds requests which came in after their position was passed.
 * Thus we implement a one-way scan, retracting after reaching the end of
 * the drive to the first request on the second queue, at which time it
 * becomes the first queue.
 *
 * A one-way scan is natural because of the way UNIX read-ahead blocks are
 * allocated.
 */
static void
bufq_disksort_put(struct bufq_state *bufq, struct buf *bp)
{
	struct bufq_disksort *disksort = bufq->bq_private;
	struct buf *bq, *nbq;
	int sortby;

	sortby = bufq->bq_flags & BUFQ_SORT_MASK;

	bq = TAILQ_FIRST(&disksort->bq_head);

	/*
	 * If the queue is empty it's easy; we just go on the end.
	 */
	if (bq == NULL) {
		TAILQ_INSERT_TAIL(&disksort->bq_head, bp, b_actq);
		return;
	}

	/*
	 * If we lie before the currently active request, then we
	 * must locate the second request list and add ourselves to it.
	 */
	if (buf_inorder(bp, bq, sortby)) {
		while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
			/*
			 * Check for an ``inversion'' in the normally
			 * ascending sort order, indicating the start of
			 * the second request list.
			 */
			if (buf_inorder(nbq, bq, sortby)) {
				/*
				 * Search the second request list for the first
				 * request sorting after ours.  We go before
				 * that; if there is no such request, we go at
				 * the end.
				 */
				do {
					if (buf_inorder(bp, nbq, sortby))
						goto insert;
					bq = nbq;
				} while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL);
				goto insert;		/* after last */
			}
			bq = nbq;
		}
		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}
	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
		/*
		 * We want to go after the current request if there is an
		 * inversion after it (i.e. it is the end of the first
		 * request list), or if the next request sorts after
		 * our request.
		 */
		if (buf_inorder(nbq, bq, sortby) ||
		    buf_inorder(bp, nbq, sortby))
			goto insert;
		bq = nbq;
	}
	/*
	 * Neither a second list nor a larger request... we go at the end of
	 * the first list, which is the same as the end of the whole shebang.
	 */
insert:	TAILQ_INSERT_AFTER(&disksort->bq_head, bq, bp, b_actq);
}

static struct buf *
bufq_disksort_get(struct bufq_state *bufq, int remove)
{
	struct bufq_disksort *disksort = bufq->bq_private;
	struct buf *bp;

	bp = TAILQ_FIRST(&disksort->bq_head);

	if (bp != NULL && remove)
		TAILQ_REMOVE(&disksort->bq_head, bp, b_actq);

	return (bp);
}


/*
 * Seek sort for disks.
 *
 * There are two queues.  The first queue holds read requests; the second
 * holds write requests.  The read queue is first-come first-served; the
 * write queue is sorted in ascending block order.
 * The read queue is processed first.  After PRIO_READ_BURST consecutive
 * read requests have been serviced while the write queue is non-empty,
 * PRIO_WRITE_REQ requests from the write queue are processed.
 */
static void
bufq_prio_put(struct bufq_state *bufq, struct buf *bp)
{
	struct bufq_prio *prio = bufq->bq_private;
	struct buf *bq;
	int sortby;

	sortby = bufq->bq_flags & BUFQ_SORT_MASK;

	/*
	 * If it's a read request, append it to the read list.
	 */
	if ((bp->b_flags & B_READ) == B_READ) {
		TAILQ_INSERT_TAIL(&prio->bq_read, bp, b_actq);
		return;
	}

	bq = TAILQ_FIRST(&prio->bq_write);

	/*
	 * If the write list is empty, simply append the request to it.
	 */
	if (bq == NULL) {
		TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
		prio->bq_write_next = bp;
		return;
	}

	/*
	 * If we sort after the next scheduled write, start the
	 * search there.
	 */
	if (buf_inorder(prio->bq_write_next, bp, sortby))
		bq = prio->bq_write_next;

	/*
	 * Search for the first request at a larger block number.
	 * We go before this request if it exists.
	 */
	while (bq != NULL && buf_inorder(bq, bp, sortby))
		bq = TAILQ_NEXT(bq, b_actq);

	if (bq != NULL)
		TAILQ_INSERT_BEFORE(bq, bp, b_actq);
	else
		TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
}

static struct buf *
bufq_prio_get(struct bufq_state *bufq, int remove)
{
	struct bufq_prio *prio = bufq->bq_private;
	struct buf *bp;

	/*
	 * If no current request, get next from the lists.
	 */
	if (prio->bq_next == NULL) {
		/*
		 * If either list is empty, select from the other.
		 */

		if (TAILQ_FIRST(&prio->bq_read) == NULL) {
			prio->bq_next = prio->bq_write_next;
			prio->bq_read_burst = 0;
		} else if (prio->bq_write_next == NULL) {
			prio->bq_next = TAILQ_FIRST(&prio->bq_read);
			prio->bq_read_burst = 0;
		} else {
			/*
			 * Both lists have requests.  Select the read list up
			 * to PRIO_READ_BURST times, then select the write
			 * list PRIO_WRITE_REQ times.
			 */

			if (prio->bq_read_burst++ < PRIO_READ_BURST)
				prio->bq_next = TAILQ_FIRST(&prio->bq_read);
			else if (prio->bq_read_burst <
				     PRIO_READ_BURST + PRIO_WRITE_REQ)
				prio->bq_next = prio->bq_write_next;
			else {
				prio->bq_next = TAILQ_FIRST(&prio->bq_read);
				prio->bq_read_burst = 0;
			}
		}
	}

	bp = prio->bq_next;

	if (prio->bq_next != NULL && remove) {
		if ((prio->bq_next->b_flags & B_READ) == B_READ)
			TAILQ_REMOVE(&prio->bq_read, prio->bq_next, b_actq);
		else {
			TAILQ_REMOVE(&prio->bq_write, prio->bq_next, b_actq);
			/*
			 * Advance the write pointer; wrap around to the
			 * head of the write queue if we fall off the end.
			 */
			prio->bq_write_next =
			    TAILQ_NEXT(prio->bq_write_next, b_actq);
			if (prio->bq_write_next == NULL)
				prio->bq_write_next =
				    TAILQ_FIRST(&prio->bq_write);
		}

		prio->bq_next = NULL;
	}

	return (bp);
}
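
/*
 * In steady state with both queues non-empty this yields a pattern of
 * up to PRIO_READ_BURST (48) reads followed by PRIO_WRITE_REQ (16)
 * writes, i.e. reads are favoured roughly 3:1 but writes cannot
 * starve.
 */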

/*
 * Create a device buffer queue.
 */
void
bufq_alloc(struct bufq_state *bufq, int flags)
{
	struct bufq_fcfs *fcfs;
	struct bufq_disksort *disksort;
	struct bufq_prio *prio;

	bufq->bq_flags = flags;

	switch (flags & BUFQ_SORT_MASK) {
	case BUFQ_SORT_RAWBLOCK:
	case BUFQ_SORT_CYLINDER:
		break;
	case 0:
		if ((flags & BUFQ_METHOD_MASK) == BUFQ_FCFS)
			break;
		/* FALLTHROUGH */
	default:
		panic("bufq_alloc: sort out of range");
	}

	switch (flags & BUFQ_METHOD_MASK) {
	case BUFQ_FCFS:
		bufq->bq_get = bufq_fcfs_get;
		bufq->bq_put = bufq_fcfs_put;
		MALLOC(bufq->bq_private, struct bufq_fcfs *,
		    sizeof(struct bufq_fcfs), M_DEVBUF, M_ZERO);
		fcfs = (struct bufq_fcfs *)bufq->bq_private;
		TAILQ_INIT(&fcfs->bq_head);
		break;
	case BUFQ_DISKSORT:
		bufq->bq_get = bufq_disksort_get;
		bufq->bq_put = bufq_disksort_put;
		MALLOC(bufq->bq_private, struct bufq_disksort *,
		    sizeof(struct bufq_disksort), M_DEVBUF, M_ZERO);
		disksort = (struct bufq_disksort *)bufq->bq_private;
		TAILQ_INIT(&disksort->bq_head);
		break;
	case BUFQ_READ_PRIO:
		bufq->bq_get = bufq_prio_get;
		bufq->bq_put = bufq_prio_put;
		MALLOC(bufq->bq_private, struct bufq_prio *,
		    sizeof(struct bufq_prio), M_DEVBUF, M_ZERO);
		prio = (struct bufq_prio *)bufq->bq_private;
		TAILQ_INIT(&prio->bq_read);
		TAILQ_INIT(&prio->bq_write);
		break;
	default:
		panic("bufq_alloc: method out of range");
	}
}
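
/*
 * A minimal usage sketch for a driver (hypothetical "sc"; BUFQ_PUT(),
 * BUFQ_GET() and BUFQ_PEEK() are the wrapper macros that dispatch
 * through bq_put/bq_get):
 *
 *	bufq_alloc(&sc->sc_q, BUFQ_DISKSORT | BUFQ_SORT_RAWBLOCK);
 *	...
 *	BUFQ_PUT(&sc->sc_q, bp);	(enqueue a request)
 *	bp = BUFQ_GET(&sc->sc_q);	(dequeue the next, or NULL)
 *	...
 *	bufq_free(&sc->sc_q);		(queue must be drained first)
 */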

/*
 * Destroy a device buffer queue.
 */
void
bufq_free(struct bufq_state *bufq)
{
	KASSERT(bufq->bq_private != NULL);
	KASSERT(BUFQ_PEEK(bufq) == NULL);

	FREE(bufq->bq_private, M_DEVBUF);
	bufq->bq_get = NULL;
	bufq->bq_put = NULL;
}
   1008