      1  1.60      yamt /*	$NetBSD: subr_disk.c,v 1.60 2004/03/09 12:23:07 yamt Exp $	*/
      2  1.22   thorpej 
      3  1.22   thorpej /*-
      4  1.26   thorpej  * Copyright (c) 1996, 1997, 1999, 2000 The NetBSD Foundation, Inc.
      5  1.22   thorpej  * All rights reserved.
      6  1.22   thorpej  *
      7  1.22   thorpej  * This code is derived from software contributed to The NetBSD Foundation
      8  1.22   thorpej  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  1.22   thorpej  * NASA Ames Research Center.
     10  1.22   thorpej  *
     11  1.22   thorpej  * Redistribution and use in source and binary forms, with or without
     12  1.22   thorpej  * modification, are permitted provided that the following conditions
     13  1.22   thorpej  * are met:
     14  1.22   thorpej  * 1. Redistributions of source code must retain the above copyright
     15  1.22   thorpej  *    notice, this list of conditions and the following disclaimer.
     16  1.22   thorpej  * 2. Redistributions in binary form must reproduce the above copyright
     17  1.22   thorpej  *    notice, this list of conditions and the following disclaimer in the
     18  1.22   thorpej  *    documentation and/or other materials provided with the distribution.
     19  1.22   thorpej  * 3. All advertising materials mentioning features or use of this software
     20  1.22   thorpej  *    must display the following acknowledgement:
     21  1.22   thorpej  *	This product includes software developed by the NetBSD
     22  1.22   thorpej  *	Foundation, Inc. and its contributors.
     23  1.22   thorpej  * 4. Neither the name of The NetBSD Foundation nor the names of its
     24  1.22   thorpej  *    contributors may be used to endorse or promote products derived
     25  1.22   thorpej  *    from this software without specific prior written permission.
     26  1.22   thorpej  *
     27  1.22   thorpej  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     28  1.22   thorpej  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     29  1.22   thorpej  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     30  1.22   thorpej  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     31  1.22   thorpej  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     32  1.22   thorpej  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     33  1.22   thorpej  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     34  1.22   thorpej  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     35  1.22   thorpej  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     36  1.22   thorpej  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     37  1.22   thorpej  * POSSIBILITY OF SUCH DAMAGE.
     38  1.22   thorpej  */
     39  1.12       cgd 
     40  1.11   mycroft /*
     41  1.11   mycroft  * Copyright (c) 1982, 1986, 1988, 1993
     42  1.11   mycroft  *	The Regents of the University of California.  All rights reserved.
     43  1.11   mycroft  * (c) UNIX System Laboratories, Inc.
     44  1.11   mycroft  * All or some portions of this file are derived from material licensed
     45  1.11   mycroft  * to the University of California by American Telephone and Telegraph
     46  1.11   mycroft  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     47  1.11   mycroft  * the permission of UNIX System Laboratories, Inc.
     48  1.11   mycroft  *
     49  1.11   mycroft  * Redistribution and use in source and binary forms, with or without
     50  1.11   mycroft  * modification, are permitted provided that the following conditions
     51  1.11   mycroft  * are met:
     52  1.11   mycroft  * 1. Redistributions of source code must retain the above copyright
     53  1.11   mycroft  *    notice, this list of conditions and the following disclaimer.
     54  1.11   mycroft  * 2. Redistributions in binary form must reproduce the above copyright
     55  1.11   mycroft  *    notice, this list of conditions and the following disclaimer in the
     56  1.11   mycroft  *    documentation and/or other materials provided with the distribution.
     57  1.53       agc  * 3. Neither the name of the University nor the names of its contributors
     58  1.11   mycroft  *    may be used to endorse or promote products derived from this software
     59  1.11   mycroft  *    without specific prior written permission.
     60  1.11   mycroft  *
     61  1.11   mycroft  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     62  1.11   mycroft  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     63  1.11   mycroft  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     64  1.11   mycroft  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     65  1.11   mycroft  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     66  1.11   mycroft  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     67  1.11   mycroft  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     68  1.11   mycroft  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     69  1.11   mycroft  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     70  1.11   mycroft  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     71  1.11   mycroft  * SUCH DAMAGE.
     72  1.11   mycroft  *
     73  1.12       cgd  *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
     74  1.11   mycroft  */
     75  1.31     lukem 
     76  1.31     lukem #include <sys/cdefs.h>
     77  1.60      yamt __KERNEL_RCSID(0, "$NetBSD: subr_disk.c,v 1.60 2004/03/09 12:23:07 yamt Exp $");
     78  1.48       mrg 
     79  1.48       mrg #include "opt_compat_netbsd.h"
     80  1.59      yamt #include "opt_bufq.h"
     81  1.11   mycroft 
     82  1.11   mycroft #include <sys/param.h>
     83  1.15   thorpej #include <sys/kernel.h>
     84  1.15   thorpej #include <sys/malloc.h>
     85  1.11   mycroft #include <sys/buf.h>
     86  1.15   thorpej #include <sys/syslog.h>
     87  1.11   mycroft #include <sys/disklabel.h>
     88  1.15   thorpej #include <sys/disk.h>
     89  1.33    simonb #include <sys/sysctl.h>
     90  1.47       mrg #include <lib/libkern/libkern.h>
     91  1.14   thorpej 
     92  1.14   thorpej /*
     93  1.15   thorpej  * A global list of all disks attached to the system.  May grow or
     94  1.15   thorpej  * shrink over time.
     95  1.15   thorpej  */
     96  1.15   thorpej struct	disklist_head disklist;	/* TAILQ_HEAD */
     97  1.15   thorpej int	disk_count;		/* number of drives in global disklist */
     98  1.33    simonb struct simplelock disklist_slock = SIMPLELOCK_INITIALIZER;
     99  1.39   hannken 
    100  1.59      yamt #ifdef NEW_BUFQ_STRATEGY
    101  1.59      yamt int bufq_disk_default_strat = BUFQ_READ_PRIO;
    102  1.59      yamt #else /* NEW_BUFQ_STRATEGY */
    103  1.59      yamt int bufq_disk_default_strat = BUFQ_DISKSORT;
    104  1.59      yamt #endif /* NEW_BUFQ_STRATEGY */
    105  1.59      yamt 
    106  1.39   hannken /*
    107  1.11   mycroft  * Compute checksum for disk label.
    108  1.11   mycroft  */
    109  1.11   mycroft u_int
    110  1.30    simonb dkcksum(struct disklabel *lp)
    111  1.11   mycroft {
    112  1.29  augustss 	u_short *start, *end;
    113  1.29  augustss 	u_short sum = 0;
    114  1.11   mycroft 
    115  1.11   mycroft 	start = (u_short *)lp;
    116  1.11   mycroft 	end = (u_short *)&lp->d_partitions[lp->d_npartitions];
    117  1.11   mycroft 	while (start < end)
    118  1.11   mycroft 		sum ^= *start++;
    119  1.11   mycroft 	return (sum);
    120  1.11   mycroft }
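
/*
 * Illustrative note (not part of the original file): because the checksum
 * is a plain XOR over the label up to and including the last defined
 * partition, label-handling code typically stamps and verifies it along
 * these lines:
 *
 *	lp->d_checksum = 0;
 *	lp->d_checksum = dkcksum(lp);	stamp a freshly built label
 *	...
 *	if (dkcksum(lp) != 0)		a valid label XORs to zero
 *		return (EINVAL);
 */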
    121  1.11   mycroft 
    122  1.11   mycroft /*
    123  1.11   mycroft  * Disk error is the preface to plaintive error messages
    124  1.11   mycroft  * about failing disk transfers.  It prints messages of the form
    125  1.11   mycroft 
    126  1.11   mycroft hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)
    127  1.11   mycroft 
    128  1.11   mycroft  * if the offset of the error in the transfer and a disk label
    129  1.11   mycroft  * are both available.  blkdone should be -1 if the position of the error
    130  1.11   mycroft  * is unknown; the disklabel pointer may be null from drivers that have not
    131  1.20  christos  * been converted to use them.  The message is printed with printf
    132  1.11   mycroft  * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
    133  1.20  christos  * The message should be completed (with at least a newline) with printf
    134  1.11   mycroft  * or addlog, respectively.  There is no trailing space.
    135  1.11   mycroft  */
    136  1.51       dsl #ifndef PRIdaddr
    137  1.51       dsl #define PRIdaddr PRId64
    138  1.51       dsl #endif
    139  1.11   mycroft void
    140  1.38      yamt diskerr(const struct buf *bp, const char *dname, const char *what, int pri,
    141  1.38      yamt     int blkdone, const struct disklabel *lp)
    142  1.11   mycroft {
    143  1.25  drochner 	int unit = DISKUNIT(bp->b_dev), part = DISKPART(bp->b_dev);
    144  1.30    simonb 	void (*pr)(const char *, ...);
    145  1.11   mycroft 	char partname = 'a' + part;
    146  1.51       dsl 	daddr_t sn;
    147  1.51       dsl 
    148  1.52       dsl 	if (/*CONSTCOND*/0)
     149  1.51       dsl 		/* Compiler will error if the format is wrong... */
    150  1.51       dsl 		printf("%" PRIdaddr, bp->b_blkno);
    151  1.11   mycroft 
    152  1.11   mycroft 	if (pri != LOG_PRINTF) {
    153  1.17  christos 		static const char fmt[] = "";
    154  1.17  christos 		log(pri, fmt);
    155  1.11   mycroft 		pr = addlog;
    156  1.11   mycroft 	} else
    157  1.20  christos 		pr = printf;
    158  1.11   mycroft 	(*pr)("%s%d%c: %s %sing fsbn ", dname, unit, partname, what,
    159  1.11   mycroft 	    bp->b_flags & B_READ ? "read" : "writ");
    160  1.11   mycroft 	sn = bp->b_blkno;
    161  1.11   mycroft 	if (bp->b_bcount <= DEV_BSIZE)
    162  1.51       dsl 		(*pr)("%" PRIdaddr, sn);
    163  1.11   mycroft 	else {
    164  1.11   mycroft 		if (blkdone >= 0) {
    165  1.11   mycroft 			sn += blkdone;
    166  1.51       dsl 			(*pr)("%" PRIdaddr " of ", sn);
    167  1.11   mycroft 		}
    168  1.51       dsl 		(*pr)("%" PRIdaddr "-%" PRIdaddr "", bp->b_blkno,
    169  1.11   mycroft 		    bp->b_blkno + (bp->b_bcount - 1) / DEV_BSIZE);
    170  1.11   mycroft 	}
    171  1.11   mycroft 	if (lp && (blkdone >= 0 || bp->b_bcount <= lp->d_secsize)) {
    172  1.11   mycroft 		sn += lp->d_partitions[part].p_offset;
    173  1.51       dsl 		(*pr)(" (%s%d bn %" PRIdaddr "; cn %" PRIdaddr "",
    174  1.51       dsl 		    dname, unit, sn, sn / lp->d_secpercyl);
    175  1.11   mycroft 		sn %= lp->d_secpercyl;
    176  1.51       dsl 		(*pr)(" tn %" PRIdaddr " sn %" PRIdaddr ")",
    177  1.51       dsl 		    sn / lp->d_nsectors, sn % lp->d_nsectors);
    178  1.11   mycroft 	}
    179  1.15   thorpej }
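
/*
 * Usage sketch (illustrative, not from the original file): a driver would
 * report a failed transfer and then finish the message itself, e.g.
 *
 *	diskerr(bp, "sd", "hard error", LOG_PRINTF, -1, lp);
 *	printf("\n");
 *
 * "sd" is just an assumed driver name here; with a priority other than
 * LOG_PRINTF the message would be completed with addlog() instead of
 * printf().
 */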
    180  1.15   thorpej 
    181  1.15   thorpej /*
    182  1.15   thorpej  * Initialize the disklist.  Called by main() before autoconfiguration.
    183  1.15   thorpej  */
    184  1.15   thorpej void
    185  1.30    simonb disk_init(void)
    186  1.15   thorpej {
    187  1.15   thorpej 
    188  1.15   thorpej 	TAILQ_INIT(&disklist);
    189  1.15   thorpej 	disk_count = 0;
    190  1.15   thorpej }
    191  1.15   thorpej 
    192  1.15   thorpej /*
    193  1.15   thorpej  * Searches the disklist for the disk corresponding to the
    194  1.15   thorpej  * name provided.
    195  1.15   thorpej  */
    196  1.15   thorpej struct disk *
    197  1.30    simonb disk_find(char *name)
    198  1.15   thorpej {
    199  1.15   thorpej 	struct disk *diskp;
    200  1.15   thorpej 
    201  1.15   thorpej 	if ((name == NULL) || (disk_count <= 0))
    202  1.15   thorpej 		return (NULL);
    203  1.15   thorpej 
    204  1.33    simonb 	simple_lock(&disklist_slock);
    205  1.33    simonb 	for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
    206  1.33    simonb 	    diskp = TAILQ_NEXT(diskp, dk_link))
    207  1.33    simonb 		if (strcmp(diskp->dk_name, name) == 0) {
    208  1.33    simonb 			simple_unlock(&disklist_slock);
    209  1.15   thorpej 			return (diskp);
    210  1.33    simonb 		}
    211  1.33    simonb 	simple_unlock(&disklist_slock);
    212  1.15   thorpej 
    213  1.15   thorpej 	return (NULL);
    214  1.15   thorpej }
    215  1.15   thorpej 
    216  1.15   thorpej /*
    217  1.15   thorpej  * Attach a disk.
    218  1.15   thorpej  */
    219  1.15   thorpej void
    220  1.30    simonb disk_attach(struct disk *diskp)
    221  1.15   thorpej {
    222  1.15   thorpej 	int s;
    223  1.15   thorpej 
    224  1.15   thorpej 	/*
    225  1.15   thorpej 	 * Allocate and initialize the disklabel structures.  Note that
    226  1.15   thorpej 	 * it's not safe to sleep here, since we're probably going to be
    227  1.15   thorpej 	 * called during autoconfiguration.
    228  1.15   thorpej 	 */
    229  1.15   thorpej 	diskp->dk_label = malloc(sizeof(struct disklabel), M_DEVBUF, M_NOWAIT);
    230  1.15   thorpej 	diskp->dk_cpulabel = malloc(sizeof(struct cpu_disklabel), M_DEVBUF,
    231  1.15   thorpej 	    M_NOWAIT);
    232  1.15   thorpej 	if ((diskp->dk_label == NULL) || (diskp->dk_cpulabel == NULL))
    233  1.15   thorpej 		panic("disk_attach: can't allocate storage for disklabel");
    234  1.15   thorpej 
    235  1.24     perry 	memset(diskp->dk_label, 0, sizeof(struct disklabel));
    236  1.24     perry 	memset(diskp->dk_cpulabel, 0, sizeof(struct cpu_disklabel));
    237  1.15   thorpej 
    238  1.15   thorpej 	/*
    239  1.15   thorpej 	 * Set the attached timestamp.
    240  1.15   thorpej 	 */
    241  1.15   thorpej 	s = splclock();
    242  1.15   thorpej 	diskp->dk_attachtime = mono_time;
    243  1.15   thorpej 	splx(s);
    244  1.15   thorpej 
    245  1.15   thorpej 	/*
    246  1.15   thorpej 	 * Link into the disklist.
    247  1.15   thorpej 	 */
    248  1.33    simonb 	simple_lock(&disklist_slock);
    249  1.15   thorpej 	TAILQ_INSERT_TAIL(&disklist, diskp, dk_link);
    250  1.33    simonb 	simple_unlock(&disklist_slock);
    251  1.15   thorpej 	++disk_count;
    252  1.15   thorpej }
    253  1.15   thorpej 
    254  1.15   thorpej /*
    255  1.16  christos  * Detach a disk.
    256  1.15   thorpej  */
    257  1.15   thorpej void
    258  1.30    simonb disk_detach(struct disk *diskp)
    259  1.15   thorpej {
    260  1.15   thorpej 
    261  1.15   thorpej 	/*
    262  1.23   thorpej 	 * Remove from the disklist.
    263  1.23   thorpej 	 */
    264  1.23   thorpej 	if (--disk_count < 0)
    265  1.23   thorpej 		panic("disk_detach: disk_count < 0");
    266  1.33    simonb 	simple_lock(&disklist_slock);
    267  1.23   thorpej 	TAILQ_REMOVE(&disklist, diskp, dk_link);
    268  1.33    simonb 	simple_unlock(&disklist_slock);
    269  1.23   thorpej 
    270  1.23   thorpej 	/*
    271  1.15   thorpej 	 * Free the space used by the disklabel structures.
    272  1.15   thorpej 	 */
    273  1.15   thorpej 	free(diskp->dk_label, M_DEVBUF);
    274  1.15   thorpej 	free(diskp->dk_cpulabel, M_DEVBUF);
    275  1.15   thorpej }
    276  1.15   thorpej 
    277  1.15   thorpej /*
    278  1.15   thorpej  * Increment a disk's busy counter.  If the counter is going from
    279  1.15   thorpej  * 0 to 1, set the timestamp.
    280  1.15   thorpej  */
    281  1.15   thorpej void
    282  1.30    simonb disk_busy(struct disk *diskp)
    283  1.15   thorpej {
    284  1.15   thorpej 	int s;
    285  1.15   thorpej 
    286  1.15   thorpej 	/*
    287  1.15   thorpej 	 * XXX We'd like to use something as accurate as microtime(),
    288  1.15   thorpej 	 * but that doesn't depend on the system TOD clock.
    289  1.15   thorpej 	 */
    290  1.15   thorpej 	if (diskp->dk_busy++ == 0) {
    291  1.15   thorpej 		s = splclock();
    292  1.15   thorpej 		diskp->dk_timestamp = mono_time;
    293  1.15   thorpej 		splx(s);
    294  1.15   thorpej 	}
    295  1.15   thorpej }
    296  1.15   thorpej 
    297  1.15   thorpej /*
    298  1.15   thorpej  * Decrement a disk's busy counter, increment the byte count, total busy
    299  1.15   thorpej  * time, and reset the timestamp.
    300  1.15   thorpej  */
    301  1.15   thorpej void
    302  1.45       mrg disk_unbusy(struct disk *diskp, long bcount, int read)
    303  1.15   thorpej {
    304  1.15   thorpej 	int s;
    305  1.15   thorpej 	struct timeval dv_time, diff_time;
    306  1.15   thorpej 
    307  1.23   thorpej 	if (diskp->dk_busy-- == 0) {
    308  1.23   thorpej 		printf("%s: dk_busy < 0\n", diskp->dk_name);
    309  1.23   thorpej 		panic("disk_unbusy");
    310  1.23   thorpej 	}
    311  1.15   thorpej 
    312  1.15   thorpej 	s = splclock();
    313  1.15   thorpej 	dv_time = mono_time;
    314  1.15   thorpej 	splx(s);
    315  1.15   thorpej 
    316  1.15   thorpej 	timersub(&dv_time, &diskp->dk_timestamp, &diff_time);
    317  1.15   thorpej 	timeradd(&diskp->dk_time, &diff_time, &diskp->dk_time);
    318  1.15   thorpej 
    319  1.15   thorpej 	diskp->dk_timestamp = dv_time;
    320  1.15   thorpej 	if (bcount > 0) {
    321  1.45       mrg 		if (read) {
    322  1.45       mrg 			diskp->dk_rbytes += bcount;
    323  1.45       mrg 			diskp->dk_rxfer++;
    324  1.45       mrg 		} else {
    325  1.45       mrg 			diskp->dk_wbytes += bcount;
    326  1.45       mrg 			diskp->dk_wxfer++;
    327  1.45       mrg 		}
    328  1.15   thorpej 	}
    329  1.15   thorpej }
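
/*
 * Usage sketch (hypothetical driver code, not from the original file):
 * the busy/unbusy calls bracket each transfer so that the accumulated
 * busy time and the byte/transfer counts stay consistent:
 *
 *	disk_busy(&sc->sc_dk);			when the transfer starts
 *	...
 *	disk_unbusy(&sc->sc_dk,			in the completion path
 *	    bp->b_bcount - bp->b_resid,
 *	    (bp->b_flags & B_READ) != 0);
 *
 * "sc" and "sc_dk" stand for a driver softc and its embedded struct disk.
 */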
    330  1.15   thorpej 
    331  1.15   thorpej /*
    332  1.15   thorpej  * Reset the metrics counters on the given disk.  Note that we cannot
     333  1.15   thorpej  * reset the busy counter, as it may cause a panic in disk_unbusy().
    334  1.15   thorpej  * We also must avoid playing with the timestamp information, as it
    335  1.15   thorpej  * may skew any pending transfer results.
    336  1.15   thorpej  */
    337  1.15   thorpej void
    338  1.30    simonb disk_resetstat(struct disk *diskp)
    339  1.15   thorpej {
    340  1.15   thorpej 	int s = splbio(), t;
    341  1.15   thorpej 
    342  1.45       mrg 	diskp->dk_rxfer = 0;
    343  1.45       mrg 	diskp->dk_rbytes = 0;
    344  1.45       mrg 	diskp->dk_wxfer = 0;
    345  1.45       mrg 	diskp->dk_wbytes = 0;
    346  1.15   thorpej 
    347  1.15   thorpej 	t = splclock();
    348  1.15   thorpej 	diskp->dk_attachtime = mono_time;
    349  1.15   thorpej 	splx(t);
    350  1.15   thorpej 
    351  1.15   thorpej 	timerclear(&diskp->dk_time);
    352  1.15   thorpej 
    353  1.15   thorpej 	splx(s);
    354  1.33    simonb }
    355  1.33    simonb 
    356  1.33    simonb int
    357  1.54    atatat sysctl_hw_disknames(SYSCTLFN_ARGS)
    358  1.33    simonb {
    359  1.33    simonb 	char buf[DK_DISKNAMELEN + 1];
    360  1.54    atatat 	char *where = oldp;
    361  1.33    simonb 	struct disk *diskp;
    362  1.33    simonb 	size_t needed, left, slen;
    363  1.33    simonb 	int error, first;
    364  1.33    simonb 
    365  1.54    atatat 	if (newp != NULL)
    366  1.54    atatat 		return (EPERM);
    367  1.54    atatat 	if (namelen != 0)
    368  1.54    atatat 		return (EINVAL);
    369  1.54    atatat 
    370  1.33    simonb 	first = 1;
    371  1.33    simonb 	error = 0;
    372  1.33    simonb 	needed = 0;
    373  1.54    atatat 	left = *oldlenp;
    374  1.35    simonb 
    375  1.33    simonb 	simple_lock(&disklist_slock);
    376  1.33    simonb 	for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
    377  1.33    simonb 	    diskp = TAILQ_NEXT(diskp, dk_link)) {
    378  1.33    simonb 		if (where == NULL)
    379  1.33    simonb 			needed += strlen(diskp->dk_name) + 1;
    380  1.33    simonb 		else {
    381  1.33    simonb 			memset(buf, 0, sizeof(buf));
    382  1.33    simonb 			if (first) {
    383  1.33    simonb 				strncpy(buf, diskp->dk_name, sizeof(buf));
    384  1.33    simonb 				first = 0;
    385  1.33    simonb 			} else {
    386  1.33    simonb 				buf[0] = ' ';
    387  1.36     enami 				strncpy(buf + 1, diskp->dk_name,
    388  1.37     enami 				    sizeof(buf) - 1);
    389  1.33    simonb 			}
    390  1.33    simonb 			buf[DK_DISKNAMELEN] = '\0';
    391  1.33    simonb 			slen = strlen(buf);
    392  1.33    simonb 			if (left < slen + 1)
    393  1.33    simonb 				break;
    394  1.33    simonb 			/* +1 to copy out the trailing NUL byte */
    395  1.33    simonb 			error = copyout(buf, where, slen + 1);
    396  1.33    simonb 			if (error)
    397  1.33    simonb 				break;
    398  1.33    simonb 			where += slen;
    399  1.33    simonb 			needed += slen;
    400  1.33    simonb 			left -= slen;
    401  1.33    simonb 		}
    402  1.33    simonb 	}
    403  1.33    simonb 	simple_unlock(&disklist_slock);
    404  1.54    atatat 	*oldlenp = needed;
    405  1.33    simonb 	return (error);
    406  1.33    simonb }
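
/*
 * The handler above exports the attached disk names as one space-separated
 * string.  A userland consumer could fetch it roughly like this
 * (illustrative only, error handling omitted):
 *
 *	char buf[1024];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctlbyname("hw.disknames", buf, &len, NULL, 0) == 0)
 *		printf("%s\n", buf);
 */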
    407  1.33    simonb 
    408  1.33    simonb int
    409  1.54    atatat sysctl_hw_diskstats(SYSCTLFN_ARGS)
    410  1.33    simonb {
    411  1.33    simonb 	struct disk_sysctl sdisk;
    412  1.33    simonb 	struct disk *diskp;
    413  1.54    atatat 	char *where = oldp;
    414  1.33    simonb 	size_t tocopy, left;
    415  1.33    simonb 	int error;
    416  1.33    simonb 
    417  1.54    atatat 	if (newp != NULL)
    418  1.54    atatat 		return (EPERM);
    419  1.54    atatat 
    420  1.48       mrg 	/*
    421  1.48       mrg 	 * The original hw.diskstats call was broken and did not require
     422  1.48       mrg 	 * the userland to pass in its size of struct disk_sysctl.  This
     423  1.48       mrg 	 * was fixed after NetBSD 1.6 was released, so any application
     424  1.48       mrg 	 * that does not pass in the size gets an error, unless 1.6
     425  1.48       mrg 	 * compatibility (COMPAT_16) is configured.
    426  1.48       mrg 	 */
    427  1.33    simonb 	if (namelen == 0)
    428  1.47       mrg #ifdef COMPAT_16
    429  1.49     enami 		tocopy = offsetof(struct disk_sysctl, dk_rxfer);
    430  1.47       mrg #else
    431  1.47       mrg 		return (EINVAL);
    432  1.47       mrg #endif
    433  1.33    simonb 	else
    434  1.33    simonb 		tocopy = name[0];
    435  1.49     enami 
    436  1.49     enami 	if (where == NULL) {
    437  1.54    atatat 		*oldlenp = disk_count * tocopy;
    438  1.49     enami 		return (0);
    439  1.49     enami 	}
    440  1.33    simonb 
    441  1.33    simonb 	error = 0;
    442  1.54    atatat 	left = *oldlenp;
    443  1.33    simonb 	memset(&sdisk, 0, sizeof(sdisk));
    444  1.54    atatat 	*oldlenp = 0;
    445  1.33    simonb 
    446  1.33    simonb 	simple_lock(&disklist_slock);
    447  1.34    simonb 	TAILQ_FOREACH(diskp, &disklist, dk_link) {
    448  1.46    simonb 		if (left < tocopy)
    449  1.33    simonb 			break;
    450  1.36     enami 		strncpy(sdisk.dk_name, diskp->dk_name, sizeof(sdisk.dk_name));
    451  1.45       mrg 		sdisk.dk_xfer = diskp->dk_rxfer + diskp->dk_wxfer;
    452  1.45       mrg 		sdisk.dk_rxfer = diskp->dk_rxfer;
    453  1.45       mrg 		sdisk.dk_wxfer = diskp->dk_wxfer;
    454  1.33    simonb 		sdisk.dk_seek = diskp->dk_seek;
    455  1.45       mrg 		sdisk.dk_bytes = diskp->dk_rbytes + diskp->dk_wbytes;
    456  1.45       mrg 		sdisk.dk_rbytes = diskp->dk_rbytes;
    457  1.45       mrg 		sdisk.dk_wbytes = diskp->dk_wbytes;
    458  1.33    simonb 		sdisk.dk_attachtime_sec = diskp->dk_attachtime.tv_sec;
    459  1.33    simonb 		sdisk.dk_attachtime_usec = diskp->dk_attachtime.tv_usec;
    460  1.33    simonb 		sdisk.dk_timestamp_sec = diskp->dk_timestamp.tv_sec;
    461  1.33    simonb 		sdisk.dk_timestamp_usec = diskp->dk_timestamp.tv_usec;
    462  1.33    simonb 		sdisk.dk_time_sec = diskp->dk_time.tv_sec;
    463  1.33    simonb 		sdisk.dk_time_usec = diskp->dk_time.tv_usec;
    464  1.33    simonb 		sdisk.dk_busy = diskp->dk_busy;
    465  1.35    simonb 
    466  1.33    simonb 		error = copyout(&sdisk, where, min(tocopy, sizeof(sdisk)));
    467  1.33    simonb 		if (error)
    468  1.33    simonb 			break;
    469  1.33    simonb 		where += tocopy;
    470  1.54    atatat 		*oldlenp += tocopy;
    471  1.33    simonb 		left -= tocopy;
    472  1.33    simonb 	}
    473  1.33    simonb 	simple_unlock(&disklist_slock);
    474  1.33    simonb 	return (error);
    475  1.39   hannken }
    476  1.39   hannken 
    477  1.39   hannken struct bufq_fcfs {
    478  1.39   hannken 	TAILQ_HEAD(, buf) bq_head;	/* actual list of buffers */
    479  1.39   hannken };
    480  1.39   hannken 
    481  1.39   hannken struct bufq_disksort {
    482  1.39   hannken 	TAILQ_HEAD(, buf) bq_head;	/* actual list of buffers */
    483  1.39   hannken };
    484  1.39   hannken 
    485  1.39   hannken #define PRIO_READ_BURST		48
    486  1.39   hannken #define PRIO_WRITE_REQ		16
    487  1.39   hannken 
    488  1.39   hannken struct bufq_prio {
    489  1.39   hannken 	TAILQ_HEAD(, buf) bq_read, bq_write; /* actual list of buffers */
    490  1.39   hannken 	struct buf *bq_write_next;	/* next request in bq_write */
    491  1.41   hannken 	struct buf *bq_next;		/* current request */
    492  1.39   hannken 	int bq_read_burst;		/* # of consecutive reads */
    493  1.39   hannken };
    494  1.39   hannken 
    495  1.39   hannken 
    496  1.60      yamt static __inline int buf_inorder(const struct buf *, const struct buf *, int);
    497  1.60      yamt 
    498  1.39   hannken /*
    499  1.39   hannken  * Check if two buf's are in ascending order.
    500  1.39   hannken  */
    501  1.39   hannken static __inline int
    502  1.60      yamt buf_inorder(const struct buf *bp, const struct buf *bq, int sortby)
    503  1.39   hannken {
    504  1.39   hannken 
    505  1.39   hannken 	if (bp == NULL || bq == NULL)
    506  1.43     enami 		return (bq == NULL);
    507  1.39   hannken 
    508  1.57      yamt 	if (sortby == BUFQ_SORT_CYLINDER) {
    509  1.57      yamt 		if (bp->b_cylinder != bq->b_cylinder)
    510  1.57      yamt 			return bp->b_cylinder < bq->b_cylinder;
    511  1.57      yamt 		else
    512  1.57      yamt 			return bp->b_rawblkno < bq->b_rawblkno;
    513  1.57      yamt 	} else
    514  1.57      yamt 		return bp->b_rawblkno < bq->b_rawblkno;
    515  1.39   hannken }
    516  1.39   hannken 
    517  1.39   hannken 
    518  1.39   hannken /*
    519  1.39   hannken  * First-come first-served sort for disks.
    520  1.39   hannken  *
    521  1.39   hannken  * Requests are appended to the queue without any reordering.
    522  1.39   hannken  */
    523  1.39   hannken static void
    524  1.39   hannken bufq_fcfs_put(struct bufq_state *bufq, struct buf *bp)
    525  1.39   hannken {
    526  1.39   hannken 	struct bufq_fcfs *fcfs = bufq->bq_private;
    527  1.39   hannken 
    528  1.39   hannken 	TAILQ_INSERT_TAIL(&fcfs->bq_head, bp, b_actq);
    529  1.39   hannken }
    530  1.39   hannken 
    531  1.39   hannken static struct buf *
    532  1.39   hannken bufq_fcfs_get(struct bufq_state *bufq, int remove)
    533  1.39   hannken {
    534  1.39   hannken 	struct bufq_fcfs *fcfs = bufq->bq_private;
    535  1.39   hannken 	struct buf *bp;
    536  1.39   hannken 
    537  1.39   hannken 	bp = TAILQ_FIRST(&fcfs->bq_head);
    538  1.39   hannken 
    539  1.39   hannken 	if (bp != NULL && remove)
    540  1.39   hannken 		TAILQ_REMOVE(&fcfs->bq_head, bp, b_actq);
    541  1.39   hannken 
    542  1.43     enami 	return (bp);
    543  1.39   hannken }
    544  1.39   hannken 
    545  1.39   hannken 
    546  1.39   hannken /*
    547  1.39   hannken  * Seek sort for disks.
    548  1.39   hannken  *
     549  1.39   hannken  * There are actually two queues, sorted in ascending order.  The first
    550  1.39   hannken  * queue holds those requests which are positioned after the current block;
    551  1.39   hannken  * the second holds requests which came in after their position was passed.
    552  1.39   hannken  * Thus we implement a one-way scan, retracting after reaching the end of
    553  1.39   hannken  * the drive to the first request on the second queue, at which time it
    554  1.39   hannken  * becomes the first queue.
    555  1.39   hannken  *
    556  1.39   hannken  * A one-way scan is natural because of the way UNIX read-ahead blocks are
    557  1.39   hannken  * allocated.
    558  1.39   hannken  */
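
/*
 * Worked example (illustrative): with the head at block 100 and the queue
 * holding 120, 180, 40, 60 (the drop from 180 to 40 marks the start of the
 * second list), a new request for block 150 is inserted between 120 and
 * 180, while one for block 50 ends up between 40 and 60 on the second list.
 */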
    559  1.39   hannken static void
    560  1.39   hannken bufq_disksort_put(struct bufq_state *bufq, struct buf *bp)
    561  1.39   hannken {
    562  1.39   hannken 	struct bufq_disksort *disksort = bufq->bq_private;
    563  1.39   hannken 	struct buf *bq, *nbq;
    564  1.39   hannken 	int sortby;
    565  1.39   hannken 
    566  1.39   hannken 	sortby = bufq->bq_flags & BUFQ_SORT_MASK;
    567  1.39   hannken 
    568  1.39   hannken 	bq = TAILQ_FIRST(&disksort->bq_head);
    569  1.39   hannken 
    570  1.39   hannken 	/*
    571  1.39   hannken 	 * If the queue is empty it's easy; we just go on the end.
    572  1.39   hannken 	 */
    573  1.39   hannken 	if (bq == NULL) {
    574  1.39   hannken 		TAILQ_INSERT_TAIL(&disksort->bq_head, bp, b_actq);
    575  1.39   hannken 		return;
    576  1.39   hannken 	}
    577  1.39   hannken 
    578  1.39   hannken 	/*
    579  1.39   hannken 	 * If we lie before the currently active request, then we
    580  1.39   hannken 	 * must locate the second request list and add ourselves to it.
    581  1.39   hannken 	 */
    582  1.39   hannken 	if (buf_inorder(bp, bq, sortby)) {
    583  1.39   hannken 		while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
    584  1.39   hannken 			/*
    585  1.39   hannken 			 * Check for an ``inversion'' in the normally ascending
    586  1.39   hannken 			 * block numbers, indicating the start of the second
    587  1.39   hannken 			 * request list.
    588  1.39   hannken 			 */
    589  1.39   hannken 			if (buf_inorder(nbq, bq, sortby)) {
    590  1.39   hannken 				/*
    591  1.39   hannken 				 * Search the second request list for the first
    592  1.39   hannken 				 * request at a larger block number.  We go
    593  1.39   hannken 				 * after that; if there is no such request, we
    594  1.39   hannken 				 * go at the end.
    595  1.39   hannken 				 */
    596  1.39   hannken 				do {
    597  1.39   hannken 					if (buf_inorder(bp, nbq, sortby))
    598  1.39   hannken 						goto insert;
    599  1.39   hannken 					bq = nbq;
    600  1.43     enami 				} while ((nbq =
    601  1.43     enami 				    TAILQ_NEXT(bq, b_actq)) != NULL);
    602  1.39   hannken 				goto insert;		/* after last */
    603  1.39   hannken 			}
    604  1.39   hannken 			bq = nbq;
    605  1.39   hannken 		}
    606  1.39   hannken 		/*
    607  1.39   hannken 		 * No inversions... we will go after the last, and
    608  1.39   hannken 		 * be the first request in the second request list.
    609  1.39   hannken 		 */
    610  1.39   hannken 		goto insert;
    611  1.39   hannken 	}
    612  1.39   hannken 	/*
    613  1.39   hannken 	 * Request is at/after the current request...
    614  1.39   hannken 	 * sort in the first request list.
    615  1.39   hannken 	 */
    616  1.39   hannken 	while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
    617  1.39   hannken 		/*
    618  1.39   hannken 		 * We want to go after the current request if there is an
    619  1.39   hannken 		 * inversion after it (i.e. it is the end of the first
    620  1.39   hannken 		 * request list), or if the next request is a larger cylinder
    621  1.39   hannken 		 * than our request.
    622  1.39   hannken 		 */
    623  1.39   hannken 		if (buf_inorder(nbq, bq, sortby) ||
    624  1.39   hannken 		    buf_inorder(bp, nbq, sortby))
    625  1.39   hannken 			goto insert;
    626  1.39   hannken 		bq = nbq;
    627  1.39   hannken 	}
    628  1.39   hannken 	/*
    629  1.39   hannken 	 * Neither a second list nor a larger request... we go at the end of
     630  1.39   hannken 	 * the first list, which is the same as the end of the whole shebang.
    631  1.39   hannken 	 */
    632  1.39   hannken insert:	TAILQ_INSERT_AFTER(&disksort->bq_head, bq, bp, b_actq);
    633  1.39   hannken }
    634  1.39   hannken 
    635  1.39   hannken static struct buf *
    636  1.39   hannken bufq_disksort_get(struct bufq_state *bufq, int remove)
    637  1.39   hannken {
    638  1.39   hannken 	struct bufq_disksort *disksort = bufq->bq_private;
    639  1.39   hannken 	struct buf *bp;
    640  1.39   hannken 
    641  1.39   hannken 	bp = TAILQ_FIRST(&disksort->bq_head);
    642  1.39   hannken 
    643  1.39   hannken 	if (bp != NULL && remove)
    644  1.39   hannken 		TAILQ_REMOVE(&disksort->bq_head, bp, b_actq);
    645  1.39   hannken 
    646  1.43     enami 	return (bp);
    647  1.39   hannken }
    648  1.39   hannken 
    649  1.39   hannken 
    650  1.39   hannken /*
    651  1.39   hannken  * Seek sort for disks.
    652  1.39   hannken  *
    653  1.39   hannken  * There are two queues.  The first queue holds read requests; the second
    654  1.39   hannken  * holds write requests.  The read queue is first-come first-served; the
     655  1.39   hannken  * write queue is sorted in ascending block order.
     656  1.39   hannken  * The read queue is processed first.  After PRIO_READ_BURST consecutive
     657  1.39   hannken  * read requests while the write queue is non-empty, PRIO_WRITE_REQ
     658  1.39   hannken  * requests from the write queue will be processed.
    659  1.39   hannken  */
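
/*
 * With the constants defined above this means: while both queues stay
 * non-empty, up to PRIO_READ_BURST (48) reads are served, then up to
 * PRIO_WRITE_REQ (16) writes, after which the cycle starts over with
 * the read queue.
 */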
    660  1.39   hannken static void
    661  1.39   hannken bufq_prio_put(struct bufq_state *bufq, struct buf *bp)
    662  1.39   hannken {
    663  1.39   hannken 	struct bufq_prio *prio = bufq->bq_private;
    664  1.39   hannken 	struct buf *bq;
    665  1.39   hannken 	int sortby;
    666  1.39   hannken 
    667  1.39   hannken 	sortby = bufq->bq_flags & BUFQ_SORT_MASK;
    668  1.39   hannken 
    669  1.39   hannken 	/*
    670  1.39   hannken 	 * If it's a read request append it to the list.
    671  1.39   hannken 	 */
    672  1.39   hannken 	if ((bp->b_flags & B_READ) == B_READ) {
    673  1.39   hannken 		TAILQ_INSERT_TAIL(&prio->bq_read, bp, b_actq);
    674  1.39   hannken 		return;
    675  1.39   hannken 	}
    676  1.39   hannken 
    677  1.39   hannken 	bq = TAILQ_FIRST(&prio->bq_write);
    678  1.39   hannken 
    679  1.39   hannken 	/*
    680  1.39   hannken 	 * If the write list is empty, simply append it to the list.
    681  1.39   hannken 	 */
    682  1.39   hannken 	if (bq == NULL) {
    683  1.39   hannken 		TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
    684  1.39   hannken 		prio->bq_write_next = bp;
    685  1.39   hannken 		return;
    686  1.39   hannken 	}
    687  1.39   hannken 
    688  1.39   hannken 	/*
    689  1.39   hannken 	 * If we lie after the next request, insert after this request.
    690  1.39   hannken 	 */
    691  1.39   hannken 	if (buf_inorder(prio->bq_write_next, bp, sortby))
    692  1.39   hannken 		bq = prio->bq_write_next;
    693  1.39   hannken 
    694  1.39   hannken 	/*
    695  1.39   hannken 	 * Search for the first request at a larger block number.
    696  1.39   hannken 	 * We go before this request if it exists.
    697  1.39   hannken 	 */
    698  1.39   hannken 	while (bq != NULL && buf_inorder(bq, bp, sortby))
    699  1.39   hannken 		bq = TAILQ_NEXT(bq, b_actq);
    700  1.39   hannken 
    701  1.39   hannken 	if (bq != NULL)
    702  1.39   hannken 		TAILQ_INSERT_BEFORE(bq, bp, b_actq);
    703  1.39   hannken 	else
    704  1.39   hannken 		TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
    705  1.39   hannken }
    706  1.39   hannken 
    707  1.39   hannken static struct buf *
    708  1.39   hannken bufq_prio_get(struct bufq_state *bufq, int remove)
    709  1.39   hannken {
    710  1.39   hannken 	struct bufq_prio *prio = bufq->bq_private;
    711  1.39   hannken 	struct buf *bp;
    712  1.39   hannken 
    713  1.39   hannken 	/*
    714  1.41   hannken 	 * If no current request, get next from the lists.
    715  1.39   hannken 	 */
    716  1.41   hannken 	if (prio->bq_next == NULL) {
    717  1.39   hannken 		/*
    718  1.41   hannken 		 * If at least one list is empty, select the other.
    719  1.39   hannken 		 */
    720  1.41   hannken 		if (TAILQ_FIRST(&prio->bq_read) == NULL) {
    721  1.41   hannken 			prio->bq_next = prio->bq_write_next;
    722  1.41   hannken 			prio->bq_read_burst = 0;
    723  1.41   hannken 		} else if (prio->bq_write_next == NULL) {
    724  1.41   hannken 			prio->bq_next = TAILQ_FIRST(&prio->bq_read);
    725  1.39   hannken 			prio->bq_read_burst = 0;
    726  1.41   hannken 		} else {
    727  1.41   hannken 			/*
			 * Both lists have requests.  Select the read list up
    729  1.41   hannken 			 * to PRIO_READ_BURST times, then select the write
    730  1.41   hannken 			 * list PRIO_WRITE_REQ times.
    731  1.41   hannken 			 */
    732  1.41   hannken 			if (prio->bq_read_burst++ < PRIO_READ_BURST)
    733  1.41   hannken 				prio->bq_next = TAILQ_FIRST(&prio->bq_read);
    734  1.41   hannken 			else if (prio->bq_read_burst <
    735  1.43     enami 			    PRIO_READ_BURST + PRIO_WRITE_REQ)
    736  1.41   hannken 				prio->bq_next = prio->bq_write_next;
    737  1.41   hannken 			else {
    738  1.41   hannken 				prio->bq_next = TAILQ_FIRST(&prio->bq_read);
    739  1.41   hannken 				prio->bq_read_burst = 0;
    740  1.41   hannken 			}
    741  1.39   hannken 		}
    742  1.39   hannken 	}
    743  1.39   hannken 
    744  1.41   hannken 	bp = prio->bq_next;
    745  1.41   hannken 
    746  1.44     enami 	if (bp != NULL && remove) {
    747  1.44     enami 		if ((bp->b_flags & B_READ) == B_READ)
    748  1.44     enami 			TAILQ_REMOVE(&prio->bq_read, bp, b_actq);
    749  1.39   hannken 		else {
    750  1.39   hannken 			/*
    751  1.44     enami 			 * Advance the write pointer before removing
    752  1.44     enami 			 * bp since it is actually prio->bq_write_next.
    753  1.39   hannken 			 */
    754  1.39   hannken 			prio->bq_write_next =
    755  1.39   hannken 			    TAILQ_NEXT(prio->bq_write_next, b_actq);
    756  1.44     enami 			TAILQ_REMOVE(&prio->bq_write, bp, b_actq);
    757  1.39   hannken 			if (prio->bq_write_next == NULL)
    758  1.39   hannken 				prio->bq_write_next =
    759  1.39   hannken 				    TAILQ_FIRST(&prio->bq_write);
    760  1.39   hannken 		}
    761  1.41   hannken 
    762  1.41   hannken 		prio->bq_next = NULL;
    763  1.39   hannken 	}
    764  1.39   hannken 
    765  1.43     enami 	return (bp);
    766  1.39   hannken }
    767  1.39   hannken 
    768  1.58      yamt 
    769  1.58      yamt /*
    770  1.58      yamt  * Cyclical scan (CSCAN)
    771  1.58      yamt  */
    772  1.58      yamt TAILQ_HEAD(bqhead, buf);
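
/*
 * Requests at or beyond the last position served go on the currently
 * active list; requests behind it go on the other list.  When the active
 * list runs empty the two are swapped, so the head always sweeps the disk
 * in one direction.  For example (illustrative), after serving block 100,
 * new requests for blocks 120 and 40 land on the active and inactive
 * lists respectively; 120 is served next, then the scan wraps to 40.
 */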
    773  1.58      yamt struct cscan_queue {
    774  1.58      yamt 	struct bqhead cq_head[2];	/* actual lists of buffers */
    775  1.58      yamt 	int cq_idx;			/* current list index */
    776  1.58      yamt 	int cq_lastcylinder;		/* b_cylinder of the last request */
    777  1.58      yamt 	daddr_t cq_lastrawblkno;	/* b_rawblkno of the last request */
    778  1.58      yamt };
    779  1.58      yamt 
    780  1.58      yamt static int __inline cscan_empty(const struct cscan_queue *);
    781  1.58      yamt static void cscan_put(struct cscan_queue *, struct buf *, int);
    782  1.58      yamt static struct buf *cscan_get(struct cscan_queue *, int);
    783  1.58      yamt static void cscan_init(struct cscan_queue *);
    784  1.58      yamt 
    785  1.58      yamt static __inline int
    786  1.58      yamt cscan_empty(const struct cscan_queue *q)
    787  1.58      yamt {
    788  1.58      yamt 
    789  1.58      yamt 	return TAILQ_EMPTY(&q->cq_head[0]) && TAILQ_EMPTY(&q->cq_head[1]);
    790  1.58      yamt }
    791  1.58      yamt 
    792  1.58      yamt static void
    793  1.58      yamt cscan_put(struct cscan_queue *q, struct buf *bp, int sortby)
    794  1.58      yamt {
    795  1.58      yamt 	struct buf tmp;
    796  1.58      yamt 	struct buf *it;
    797  1.58      yamt 	struct bqhead *bqh;
    798  1.58      yamt 	int idx;
    799  1.58      yamt 
    800  1.58      yamt 	tmp.b_cylinder = q->cq_lastcylinder;
    801  1.58      yamt 	tmp.b_rawblkno = q->cq_lastrawblkno;
    802  1.58      yamt 
    803  1.58      yamt 	if (buf_inorder(bp, &tmp, sortby))
    804  1.58      yamt 		idx = 1 - q->cq_idx;
    805  1.58      yamt 	else
    806  1.58      yamt 		idx = q->cq_idx;
    807  1.58      yamt 
    808  1.58      yamt 	bqh = &q->cq_head[idx];
    809  1.58      yamt 
    810  1.58      yamt 	TAILQ_FOREACH(it, bqh, b_actq)
    811  1.58      yamt 		if (buf_inorder(bp, it, sortby))
    812  1.58      yamt 			break;
    813  1.58      yamt 
    814  1.58      yamt 	if (it != NULL)
    815  1.58      yamt 		TAILQ_INSERT_BEFORE(it, bp, b_actq);
    816  1.58      yamt 	else
    817  1.58      yamt 		TAILQ_INSERT_TAIL(bqh, bp, b_actq);
    818  1.58      yamt }
    819  1.58      yamt 
    820  1.58      yamt static struct buf *
    821  1.58      yamt cscan_get(struct cscan_queue *q, int remove)
    822  1.58      yamt {
    823  1.58      yamt 	int idx = q->cq_idx;
    824  1.58      yamt 	struct bqhead *bqh;
    825  1.58      yamt 	struct buf *bp;
    826  1.58      yamt 
    827  1.58      yamt 	bqh = &q->cq_head[idx];
    828  1.58      yamt 	bp = TAILQ_FIRST(bqh);
    829  1.58      yamt 
    830  1.58      yamt 	if (bp == NULL) {
    831  1.58      yamt 		/* switch queue */
    832  1.58      yamt 		idx = 1 - idx;
    833  1.58      yamt 		bqh = &q->cq_head[idx];
    834  1.58      yamt 		bp = TAILQ_FIRST(bqh);
    835  1.58      yamt 	}
    836  1.58      yamt 
    837  1.58      yamt 	KDASSERT((bp != NULL && !cscan_empty(q)) ||
    838  1.58      yamt 	         (bp == NULL && cscan_empty(q)));
    839  1.58      yamt 
    840  1.58      yamt 	if (bp != NULL && remove) {
    841  1.58      yamt 		q->cq_idx = idx;
    842  1.58      yamt 		TAILQ_REMOVE(bqh, bp, b_actq);
    843  1.58      yamt 
    844  1.58      yamt 		q->cq_lastcylinder = bp->b_cylinder;
    845  1.58      yamt 		q->cq_lastrawblkno =
    846  1.58      yamt 		    bp->b_rawblkno + (bp->b_bcount >> DEV_BSHIFT);
    847  1.58      yamt 	}
    848  1.58      yamt 
    849  1.58      yamt 	return (bp);
    850  1.58      yamt }
    851  1.58      yamt 
    852  1.58      yamt static void
    853  1.58      yamt cscan_init(struct cscan_queue *q)
    854  1.58      yamt {
    855  1.58      yamt 
    856  1.58      yamt 	TAILQ_INIT(&q->cq_head[0]);
    857  1.58      yamt 	TAILQ_INIT(&q->cq_head[1]);
    858  1.58      yamt }
    859  1.58      yamt 
    860  1.58      yamt 
    861  1.58      yamt /*
     862  1.58      yamt  * Per-priority CSCAN.
    863  1.58      yamt  *
    864  1.58      yamt  * XXX probably we should have a way to raise
    865  1.58      yamt  * priority of the on-queue requests.
    866  1.58      yamt  */
    867  1.58      yamt #define	PRIOCSCAN_NQUEUE	3
    868  1.58      yamt 
    869  1.58      yamt struct priocscan_queue {
    870  1.58      yamt 	struct cscan_queue q_queue;
    871  1.58      yamt 	int q_burst;
    872  1.58      yamt };
    873  1.58      yamt 
    874  1.58      yamt struct bufq_priocscan {
    875  1.58      yamt 	struct priocscan_queue bq_queue[PRIOCSCAN_NQUEUE];
    876  1.58      yamt 
    877  1.58      yamt #if 0
    878  1.58      yamt 	/*
    879  1.58      yamt 	 * XXX using "global" head position can reduce positioning time
     880  1.58      yamt 	 * when switching between queues, although it might
     881  1.58      yamt 	 * adversely affect fairness.
    882  1.58      yamt 	 */
    883  1.58      yamt 	daddr_t bq_lastrawblkno;
    884  1.58      yamt 	int bq_lastcylinder;
    885  1.58      yamt #endif
    886  1.58      yamt };
    887  1.58      yamt 
    888  1.58      yamt /*
     889  1.58      yamt  * how many requests to serve from one queue while others have pending requests.
    890  1.58      yamt  *
    891  1.58      yamt  * XXX tune
    892  1.58      yamt  */
    893  1.58      yamt const int priocscan_burst[] = {
    894  1.58      yamt 	64, 16, 4
    895  1.58      yamt };
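
/*
 * Together with priocscan_priomap below this gives time-critical requests
 * a burst of 64, time-limited requests 16 and non-critical requests 4
 * before the scheduler considers switching to another non-empty queue.
 */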
    896  1.58      yamt 
    897  1.58      yamt static void bufq_priocscan_put(struct bufq_state *, struct buf *);
    898  1.58      yamt static struct buf *bufq_priocscan_get(struct bufq_state *, int);
    899  1.58      yamt static void bufq_priocscan_init(struct bufq_state *);
    900  1.58      yamt static __inline struct cscan_queue *bufq_priocscan_selectqueue(
    901  1.58      yamt     struct bufq_priocscan *, const struct buf *);
    902  1.58      yamt 
    903  1.58      yamt static __inline struct cscan_queue *
    904  1.58      yamt bufq_priocscan_selectqueue(struct bufq_priocscan *q, const struct buf *bp)
    905  1.58      yamt {
    906  1.58      yamt 	static const int priocscan_priomap[] = {
    907  1.58      yamt 		[BPRIO_TIMENONCRITICAL] = 2,
    908  1.58      yamt 		[BPRIO_TIMELIMITED] = 1,
    909  1.58      yamt 		[BPRIO_TIMECRITICAL] = 0
    910  1.58      yamt 	};
    911  1.58      yamt 
    912  1.58      yamt 	return &q->bq_queue[priocscan_priomap[BIO_GETPRIO(bp)]].q_queue;
    913  1.58      yamt }
    914  1.58      yamt 
    915  1.58      yamt static void
    916  1.58      yamt bufq_priocscan_put(struct bufq_state *bufq, struct buf *bp)
    917  1.58      yamt {
    918  1.58      yamt 	struct bufq_priocscan *q = bufq->bq_private;
    919  1.58      yamt 	struct cscan_queue *cq;
    920  1.58      yamt 	const int sortby = bufq->bq_flags & BUFQ_SORT_MASK;
    921  1.58      yamt 
    922  1.58      yamt 	cq = bufq_priocscan_selectqueue(q, bp);
    923  1.58      yamt 	cscan_put(cq, bp, sortby);
    924  1.58      yamt }
    925  1.58      yamt 
    926  1.58      yamt static struct buf *
    927  1.58      yamt bufq_priocscan_get(struct bufq_state *bufq, int remove)
    928  1.58      yamt {
    929  1.58      yamt 	struct bufq_priocscan *q = bufq->bq_private;
    930  1.58      yamt 	struct priocscan_queue *pq, *npq;
    931  1.58      yamt 	struct priocscan_queue *first; /* first non-empty queue */
    932  1.58      yamt 	const struct priocscan_queue *epq;
    933  1.58      yamt 	const struct cscan_queue *cq;
    934  1.58      yamt 	struct buf *bp;
    935  1.58      yamt 	boolean_t single; /* true if there's only one non-empty queue */
    936  1.58      yamt 
    937  1.58      yamt 	pq = &q->bq_queue[0];
    938  1.58      yamt 	epq = pq + PRIOCSCAN_NQUEUE;
    939  1.58      yamt 	for (; pq < epq; pq++) {
    940  1.58      yamt 		cq = &pq->q_queue;
    941  1.58      yamt 		if (!cscan_empty(cq))
    942  1.58      yamt 			break;
    943  1.58      yamt 	}
    944  1.58      yamt 	if (pq == epq) {
     945  1.58      yamt 		/* there are no requests */
    946  1.58      yamt 		return NULL;
    947  1.58      yamt 	}
    948  1.58      yamt 
    949  1.58      yamt 	first = pq;
    950  1.58      yamt 	single = TRUE;
    951  1.58      yamt 	for (npq = first + 1; npq < epq; npq++) {
    952  1.58      yamt 		cq = &npq->q_queue;
    953  1.58      yamt 		if (!cscan_empty(cq)) {
    954  1.58      yamt 			single = FALSE;
    955  1.58      yamt 			if (pq->q_burst > 0)
    956  1.58      yamt 				break;
    957  1.58      yamt 			pq = npq;
    958  1.58      yamt 		}
    959  1.58      yamt 	}
    960  1.58      yamt 	if (single) {
    961  1.58      yamt 		/*
     962  1.58      yamt 		 * there's only one non-empty queue.  just serve it.
    963  1.58      yamt 		 */
    964  1.58      yamt 		pq = first;
    965  1.58      yamt 	} else if (pq->q_burst > 0) {
    966  1.58      yamt 		/*
    967  1.58      yamt 		 * XXX account only by number of requests.  is it good enough?
    968  1.58      yamt 		 */
    969  1.58      yamt 		pq->q_burst--;
    970  1.58      yamt 	} else {
    971  1.58      yamt 		/*
    972  1.58      yamt 		 * no queue was selected due to burst counts
    973  1.58      yamt 		 */
    974  1.58      yamt 		int i;
    975  1.58      yamt #ifdef DEBUG
    976  1.58      yamt 		for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
    977  1.58      yamt 			pq = &q->bq_queue[i];
    978  1.58      yamt 			cq = &pq->q_queue;
    979  1.58      yamt 			if (!cscan_empty(cq) && pq->q_burst)
    980  1.58      yamt 				panic("%s: inconsist", __func__);
    981  1.58      yamt 		}
    982  1.58      yamt #endif /* DEBUG */
    983  1.58      yamt 
    984  1.58      yamt 		/*
    985  1.58      yamt 		 * reset burst counts
    986  1.58      yamt 		 */
    987  1.58      yamt 		for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
    988  1.58      yamt 			pq = &q->bq_queue[i];
    989  1.58      yamt 			pq->q_burst = priocscan_burst[i];
    990  1.58      yamt 		}
    991  1.58      yamt 
    992  1.58      yamt 		/*
    993  1.58      yamt 		 * serve first non-empty queue.
    994  1.58      yamt 		 */
    995  1.58      yamt 		pq = first;
    996  1.58      yamt 	}
    997  1.58      yamt 
    998  1.58      yamt 	KDASSERT(!cscan_empty(&pq->q_queue));
    999  1.58      yamt 	bp = cscan_get(&pq->q_queue, remove);
   1000  1.58      yamt 	KDASSERT(bp != NULL);
   1001  1.58      yamt 	KDASSERT(&pq->q_queue == bufq_priocscan_selectqueue(q, bp));
   1002  1.58      yamt 
   1003  1.58      yamt 	return bp;
   1004  1.58      yamt }
   1005  1.58      yamt 
   1006  1.58      yamt static void
   1007  1.58      yamt bufq_priocscan_init(struct bufq_state *bufq)
   1008  1.58      yamt {
   1009  1.58      yamt 	struct bufq_priocscan *q;
   1010  1.58      yamt 	int i;
   1011  1.58      yamt 
   1012  1.58      yamt 	bufq->bq_get = bufq_priocscan_get;
   1013  1.58      yamt 	bufq->bq_put = bufq_priocscan_put;
   1014  1.58      yamt 	bufq->bq_private = malloc(sizeof(struct bufq_priocscan),
   1015  1.58      yamt 	    M_DEVBUF, M_ZERO);
   1016  1.58      yamt 
   1017  1.58      yamt 	q = bufq->bq_private;
   1018  1.58      yamt 	for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
   1019  1.58      yamt 		struct cscan_queue *cq = &q->bq_queue[i].q_queue;
   1020  1.58      yamt 
   1021  1.58      yamt 		cscan_init(cq);
   1022  1.58      yamt 	}
   1023  1.58      yamt }
   1024  1.58      yamt 
   1025  1.58      yamt 
   1026  1.40   hannken /*
   1027  1.40   hannken  * Create a device buffer queue.
   1028  1.40   hannken  */
   1029  1.39   hannken void
   1030  1.40   hannken bufq_alloc(struct bufq_state *bufq, int flags)
   1031  1.39   hannken {
   1032  1.39   hannken 	struct bufq_fcfs *fcfs;
   1033  1.39   hannken 	struct bufq_disksort *disksort;
   1034  1.39   hannken 	struct bufq_prio *prio;
   1035  1.39   hannken 
   1036  1.39   hannken 	bufq->bq_flags = flags;
   1037  1.39   hannken 
   1038  1.39   hannken 	switch (flags & BUFQ_SORT_MASK) {
   1039  1.39   hannken 	case BUFQ_SORT_RAWBLOCK:
   1040  1.39   hannken 	case BUFQ_SORT_CYLINDER:
   1041  1.39   hannken 		break;
   1042  1.39   hannken 	case 0:
   1043  1.39   hannken 		if ((flags & BUFQ_METHOD_MASK) == BUFQ_FCFS)
   1044  1.39   hannken 			break;
   1045  1.39   hannken 		/* FALLTHROUGH */
   1046  1.39   hannken 	default:
   1047  1.40   hannken 		panic("bufq_alloc: sort out of range");
   1048  1.39   hannken 	}
   1049  1.39   hannken 
   1050  1.39   hannken 	switch (flags & BUFQ_METHOD_MASK) {
   1051  1.39   hannken 	case BUFQ_FCFS:
   1052  1.39   hannken 		bufq->bq_get = bufq_fcfs_get;
   1053  1.39   hannken 		bufq->bq_put = bufq_fcfs_put;
   1054  1.40   hannken 		MALLOC(bufq->bq_private, struct bufq_fcfs *,
   1055  1.40   hannken 		    sizeof(struct bufq_fcfs), M_DEVBUF, M_ZERO);
   1056  1.39   hannken 		fcfs = (struct bufq_fcfs *)bufq->bq_private;
   1057  1.39   hannken 		TAILQ_INIT(&fcfs->bq_head);
   1058  1.39   hannken 		break;
   1059  1.39   hannken 	case BUFQ_DISKSORT:
   1060  1.39   hannken 		bufq->bq_get = bufq_disksort_get;
   1061  1.39   hannken 		bufq->bq_put = bufq_disksort_put;
   1062  1.40   hannken 		MALLOC(bufq->bq_private, struct bufq_disksort *,
   1063  1.40   hannken 		    sizeof(struct bufq_disksort), M_DEVBUF, M_ZERO);
   1064  1.39   hannken 		disksort = (struct bufq_disksort *)bufq->bq_private;
   1065  1.39   hannken 		TAILQ_INIT(&disksort->bq_head);
   1066  1.39   hannken 		break;
   1067  1.39   hannken 	case BUFQ_READ_PRIO:
   1068  1.39   hannken 		bufq->bq_get = bufq_prio_get;
   1069  1.39   hannken 		bufq->bq_put = bufq_prio_put;
   1070  1.40   hannken 		MALLOC(bufq->bq_private, struct bufq_prio *,
   1071  1.40   hannken 		    sizeof(struct bufq_prio), M_DEVBUF, M_ZERO);
   1072  1.39   hannken 		prio = (struct bufq_prio *)bufq->bq_private;
   1073  1.39   hannken 		TAILQ_INIT(&prio->bq_read);
   1074  1.39   hannken 		TAILQ_INIT(&prio->bq_write);
   1075  1.39   hannken 		break;
   1076  1.58      yamt 	case BUFQ_PRIOCSCAN:
   1077  1.58      yamt 		bufq_priocscan_init(bufq);
   1078  1.58      yamt 		break;
   1079  1.39   hannken 	default:
   1080  1.40   hannken 		panic("bufq_alloc: method out of range");
   1081  1.39   hannken 	}
   1082  1.40   hannken }
   1083  1.40   hannken 
   1084  1.40   hannken /*
   1085  1.40   hannken  * Destroy a device buffer queue.
   1086  1.40   hannken  */
   1087  1.40   hannken void
   1088  1.40   hannken bufq_free(struct bufq_state *bufq)
   1089  1.40   hannken {
   1090  1.43     enami 
   1091  1.40   hannken 	KASSERT(bufq->bq_private != NULL);
   1092  1.40   hannken 	KASSERT(BUFQ_PEEK(bufq) == NULL);
   1093  1.40   hannken 
   1094  1.40   hannken 	FREE(bufq->bq_private, M_DEVBUF);
   1095  1.40   hannken 	bufq->bq_get = NULL;
   1096  1.40   hannken 	bufq->bq_put = NULL;
   1097  1.50      fvdl }
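
/*
 * Usage sketch (hypothetical driver code, not from the original file):
 * a disk driver allocates its queue at attach time, feeds it from its
 * strategy routine and drains it when the hardware can take the next
 * transfer, e.g.
 *
 *	bufq_alloc(&sc->sc_bufq, BUFQ_DISKSORT | BUFQ_SORT_RAWBLOCK);
 *	...
 *	BUFQ_PUT(&sc->sc_bufq, bp);		in the strategy routine
 *	...
 *	bp = BUFQ_GET(&sc->sc_bufq);		when starting the next transfer
 *	...
 *	bufq_free(&sc->sc_bufq);		at detach, once drained
 *
 * "sc" and "sc_bufq" are assumed names; drivers that do not care about
 * the sorting method can pass bufq_disk_default_strat (see the top of
 * this file) instead of BUFQ_DISKSORT.
 */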
   1098  1.50      fvdl 
   1099  1.50      fvdl /*
   1100  1.50      fvdl  * Bounds checking against the media size, used for the raw partition.
   1101  1.50      fvdl  * The sector size passed in should currently always be DEV_BSIZE,
   1102  1.50      fvdl  * and the media size the size of the device in DEV_BSIZE sectors.
   1103  1.50      fvdl  */
   1104  1.50      fvdl int
   1105  1.50      fvdl bounds_check_with_mediasize(struct buf *bp, int secsize, u_int64_t mediasize)
   1106  1.50      fvdl {
   1107  1.50      fvdl 	int sz;
   1108  1.50      fvdl 
   1109  1.50      fvdl 	sz = howmany(bp->b_bcount, secsize);
   1110  1.50      fvdl 
   1111  1.50      fvdl 	if (bp->b_blkno + sz > mediasize) {
   1112  1.50      fvdl 		sz = mediasize - bp->b_blkno;
   1113  1.50      fvdl 		if (sz == 0) {
   1114  1.50      fvdl 			/* If exactly at end of disk, return EOF. */
   1115  1.50      fvdl 			bp->b_resid = bp->b_bcount;
   1116  1.50      fvdl 			goto done;
   1117  1.50      fvdl 		}
   1118  1.50      fvdl 		if (sz < 0) {
   1119  1.50      fvdl 			/* If past end of disk, return EINVAL. */
   1120  1.50      fvdl 			bp->b_error = EINVAL;
   1121  1.50      fvdl 			goto bad;
   1122  1.50      fvdl 		}
   1123  1.50      fvdl 		/* Otherwise, truncate request. */
   1124  1.50      fvdl 		bp->b_bcount = sz << DEV_BSHIFT;
   1125  1.50      fvdl 	}
   1126  1.50      fvdl 
   1127  1.50      fvdl 	return 1;
   1128  1.50      fvdl 
   1129  1.50      fvdl bad:
   1130  1.50      fvdl 	bp->b_flags |= B_ERROR;
   1131  1.50      fvdl done:
   1132  1.50      fvdl 	return 0;
   1133  1.11   mycroft }
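
/*
 * Usage sketch (hypothetical): a strategy routine handling the raw
 * partition might use it like this, where "sc_size" stands for the
 * device size in DEV_BSIZE sectors:
 *
 *	if (bounds_check_with_mediasize(bp, DEV_BSIZE, sc->sc_size) == 0) {
 *		biodone(bp);		error or end-of-medium already set up
 *		return;
 *	}
 *
 * A zero return means the request was either past the end (error set in
 * the buf) or exactly at end-of-medium (residual set), so the caller only
 * has to complete the buf.
 */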