/*	$NetBSD: bufq_priocscan.c,v 1.18 2014/01/28 12:50:54 martin Exp $	*/

/*-
 * Copyright (c)2004,2005,2006,2008,2009,2011,2012 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bufq_priocscan.c,v 1.18 2014/01/28 12:50:54 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/bufq_impl.h>
#include <sys/kmem.h>
#include <sys/rbtree.h>

#undef	PRIOCSCAN_USE_GLOBAL_POSITION

/*
 * Cyclical scan (CSCAN)
 */
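
/*
 * in CSCAN, pending requests are sorted by disk address and served in
 * ascending order; when the scan passes the highest pending address it
 * wraps around to the lowest one and sweeps again in the same direction.
 * one-directional sweeps give more uniform service across the disk than
 * a plain elevator (SCAN), which reverses direction at each end.
 */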

struct cscan_key {
	daddr_t	k_rawblkno;
	int k_cylinder;
};

struct cscan_queue {
	rb_tree_t cq_buffers;		/* ordered list of buffers */
#if !defined(PRIOCSCAN_USE_GLOBAL_POSITION)
	struct cscan_key cq_lastkey;	/* key of last request */
#endif /* !defined(PRIOCSCAN_USE_GLOBAL_POSITION) */
	int cq_sortby;			/* BUFQ_SORT_MASK */
	rb_tree_ops_t cq_ops;
};

static signed int
buf_cmp(const struct buf *b1, const struct buf *b2, int sortby)
{

	if (buf_inorder(b2, b1, sortby)) {
		return 1;	/* b1 > b2 */
	}
	if (buf_inorder(b1, b2, sortby)) {
		return -1;	/* b1 < b2 */
	}
	return 0;
}

/* return positive if n1 > n2 */
static signed int
cscan_tree_compare_nodes(void *context, const void *n1, const void *n2)
{
	const struct cscan_queue * const q = context;
	const struct buf * const b1 = n1;
	const struct buf * const b2 = n2;
	const int sortby = q->cq_sortby;
	const int diff = buf_cmp(b1, b2, sortby);

	if (diff != 0) {
		return diff;
	}

	/*
	 * XXX rawblkno/cylinder might not be unique.  eg. unbuffered i/o.
	 * fall back to comparing the buf pointers themselves so that the
	 * tree keeps a strict total order and no two nodes ever compare
	 * equal (see the KASSERT in cscan_put).
	 */
	if (b1 > b2) {
		return 1;
	}
	if (b1 < b2) {
		return -1;
	}
	return 0;
}

/* return positive if n1 > k2 */
static signed int
cscan_tree_compare_key(void *context, const void *n1, const void *k2)
{
	const struct cscan_queue * const q = context;
	const struct buf * const b1 = n1;
	const struct cscan_key * const key = k2;
	const struct buf tmp = {
		.b_rawblkno = key->k_rawblkno,
		.b_cylinder = key->k_cylinder,
	};
	const struct buf *b2 = &tmp;
	const int sortby = q->cq_sortby;

	return buf_cmp(b1, b2, sortby);
}

static void __unused
cscan_dump(struct cscan_queue *cq)
{
	const int sortby = cq->cq_sortby;
	struct buf *bp;

	RB_TREE_FOREACH(bp, &cq->cq_buffers) {
		if (sortby == BUFQ_SORT_RAWBLOCK) {
			printf(" %jd", (intmax_t)bp->b_rawblkno);
		} else {
			printf(" %jd/%jd",
			    (intmax_t)bp->b_cylinder, (intmax_t)bp->b_rawblkno);
		}
	}
}

static inline bool
cscan_empty(struct cscan_queue *q)
{

	/* XXX this might do more work than necessary */
	return rb_tree_iterate(&q->cq_buffers, NULL, RB_DIR_LEFT) == NULL;
}

static void
cscan_put(struct cscan_queue *q, struct buf *bp)
{
	struct buf *obp __diagused;

	obp = rb_tree_insert_node(&q->cq_buffers, bp);
	KASSERT(obp == bp); /* see cscan_tree_compare_nodes */
}

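/*
 * cscan_get: return the request at or after the current scan position
 * ("key"), wrapping around to the lowest-keyed request when the scan
 * has passed everything in the tree.  with "remove", also dequeue the
 * request and advance the scan position to just past it.
 */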
static struct buf *
cscan_get(struct cscan_queue *q, int remove, struct cscan_key *key)
{
	struct buf *bp;

	bp = rb_tree_find_node_geq(&q->cq_buffers, key);
	KDASSERT(bp == NULL || cscan_tree_compare_key(q, bp, key) >= 0);
	if (bp == NULL) {
		bp = rb_tree_iterate(&q->cq_buffers, NULL, RB_DIR_LEFT);
		KDASSERT(cscan_tree_compare_key(q, bp, key) < 0);
	}
	if (bp != NULL && remove) {
#if defined(DEBUG)
		struct buf *nbp;
#endif /* defined(DEBUG) */

		rb_tree_remove_node(&q->cq_buffers, bp);
		/*
		 * remember the head position.
		 */
		key->k_cylinder = bp->b_cylinder;
		key->k_rawblkno = bp->b_rawblkno + (bp->b_bcount >> DEV_BSHIFT);
#if defined(DEBUG)
		nbp = rb_tree_find_node_geq(&q->cq_buffers, key);
		if (nbp != NULL && cscan_tree_compare_nodes(q, nbp, bp) < 0) {
			panic("%s: wrong order %p < %p\n", __func__,
			    nbp, bp);
		}
#endif /* defined(DEBUG) */
	}
	return bp;
}

static void
cscan_init(struct cscan_queue *q, int sortby)
{
	static const rb_tree_ops_t cscan_tree_ops = {
		.rbto_compare_nodes = cscan_tree_compare_nodes,
		.rbto_compare_key = cscan_tree_compare_key,
		.rbto_node_offset = offsetof(struct buf, b_u.u_rbnode),
		.rbto_context = NULL,
	};

	q->cq_sortby = sortby;
	/*
	 * XXX copy the ops into each queue so that it can carry its own
	 * context pointer; the rbtree.h API has no separate context
	 * argument to rb_tree_init().
	 */
	q->cq_ops = cscan_tree_ops;
	q->cq_ops.rbto_context = q;
	rb_tree_init(&q->cq_buffers, &q->cq_ops);
}

/*
 * Per-priority CSCAN.
 *
 * XXX probably we should have a way to raise
 * the priority of on-queue requests.
 */
#define	PRIOCSCAN_NQUEUE	3

struct priocscan_queue {
	struct cscan_queue q_queue;
	unsigned int q_burst;
};

struct bufq_priocscan {
	struct priocscan_queue bq_queue[PRIOCSCAN_NQUEUE];

#if defined(PRIOCSCAN_USE_GLOBAL_POSITION)
	/*
	 * XXX using a "global" head position can reduce positioning time
	 * when switching between queues, although it might hurt fairness.
	 */
	struct cscan_key bq_lastkey;
#endif
};

/*
 * how many requests to serve when there are pending requests on other
 * queues.
 *
 * XXX tune
 * be careful: making these values larger is likely to increase total
 * throughput, but it can also increase latency for some workloads.
 */
const int priocscan_burst[] = {
	64, 16, 4
};
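
/*
 * for example, with the default bursts of {64, 16, 4}, a full cycle
 * under sustained load on all three queues serves 64 time-critical,
 * 16 time-limited and 4 time-noncritical requests, i.e. roughly a
 * 16:4:1 split.
 */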

static void bufq_priocscan_init(struct bufq_state *);
static void bufq_priocscan_put(struct bufq_state *, struct buf *);
static struct buf *bufq_priocscan_get(struct bufq_state *, int);

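/*
 * a driver selects this strategy by name through the bufq(9) interface.
 * a minimal usage sketch (hypothetical; error handling omitted, "bp" is
 * a struct buf being queued):
 *
 *	struct bufq_state *bufq;
 *
 *	bufq_alloc(&bufq, "priocscan", BUFQ_SORT_RAWBLOCK);
 *	BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
 *	bufq_put(bufq, bp);
 *	...
 *	bp = bufq_get(bufq);
 *	...
 *	bufq_free(bufq);
 */
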
BUFQ_DEFINE(priocscan, 40, bufq_priocscan_init);

static inline struct cscan_queue *bufq_priocscan_selectqueue(
    struct bufq_priocscan *, const struct buf *);

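/*
 * map a buffer's priority (BIO_GETPRIO) to one of the three queues;
 * queue 0 is the most preferred and gets the largest burst count.
 */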
static inline struct cscan_queue *
bufq_priocscan_selectqueue(struct bufq_priocscan *q, const struct buf *bp)
{
	static const int priocscan_priomap[] = {
		[BPRIO_TIMENONCRITICAL] = 2,
		[BPRIO_TIMELIMITED] = 1,
		[BPRIO_TIMECRITICAL] = 0
	};

	return &q->bq_queue[priocscan_priomap[BIO_GETPRIO(bp)]].q_queue;
}

static void
bufq_priocscan_put(struct bufq_state *bufq, struct buf *bp)
{
	struct bufq_priocscan *q = bufq->bq_private;
	struct cscan_queue *cq;

	cq = bufq_priocscan_selectqueue(q, bp);
	cscan_put(cq, bp);
}

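/*
 * bufq_priocscan_get: choose the queue to serve, then take a request
 * from it.  the highest priority non-empty queue is served while it has
 * burst count left; when it is exhausted, lower priority queues get a
 * chance, and once every non-empty queue has used up its burst the
 * counts are reset.  a lone non-empty queue is served without consuming
 * its burst count.
 */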
static struct buf *
bufq_priocscan_get(struct bufq_state *bufq, int remove)
{
	struct bufq_priocscan *q = bufq->bq_private;
	struct priocscan_queue *pq, *npq;
	struct priocscan_queue *first; /* highest priority non-empty queue */
	const struct priocscan_queue *epq;
	struct buf *bp;
	bool single; /* true if there's only one non-empty queue */

	/*
	 * find the highest priority non-empty queue.
	 */
	pq = &q->bq_queue[0];
	epq = pq + PRIOCSCAN_NQUEUE;
	for (; pq < epq; pq++) {
		if (!cscan_empty(&pq->q_queue)) {
			break;
		}
	}
	if (pq == epq) {
		/*
		 * all our queues are empty.  there's nothing to serve.
		 */
		return NULL;
	}
	first = pq;

	/*
	 * scan the rest of the queues.
	 *
	 * if we have two or more non-empty queues, we serve the highest
	 * priority one with a non-zero burst count.
	 */
	single = true;
	for (npq = pq + 1; npq < epq; npq++) {
		if (!cscan_empty(&npq->q_queue)) {
			/*
			 * we found another non-empty queue.
			 * it means that a queue needs to consume its burst
			 * count to be served.
			 */
			single = false;

			/*
			 * check if our current candidate queue has already
			 * exhausted its burst count.
			 */
			if (pq->q_burst > 0) {
				break;
			}
			pq = npq;
		}
	}
	if (single) {
		/*
		 * there's only one non-empty queue.
		 * just serve it without consuming its burst count.
		 */
		KASSERT(pq == first);
	} else {
		/*
		 * there are two or more non-empty queues.
		 */
		if (pq->q_burst == 0) {
			/*
			 * no queue can be served because all of them have
			 * already exhausted their burst counts.
			 */
			unsigned int i;
#ifdef DEBUG
			for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
				pq = &q->bq_queue[i];
				if (!cscan_empty(&pq->q_queue) && pq->q_burst) {
					panic("%s: inconsistent burst count",
					    __func__);
				}
			}
#endif /* DEBUG */
			/*
			 * reset burst counts.
			 */
			if (remove) {
				for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
					pq = &q->bq_queue[i];
					pq->q_burst = priocscan_burst[i];
				}
			}

			/*
			 * serve the highest priority non-empty queue.
			 */
			pq = first;
		}
		/*
		 * consume the burst count.
		 *
		 * XXX account only by number of requests.  is it good enough?
		 */
		if (remove) {
			KASSERT(pq->q_burst > 0);
			pq->q_burst--;
		}
	}

	/*
	 * finally, get a request from the selected queue.
	 */
	KDASSERT(!cscan_empty(&pq->q_queue));
	bp = cscan_get(&pq->q_queue, remove,
#if defined(PRIOCSCAN_USE_GLOBAL_POSITION)
	    &q->bq_lastkey
#else /* defined(PRIOCSCAN_USE_GLOBAL_POSITION) */
	    &pq->q_queue.cq_lastkey
#endif /* defined(PRIOCSCAN_USE_GLOBAL_POSITION) */
	    );
	KDASSERT(bp != NULL);
	KDASSERT(&pq->q_queue == bufq_priocscan_selectqueue(q, bp));

	return bp;
}

static struct buf *
bufq_priocscan_cancel(struct bufq_state *bufq, struct buf *bp)
{
	struct bufq_priocscan * const q = bufq->bq_private;
	unsigned int i;

	for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
		struct cscan_queue * const cq = &q->bq_queue[i].q_queue;
		struct buf *it;

		/*
		 * XXX probably could be faster but the cancel functionality
		 * is not widely used anyway.
		 */
		RB_TREE_FOREACH(it, &cq->cq_buffers) {
			if (it == bp) {
				rb_tree_remove_node(&cq->cq_buffers, bp);
				return bp;
			}
		}
	}
	return NULL;
}

static void
bufq_priocscan_fini(struct bufq_state *bufq)
{

	KASSERT(bufq->bq_private != NULL);
	kmem_free(bufq->bq_private, sizeof(struct bufq_priocscan));
}

static void
bufq_priocscan_init(struct bufq_state *bufq)
{
	struct bufq_priocscan *q;
	const int sortby = bufq->bq_flags & BUFQ_SORT_MASK;
	unsigned int i;

	bufq->bq_get = bufq_priocscan_get;
	bufq->bq_put = bufq_priocscan_put;
	bufq->bq_cancel = bufq_priocscan_cancel;
	bufq->bq_fini = bufq_priocscan_fini;
	bufq->bq_private = kmem_zalloc(sizeof(struct bufq_priocscan), KM_SLEEP);

	q = bufq->bq_private;
	for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
		struct cscan_queue *cq = &q->bq_queue[i].q_queue;

		cscan_init(cq, sortby);
	}
}