subr_disk.c revision 1.29.6.5
1 1.29.6.5 nathanw /* $NetBSD: subr_disk.c,v 1.29.6.5 2002/08/01 02:46:23 nathanw Exp $ */
2 1.22 thorpej
3 1.22 thorpej /*-
4 1.26 thorpej * Copyright (c) 1996, 1997, 1999, 2000 The NetBSD Foundation, Inc.
5 1.22 thorpej * All rights reserved.
6 1.22 thorpej *
7 1.22 thorpej * This code is derived from software contributed to The NetBSD Foundation
8 1.22 thorpej * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 1.22 thorpej * NASA Ames Research Center.
10 1.22 thorpej *
11 1.22 thorpej * Redistribution and use in source and binary forms, with or without
12 1.22 thorpej * modification, are permitted provided that the following conditions
13 1.22 thorpej * are met:
14 1.22 thorpej * 1. Redistributions of source code must retain the above copyright
15 1.22 thorpej * notice, this list of conditions and the following disclaimer.
16 1.22 thorpej * 2. Redistributions in binary form must reproduce the above copyright
17 1.22 thorpej * notice, this list of conditions and the following disclaimer in the
18 1.22 thorpej * documentation and/or other materials provided with the distribution.
19 1.22 thorpej * 3. All advertising materials mentioning features or use of this software
20 1.22 thorpej * must display the following acknowledgement:
21 1.22 thorpej * This product includes software developed by the NetBSD
22 1.22 thorpej * Foundation, Inc. and its contributors.
23 1.22 thorpej * 4. Neither the name of The NetBSD Foundation nor the names of its
24 1.22 thorpej * contributors may be used to endorse or promote products derived
25 1.22 thorpej * from this software without specific prior written permission.
26 1.22 thorpej *
27 1.22 thorpej * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 1.22 thorpej * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 1.22 thorpej * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 1.22 thorpej * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 1.22 thorpej * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 1.22 thorpej * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 1.22 thorpej * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 1.22 thorpej * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 1.22 thorpej * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 1.22 thorpej * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 1.22 thorpej * POSSIBILITY OF SUCH DAMAGE.
38 1.22 thorpej */
39 1.12 cgd
40 1.11 mycroft /*
41 1.11 mycroft * Copyright (c) 1982, 1986, 1988, 1993
42 1.11 mycroft * The Regents of the University of California. All rights reserved.
43 1.11 mycroft * (c) UNIX System Laboratories, Inc.
44 1.11 mycroft * All or some portions of this file are derived from material licensed
45 1.11 mycroft * to the University of California by American Telephone and Telegraph
46 1.11 mycroft * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47 1.11 mycroft * the permission of UNIX System Laboratories, Inc.
48 1.11 mycroft *
49 1.11 mycroft * Redistribution and use in source and binary forms, with or without
50 1.11 mycroft * modification, are permitted provided that the following conditions
51 1.11 mycroft * are met:
52 1.11 mycroft * 1. Redistributions of source code must retain the above copyright
53 1.11 mycroft * notice, this list of conditions and the following disclaimer.
54 1.11 mycroft * 2. Redistributions in binary form must reproduce the above copyright
55 1.11 mycroft * notice, this list of conditions and the following disclaimer in the
56 1.11 mycroft * documentation and/or other materials provided with the distribution.
57 1.11 mycroft * 3. All advertising materials mentioning features or use of this software
58 1.11 mycroft * must display the following acknowledgement:
59 1.11 mycroft * This product includes software developed by the University of
60 1.11 mycroft * California, Berkeley and its contributors.
61 1.11 mycroft * 4. Neither the name of the University nor the names of its contributors
62 1.11 mycroft * may be used to endorse or promote products derived from this software
63 1.11 mycroft * without specific prior written permission.
64 1.11 mycroft *
65 1.11 mycroft * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
66 1.11 mycroft * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67 1.11 mycroft * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68 1.11 mycroft * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
69 1.11 mycroft * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
70 1.11 mycroft * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
71 1.11 mycroft * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
72 1.11 mycroft * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
73 1.11 mycroft * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
74 1.11 mycroft * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
75 1.11 mycroft * SUCH DAMAGE.
76 1.11 mycroft *
77 1.12 cgd * @(#)ufs_disksubr.c 8.5 (Berkeley) 1/21/94
78 1.11 mycroft */
79 1.29.6.2 nathanw
80 1.29.6.2 nathanw #include <sys/cdefs.h>
81 1.29.6.5 nathanw __KERNEL_RCSID(0, "$NetBSD: subr_disk.c,v 1.29.6.5 2002/08/01 02:46:23 nathanw Exp $");
82 1.11 mycroft
83 1.11 mycroft #include <sys/param.h>
84 1.15 thorpej #include <sys/kernel.h>
85 1.15 thorpej #include <sys/malloc.h>
86 1.11 mycroft #include <sys/buf.h>
87 1.15 thorpej #include <sys/syslog.h>
88 1.11 mycroft #include <sys/disklabel.h>
89 1.15 thorpej #include <sys/disk.h>
90 1.29.6.4 nathanw #include <sys/sysctl.h>
91 1.14 thorpej
92 1.14 thorpej /*
93 1.15 thorpej * A global list of all disks attached to the system. May grow or
94 1.15 thorpej * shrink over time.
95 1.15 thorpej */
96 1.15 thorpej struct disklist_head disklist; /* TAILQ_HEAD */
97 1.15 thorpej int disk_count; /* number of drives in global disklist */
98 1.29.6.4 nathanw struct simplelock disklist_slock = SIMPLELOCK_INITIALIZER;
99 1.15 thorpej
100 1.15 thorpej /*
101 1.29.6.5 nathanw * XXX This interface will be removed in the near future!
102 1.29.6.5 nathanw *
103 1.11 mycroft * Seek sort for disks. We depend on the driver which calls us using b_cylinder
104 1.11 mycroft * as the current cylinder number.
105 1.11 mycroft *
106 1.26 thorpej * The argument bufq is an I/O queue for the device, on which there are
107 1.26 thorpej * actually two queues, sorted in ascending cylinder order. The first
108 1.26 thorpej * queue holds those requests which are positioned after the current
109 1.26 thorpej * cylinder (in the first request); the second holds requests which came
110 1.26 thorpej * in after their cylinder number was passed. Thus we implement a one-way
111 1.26 thorpej * scan, retracting after reaching the end of the drive to the first request
112 1.26 thorpej * on the second queue, at which time it becomes the first queue.
113 1.11 mycroft *
114 1.11 mycroft * A one-way scan is natural because of the way UNIX read-ahead blocks are
115 1.11 mycroft * allocated.
116 1.26 thorpej *
117 1.26 thorpej * This is further adjusted by any `barriers' which may exist in the queue.
118 1.26 thorpej * The bufq points to the last such ordered request.
119 1.11 mycroft */
120 1.11 mycroft void
121 1.29.6.1 nathanw disksort_cylinder(struct buf_queue *bufq, struct buf *bp)
122 1.11 mycroft {
123 1.26 thorpej struct buf *bq, *nbq;
124 1.26 thorpej
125 1.26 thorpej /*
126 1.26 thorpej * If there are ordered requests on the queue, we must start
127 1.26 thorpej * the elevator sort after the last of these.
128 1.26 thorpej */
129 1.26 thorpej if ((bq = bufq->bq_barrier) == NULL)
130 1.26 thorpej bq = BUFQ_FIRST(bufq);
131 1.11 mycroft
132 1.26 thorpej /*
133 1.26 thorpej * If the queue is empty, or if it's an ordered request,
134 1.26 thorpej * it's easy; we just go on the end.
135 1.26 thorpej */
136 1.26 thorpej if (bq == NULL || (bp->b_flags & B_ORDERED) != 0) {
137 1.26 thorpej BUFQ_INSERT_TAIL(bufq, bp);
138 1.11 mycroft return;
139 1.11 mycroft }
140 1.11 mycroft
141 1.11 mycroft /*
142 1.11 mycroft * If we lie before the first (currently active) request, then we
143 1.11 mycroft * must locate the second request list and add ourselves to it.
144 1.11 mycroft */
145 1.27 hannken if (bp->b_cylinder < bq->b_cylinder ||
146 1.28 thorpej (bp->b_cylinder == bq->b_cylinder &&
147 1.28 thorpej bp->b_rawblkno < bq->b_rawblkno)) {
148 1.26 thorpej while ((nbq = BUFQ_NEXT(bq)) != NULL) {
149 1.11 mycroft /*
150 1.11 mycroft * Check for an ``inversion'' in the normally ascending
151 1.11 mycroft * cylinder numbers, indicating the start of the second
152 1.11 mycroft * request list.
153 1.11 mycroft */
154 1.26 thorpej if (nbq->b_cylinder < bq->b_cylinder) {
155 1.11 mycroft /*
156 1.11 mycroft * Search the second request list for the first
157 1.11 mycroft * request at a larger cylinder number. We go
158 1.11 mycroft * before that; if there is no such request, we
159 1.11 mycroft * go at end.
160 1.11 mycroft */
161 1.11 mycroft do {
162 1.26 thorpej if (bp->b_cylinder < nbq->b_cylinder)
163 1.11 mycroft goto insert;
164 1.26 thorpej if (bp->b_cylinder == nbq->b_cylinder &&
165 1.28 thorpej bp->b_rawblkno < nbq->b_rawblkno)
166 1.26 thorpej goto insert;
167 1.26 thorpej bq = nbq;
168 1.26 thorpej } while ((nbq = BUFQ_NEXT(bq)) != NULL);
169 1.26 thorpej goto insert; /* after last */
170 1.26 thorpej }
171 1.29.6.3 nathanw bq = nbq;
172 1.26 thorpej }
173 1.26 thorpej /*
174 1.26 thorpej * No inversions... we will go after the last, and
175 1.26 thorpej * be the first request in the second request list.
176 1.26 thorpej */
177 1.26 thorpej goto insert;
178 1.26 thorpej }
179 1.26 thorpej /*
180 1.26 thorpej * Request is at/after the current request...
181 1.26 thorpej * sort in the first request list.
182 1.26 thorpej */
183 1.26 thorpej while ((nbq = BUFQ_NEXT(bq)) != NULL) {
184 1.26 thorpej /*
185 1.26 thorpej * We want to go after the current request if there is an
186 1.26 thorpej * inversion after it (i.e. it is the end of the first
187 1.26 thorpej * request list), or if the next request is a larger cylinder
188 1.26 thorpej * than our request.
189 1.26 thorpej */
190 1.26 thorpej if (nbq->b_cylinder < bq->b_cylinder ||
191 1.26 thorpej bp->b_cylinder < nbq->b_cylinder ||
192 1.26 thorpej (bp->b_cylinder == nbq->b_cylinder &&
193 1.28 thorpej bp->b_rawblkno < nbq->b_rawblkno))
194 1.26 thorpej goto insert;
195 1.26 thorpej bq = nbq;
196 1.26 thorpej }
197 1.26 thorpej /*
198 1.26 thorpej * Neither a second list nor a larger request... we go at the end of
199 1.26 thorpej * the first list, which is the same as the end of the whole shebang.
200 1.26 thorpej */
201 1.26 thorpej insert: BUFQ_INSERT_AFTER(bufq, bq, bp);
202 1.26 thorpej }
203 1.26 thorpej
204 1.26 thorpej /*
205 1.28 thorpej * Seek sort for disks. This version sorts based on b_rawblkno, which
206 1.26 thorpej * indicates the block number.
207 1.26 thorpej *
208 1.26 thorpej * As before, there are actually two queues, sorted in ascending block
209 1.26 thorpej * order. The first queue holds those requests which are positioned after
210 1.26 thorpej * the current block (in the first request); the second holds requests which
211 1.26 thorpej * came in after their block number was passed. Thus we implement a one-way
212 1.26 thorpej * scan, retracting after reaching the end of the drive to the first request
213 1.26 thorpej * on the second queue, at which time it becomes the first queue.
214 1.26 thorpej *
215 1.26 thorpej * A one-way scan is natural because of the way UNIX read-ahead blocks are
216 1.26 thorpej * allocated.
217 1.26 thorpej *
218 1.26 thorpej * This is further adjusted by any `barriers' which may exist in the queue.
219 1.26 thorpej * The bufq points to the last such ordered request.
220 1.26 thorpej */
221 1.26 thorpej void
222 1.29.6.1 nathanw disksort_blkno(struct buf_queue *bufq, struct buf *bp)
223 1.26 thorpej {
224 1.26 thorpej struct buf *bq, *nbq;
225 1.26 thorpej
226 1.26 thorpej /*
227 1.26 thorpej * If there are ordered requests on the queue, we must start
228 1.26 thorpej * the elevator sort after the last of these.
229 1.26 thorpej */
230 1.26 thorpej if ((bq = bufq->bq_barrier) == NULL)
231 1.26 thorpej bq = BUFQ_FIRST(bufq);
232 1.26 thorpej
233 1.26 thorpej /*
234 1.26 thorpej * If the queue is empty, or if it's an ordered request,
235 1.26 thorpej * it's easy; we just go on the end.
236 1.26 thorpej */
237 1.26 thorpej if (bq == NULL || (bp->b_flags & B_ORDERED) != 0) {
238 1.26 thorpej BUFQ_INSERT_TAIL(bufq, bp);
239 1.26 thorpej return;
240 1.26 thorpej }
241 1.26 thorpej
242 1.26 thorpej /*
243 1.26 thorpej * If we lie before the first (currently active) request, then we
244 1.26 thorpej * must locate the second request list and add ourselves to it.
245 1.26 thorpej */
246 1.28 thorpej if (bp->b_rawblkno < bq->b_rawblkno) {
247 1.26 thorpej while ((nbq = BUFQ_NEXT(bq)) != NULL) {
248 1.26 thorpej /*
249 1.26 thorpej * Check for an ``inversion'' in the normally ascending
250 1.26 thorpej * block numbers, indicating the start of the second
251 1.26 thorpej * request list.
252 1.26 thorpej */
253 1.28 thorpej if (nbq->b_rawblkno < bq->b_rawblkno) {
254 1.26 thorpej /*
255 1.26 thorpej * Search the second request list for the first
256 1.26 thorpej * request at a larger block number. We go
257 1.26 thorpej * before that; if there is no such request, we
258 1.26 thorpej * go at the end.
259 1.26 thorpej */
260 1.26 thorpej do {
261 1.28 thorpej if (bp->b_rawblkno < nbq->b_rawblkno)
262 1.11 mycroft goto insert;
263 1.26 thorpej bq = nbq;
264 1.26 thorpej } while ((nbq = BUFQ_NEXT(bq)) != NULL);
265 1.11 mycroft goto insert; /* after last */
266 1.11 mycroft }
267 1.29.6.3 nathanw bq = nbq;
268 1.11 mycroft }
269 1.11 mycroft /*
270 1.11 mycroft * No inversions... we will go after the last, and
271 1.11 mycroft * be the first request in the second request list.
272 1.11 mycroft */
273 1.11 mycroft goto insert;
274 1.11 mycroft }
275 1.11 mycroft /*
276 1.11 mycroft * Request is at/after the current request...
277 1.11 mycroft * sort in the first request list.
278 1.11 mycroft */
279 1.26 thorpej while ((nbq = BUFQ_NEXT(bq)) != NULL) {
280 1.11 mycroft /*
281 1.11 mycroft * We want to go after the current request if there is an
282 1.11 mycroft * inversion after it (i.e. it is the end of the first
283 1.11 mycroft * request list), or if the next request is at a larger block
284 1.11 mycroft * number than our request.
285 1.11 mycroft */
286 1.28 thorpej if (nbq->b_rawblkno < bq->b_rawblkno ||
287 1.28 thorpej bp->b_rawblkno < nbq->b_rawblkno)
288 1.11 mycroft goto insert;
289 1.26 thorpej bq = nbq;
290 1.11 mycroft }
291 1.11 mycroft /*
292 1.11 mycroft * Neither a second list nor a larger request... we go at the end of
293 1.11 mycroft * the first list, which is the same as the end of the whole shebang.
294 1.11 mycroft */
295 1.26 thorpej insert: BUFQ_INSERT_AFTER(bufq, bq, bp);
296 1.26 thorpej }
297 1.26 thorpej
298 1.26 thorpej /*
299 1.26 thorpej * Seek non-sort for disks. This version simply inserts requests at
300 1.26 thorpej * the tail of the queue.
301 1.26 thorpej */
302 1.26 thorpej void
303 1.29.6.1 nathanw disksort_tail(struct buf_queue *bufq, struct buf *bp)
304 1.26 thorpej {
305 1.26 thorpej
306 1.26 thorpej BUFQ_INSERT_TAIL(bufq, bp);
307 1.11 mycroft }
308 1.11 mycroft
309 1.11 mycroft /*
310 1.29.6.5 nathanw * XXX End of to be removed interface!
311 1.29.6.5 nathanw */
312 1.29.6.5 nathanw
313 1.29.6.5 nathanw /*
314 1.11 mycroft * Compute checksum for disk label.
315 1.11 mycroft */
316 1.11 mycroft u_int
317 1.29.6.1 nathanw dkcksum(struct disklabel *lp)
318 1.11 mycroft {
319 1.29 augustss u_short *start, *end;
320 1.29 augustss u_short sum = 0;
321 1.11 mycroft
322 1.11 mycroft start = (u_short *)lp;
323 1.11 mycroft end = (u_short *)&lp->d_partitions[lp->d_npartitions];
324 1.11 mycroft while (start < end)
325 1.11 mycroft sum ^= *start++;
326 1.11 mycroft return (sum);
327 1.11 mycroft }
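/*
 * Illustrative use of dkcksum() (a sketch, not code from this file):
 * because the XOR above covers the label up to the end of the partition
 * table, including d_checksum itself, writers conventionally clear the
 * field before computing it:
 *
 *	lp->d_checksum = 0;
 *	lp->d_checksum = dkcksum(lp);
 *
 * A label read back from disk is then consistent exactly when
 * dkcksum(lp) returns 0.
 */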
328 1.11 mycroft
329 1.11 mycroft /*
330 1.11 mycroft * Disk error is the preface to plaintive error messages
331 1.11 mycroft * about failing disk transfers. It prints messages of the form
332 1.11 mycroft
333 1.11 mycroft hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)
334 1.11 mycroft
335 1.11 mycroft * if the offset of the error in the transfer and a disk label
336 1.11 mycroft * are both available. blkdone should be -1 if the position of the error
337 1.11 mycroft * is unknown; the disklabel pointer may be null from drivers that have not
338 1.20 christos * been converted to use them. The message is printed with printf
339 1.11 mycroft * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
340 1.20 christos * The message should be completed (with at least a newline) with printf
341 1.11 mycroft * or addlog, respectively. There is no trailing space.
342 1.11 mycroft */
343 1.11 mycroft void
344 1.29.6.5 nathanw diskerr(const struct buf *bp, const char *dname, const char *what, int pri,
345 1.29.6.5 nathanw int blkdone, const struct disklabel *lp)
346 1.11 mycroft {
347 1.25 drochner int unit = DISKUNIT(bp->b_dev), part = DISKPART(bp->b_dev);
348 1.29.6.1 nathanw void (*pr)(const char *, ...);
349 1.11 mycroft char partname = 'a' + part;
350 1.11 mycroft int sn;
351 1.11 mycroft
352 1.11 mycroft if (pri != LOG_PRINTF) {
353 1.17 christos static const char fmt[] = "";
354 1.17 christos log(pri, fmt);
355 1.11 mycroft pr = addlog;
356 1.11 mycroft } else
357 1.20 christos pr = printf;
358 1.11 mycroft (*pr)("%s%d%c: %s %sing fsbn ", dname, unit, partname, what,
359 1.11 mycroft bp->b_flags & B_READ ? "read" : "writ");
360 1.11 mycroft sn = bp->b_blkno;
361 1.11 mycroft if (bp->b_bcount <= DEV_BSIZE)
362 1.11 mycroft (*pr)("%d", sn);
363 1.11 mycroft else {
364 1.11 mycroft if (blkdone >= 0) {
365 1.11 mycroft sn += blkdone;
366 1.11 mycroft (*pr)("%d of ", sn);
367 1.11 mycroft }
368 1.11 mycroft (*pr)("%d-%d", bp->b_blkno,
369 1.11 mycroft bp->b_blkno + (bp->b_bcount - 1) / DEV_BSIZE);
370 1.11 mycroft }
371 1.11 mycroft if (lp && (blkdone >= 0 || bp->b_bcount <= lp->d_secsize)) {
372 1.11 mycroft sn += lp->d_partitions[part].p_offset;
373 1.11 mycroft (*pr)(" (%s%d bn %d; cn %d", dname, unit, sn,
374 1.11 mycroft sn / lp->d_secpercyl);
375 1.11 mycroft sn %= lp->d_secpercyl;
376 1.29.6.4 nathanw (*pr)(" tn %d sn %d)", sn / lp->d_nsectors,
377 1.29.6.4 nathanw sn % lp->d_nsectors);
378 1.11 mycroft }
379 1.15 thorpej }
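/*
 * Illustrative call of diskerr() (a sketch; the "sd" name and softc
 * layout are hypothetical): a driver reporting a failed transfer might
 * use
 *
 *	diskerr(bp, "sd", "hard error", LOG_PRINTF, -1,
 *	    sc->sc_dk.dk_label);
 *	printf("\n");
 *
 * finishing the line itself, since diskerr() emits neither a trailing
 * space nor a newline.
 */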
380 1.15 thorpej
381 1.15 thorpej /*
382 1.15 thorpej * Initialize the disklist. Called by main() before autoconfiguration.
383 1.15 thorpej */
384 1.15 thorpej void
385 1.29.6.1 nathanw disk_init(void)
386 1.15 thorpej {
387 1.15 thorpej
388 1.15 thorpej TAILQ_INIT(&disklist);
389 1.15 thorpej disk_count = 0;
390 1.15 thorpej }
391 1.15 thorpej
392 1.15 thorpej /*
393 1.15 thorpej * Searches the disklist for the disk corresponding to the
394 1.15 thorpej * name provided.
395 1.15 thorpej */
396 1.15 thorpej struct disk *
397 1.29.6.1 nathanw disk_find(char *name)
398 1.15 thorpej {
399 1.15 thorpej struct disk *diskp;
400 1.15 thorpej
401 1.15 thorpej if ((name == NULL) || (disk_count <= 0))
402 1.15 thorpej return (NULL);
403 1.15 thorpej
404 1.29.6.4 nathanw simple_lock(&disklist_slock);
405 1.29.6.4 nathanw for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
406 1.29.6.4 nathanw diskp = TAILQ_NEXT(diskp, dk_link))
407 1.29.6.4 nathanw if (strcmp(diskp->dk_name, name) == 0) {
408 1.29.6.4 nathanw simple_unlock(&disklist_slock);
409 1.15 thorpej return (diskp);
410 1.29.6.4 nathanw }
411 1.29.6.4 nathanw simple_unlock(&disklist_slock);
412 1.15 thorpej
413 1.15 thorpej return (NULL);
414 1.15 thorpej }
415 1.15 thorpej
416 1.15 thorpej /*
417 1.15 thorpej * Attach a disk.
418 1.15 thorpej */
419 1.15 thorpej void
420 1.29.6.1 nathanw disk_attach(struct disk *diskp)
421 1.15 thorpej {
422 1.15 thorpej int s;
423 1.15 thorpej
424 1.15 thorpej /*
425 1.15 thorpej * Allocate and initialize the disklabel structures. Note that
426 1.15 thorpej * it's not safe to sleep here, since we're probably going to be
427 1.15 thorpej * called during autoconfiguration.
428 1.15 thorpej */
429 1.15 thorpej diskp->dk_label = malloc(sizeof(struct disklabel), M_DEVBUF, M_NOWAIT);
430 1.15 thorpej diskp->dk_cpulabel = malloc(sizeof(struct cpu_disklabel), M_DEVBUF,
431 1.15 thorpej M_NOWAIT);
432 1.15 thorpej if ((diskp->dk_label == NULL) || (diskp->dk_cpulabel == NULL))
433 1.15 thorpej panic("disk_attach: can't allocate storage for disklabel");
434 1.15 thorpej
435 1.24 perry memset(diskp->dk_label, 0, sizeof(struct disklabel));
436 1.24 perry memset(diskp->dk_cpulabel, 0, sizeof(struct cpu_disklabel));
437 1.15 thorpej
438 1.15 thorpej /*
439 1.15 thorpej * Set the attached timestamp.
440 1.15 thorpej */
441 1.15 thorpej s = splclock();
442 1.15 thorpej diskp->dk_attachtime = mono_time;
443 1.15 thorpej splx(s);
444 1.15 thorpej
445 1.15 thorpej /*
446 1.15 thorpej * Link into the disklist.
447 1.15 thorpej */
448 1.29.6.4 nathanw simple_lock(&disklist_slock);
449 1.15 thorpej TAILQ_INSERT_TAIL(&disklist, diskp, dk_link);
450 1.29.6.4 nathanw simple_unlock(&disklist_slock);
451 1.15 thorpej ++disk_count;
452 1.15 thorpej }
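/*
 * Illustrative attach-time use (a sketch; the "xy" driver, its softc and
 * its dkdriver are hypothetical names):
 *
 *	sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
 *	sc->sc_dk.dk_driver = &xydkdriver;
 *	disk_attach(&sc->sc_dk);
 *
 * with a matching disk_detach(&sc->sc_dk) when the device goes away.
 */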
453 1.15 thorpej
454 1.15 thorpej /*
455 1.16 christos * Detach a disk.
456 1.15 thorpej */
457 1.15 thorpej void
458 1.29.6.1 nathanw disk_detach(struct disk *diskp)
459 1.15 thorpej {
460 1.15 thorpej
461 1.15 thorpej /*
462 1.23 thorpej * Remove from the disklist.
463 1.23 thorpej */
464 1.23 thorpej if (--disk_count < 0)
465 1.23 thorpej panic("disk_detach: disk_count < 0");
466 1.29.6.4 nathanw simple_lock(&disklist_slock);
467 1.23 thorpej TAILQ_REMOVE(&disklist, diskp, dk_link);
468 1.29.6.4 nathanw simple_unlock(&disklist_slock);
469 1.23 thorpej
470 1.23 thorpej /*
471 1.15 thorpej * Free the space used by the disklabel structures.
472 1.15 thorpej */
473 1.15 thorpej free(diskp->dk_label, M_DEVBUF);
474 1.15 thorpej free(diskp->dk_cpulabel, M_DEVBUF);
475 1.15 thorpej }
476 1.15 thorpej
477 1.15 thorpej /*
478 1.15 thorpej * Increment a disk's busy counter. If the counter is going from
479 1.15 thorpej * 0 to 1, set the timestamp.
480 1.15 thorpej */
481 1.15 thorpej void
482 1.29.6.1 nathanw disk_busy(struct disk *diskp)
483 1.15 thorpej {
484 1.15 thorpej int s;
485 1.15 thorpej
486 1.15 thorpej /*
487 1.15 thorpej * XXX We'd like to use something as accurate as microtime(),
488 1.15 thorpej * but that doesn't depend on the system TOD clock.
489 1.15 thorpej */
490 1.15 thorpej if (diskp->dk_busy++ == 0) {
491 1.15 thorpej s = splclock();
492 1.15 thorpej diskp->dk_timestamp = mono_time;
493 1.15 thorpej splx(s);
494 1.15 thorpej }
495 1.15 thorpej }
496 1.15 thorpej
497 1.15 thorpej /*
498 1.15 thorpej * Decrement a disk's busy counter, increment the byte count, total busy
499 1.15 thorpej * time, and reset the timestamp.
500 1.15 thorpej */
501 1.15 thorpej void
502 1.29.6.1 nathanw disk_unbusy(struct disk *diskp, long bcount)
503 1.15 thorpej {
504 1.15 thorpej int s;
505 1.15 thorpej struct timeval dv_time, diff_time;
506 1.15 thorpej
507 1.23 thorpej if (diskp->dk_busy-- == 0) {
508 1.23 thorpej printf("%s: dk_busy < 0\n", diskp->dk_name);
509 1.23 thorpej panic("disk_unbusy");
510 1.23 thorpej }
511 1.15 thorpej
512 1.15 thorpej s = splclock();
513 1.15 thorpej dv_time = mono_time;
514 1.15 thorpej splx(s);
515 1.15 thorpej
516 1.15 thorpej timersub(&dv_time, &diskp->dk_timestamp, &diff_time);
517 1.15 thorpej timeradd(&diskp->dk_time, &diff_time, &diskp->dk_time);
518 1.15 thorpej
519 1.15 thorpej diskp->dk_timestamp = dv_time;
520 1.15 thorpej if (bcount > 0) {
521 1.15 thorpej diskp->dk_bytes += bcount;
522 1.15 thorpej diskp->dk_xfer++;
523 1.15 thorpej }
524 1.15 thorpej }
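/*
 * Illustrative accounting pattern (a sketch, not taken from this file;
 * "sc" and its sc_dk member are hypothetical): a driver brackets each
 * transfer with
 *
 *	disk_busy(&sc->sc_dk);
 *
 * when the command is issued, and
 *
 *	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid);
 *
 * when it completes, so that dk_time accumulates only while transfers
 * are outstanding.
 */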
525 1.15 thorpej
526 1.15 thorpej /*
527 1.15 thorpej * Reset the metrics counters on the given disk. Note that we cannot
528 1.15 thorpej * reset the busy counter, as it may cause a panic in disk_unbusy().
529 1.15 thorpej * We also must avoid playing with the timestamp information, as it
530 1.15 thorpej * may skew any pending transfer results.
531 1.15 thorpej */
532 1.15 thorpej void
533 1.29.6.1 nathanw disk_resetstat(struct disk *diskp)
534 1.15 thorpej {
535 1.15 thorpej int s = splbio(), t;
536 1.15 thorpej
537 1.15 thorpej diskp->dk_xfer = 0;
538 1.15 thorpej diskp->dk_bytes = 0;
539 1.15 thorpej
540 1.15 thorpej t = splclock();
541 1.15 thorpej diskp->dk_attachtime = mono_time;
542 1.15 thorpej splx(t);
543 1.15 thorpej
544 1.15 thorpej timerclear(&diskp->dk_time);
545 1.15 thorpej
546 1.15 thorpej splx(s);
547 1.29.6.4 nathanw }
548 1.29.6.4 nathanw
549 1.29.6.4 nathanw int
550 1.29.6.4 nathanw sysctl_disknames(void *vwhere, size_t *sizep)
551 1.29.6.4 nathanw {
552 1.29.6.4 nathanw char buf[DK_DISKNAMELEN + 1];
553 1.29.6.4 nathanw char *where = vwhere;
554 1.29.6.4 nathanw struct disk *diskp;
555 1.29.6.4 nathanw size_t needed, left, slen;
556 1.29.6.4 nathanw int error, first;
557 1.29.6.4 nathanw
558 1.29.6.4 nathanw first = 1;
559 1.29.6.4 nathanw error = 0;
560 1.29.6.4 nathanw needed = 0;
561 1.29.6.4 nathanw left = *sizep;
562 1.29.6.4 nathanw
563 1.29.6.4 nathanw simple_lock(&disklist_slock);
564 1.29.6.4 nathanw for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
565 1.29.6.4 nathanw diskp = TAILQ_NEXT(diskp, dk_link)) {
566 1.29.6.4 nathanw if (where == NULL)
567 1.29.6.4 nathanw needed += strlen(diskp->dk_name) + 1;
568 1.29.6.4 nathanw else {
569 1.29.6.4 nathanw memset(buf, 0, sizeof(buf));
570 1.29.6.4 nathanw if (first) {
571 1.29.6.4 nathanw strncpy(buf, diskp->dk_name, sizeof(buf));
572 1.29.6.4 nathanw first = 0;
573 1.29.6.4 nathanw } else {
574 1.29.6.4 nathanw buf[0] = ' ';
575 1.29.6.4 nathanw strncpy(buf + 1, diskp->dk_name,
576 1.29.6.4 nathanw sizeof(buf) - 1);
577 1.29.6.4 nathanw }
578 1.29.6.4 nathanw buf[DK_DISKNAMELEN] = '\0';
579 1.29.6.4 nathanw slen = strlen(buf);
580 1.29.6.4 nathanw if (left < slen + 1)
581 1.29.6.4 nathanw break;
582 1.29.6.4 nathanw /* +1 to copy out the trailing NUL byte */
583 1.29.6.4 nathanw error = copyout(buf, where, slen + 1);
584 1.29.6.4 nathanw if (error)
585 1.29.6.4 nathanw break;
586 1.29.6.4 nathanw where += slen;
587 1.29.6.4 nathanw needed += slen;
588 1.29.6.4 nathanw left -= slen;
589 1.29.6.4 nathanw }
590 1.29.6.4 nathanw }
591 1.29.6.4 nathanw simple_unlock(&disklist_slock);
592 1.29.6.4 nathanw *sizep = needed;
593 1.29.6.4 nathanw return (error);
594 1.29.6.4 nathanw }
595 1.29.6.4 nathanw
596 1.29.6.4 nathanw int
597 1.29.6.4 nathanw sysctl_diskstats(int *name, u_int namelen, void *vwhere, size_t *sizep)
598 1.29.6.4 nathanw {
599 1.29.6.4 nathanw struct disk_sysctl sdisk;
600 1.29.6.4 nathanw struct disk *diskp;
601 1.29.6.4 nathanw char *where = vwhere;
602 1.29.6.4 nathanw size_t tocopy, left;
603 1.29.6.4 nathanw int error;
604 1.29.6.4 nathanw
605 1.29.6.4 nathanw if (where == NULL) {
606 1.29.6.4 nathanw *sizep = disk_count * sizeof(struct disk_sysctl);
607 1.29.6.4 nathanw return (0);
608 1.29.6.4 nathanw }
609 1.29.6.4 nathanw
610 1.29.6.4 nathanw if (namelen == 0)
611 1.29.6.4 nathanw tocopy = sizeof(sdisk);
612 1.29.6.4 nathanw else
613 1.29.6.4 nathanw tocopy = name[0];
614 1.29.6.4 nathanw
615 1.29.6.4 nathanw error = 0;
616 1.29.6.4 nathanw left = *sizep;
617 1.29.6.4 nathanw memset(&sdisk, 0, sizeof(sdisk));
618 1.29.6.4 nathanw *sizep = 0;
619 1.29.6.4 nathanw
620 1.29.6.4 nathanw simple_lock(&disklist_slock);
621 1.29.6.4 nathanw TAILQ_FOREACH(diskp, &disklist, dk_link) {
622 1.29.6.4 nathanw if (left < sizeof(struct disk_sysctl))
623 1.29.6.4 nathanw break;
624 1.29.6.4 nathanw strncpy(sdisk.dk_name, diskp->dk_name, sizeof(sdisk.dk_name));
625 1.29.6.4 nathanw sdisk.dk_xfer = diskp->dk_xfer;
626 1.29.6.4 nathanw sdisk.dk_seek = diskp->dk_seek;
627 1.29.6.4 nathanw sdisk.dk_bytes = diskp->dk_bytes;
628 1.29.6.4 nathanw sdisk.dk_attachtime_sec = diskp->dk_attachtime.tv_sec;
629 1.29.6.4 nathanw sdisk.dk_attachtime_usec = diskp->dk_attachtime.tv_usec;
630 1.29.6.4 nathanw sdisk.dk_timestamp_sec = diskp->dk_timestamp.tv_sec;
631 1.29.6.4 nathanw sdisk.dk_timestamp_usec = diskp->dk_timestamp.tv_usec;
632 1.29.6.4 nathanw sdisk.dk_time_sec = diskp->dk_time.tv_sec;
633 1.29.6.4 nathanw sdisk.dk_time_usec = diskp->dk_time.tv_usec;
634 1.29.6.4 nathanw sdisk.dk_busy = diskp->dk_busy;
635 1.29.6.4 nathanw
636 1.29.6.4 nathanw error = copyout(&sdisk, where, min(tocopy, sizeof(sdisk)));
637 1.29.6.4 nathanw if (error)
638 1.29.6.4 nathanw break;
639 1.29.6.4 nathanw where += tocopy;
640 1.29.6.4 nathanw *sizep += tocopy;
641 1.29.6.4 nathanw left -= tocopy;
642 1.29.6.4 nathanw }
643 1.29.6.4 nathanw simple_unlock(&disklist_slock);
644 1.29.6.4 nathanw return (error);
645 1.29.6.5 nathanw }
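/*
 * Illustrative userland consumer (a sketch; it assumes these routines
 * are wired to the hw.disknames and hw.diskstats sysctl nodes, as
 * iostat-like tools use them):
 *
 *	int mib[2] = { CTL_HW, HW_DISKNAMES };
 *	size_t len;
 *	char *names;
 *
 *	if (sysctl(mib, 2, NULL, &len, NULL, 0) == 0 &&
 *	    (names = malloc(len)) != NULL &&
 *	    sysctl(mib, 2, names, &len, NULL, 0) == 0)
 *		printf("%s\n", names);
 *
 * hw.diskstats is read the same way and yields an array of
 * struct disk_sysctl, one entry per attached disk.
 */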
646 1.29.6.5 nathanw
647 1.29.6.5 nathanw
648 1.29.6.5 nathanw struct bufq_fcfs {
649 1.29.6.5 nathanw TAILQ_HEAD(, buf) bq_head; /* actual list of buffers */
650 1.29.6.5 nathanw };
651 1.29.6.5 nathanw
652 1.29.6.5 nathanw struct bufq_disksort {
653 1.29.6.5 nathanw TAILQ_HEAD(, buf) bq_head; /* actual list of buffers */
654 1.29.6.5 nathanw };
655 1.29.6.5 nathanw
656 1.29.6.5 nathanw #define PRIO_READ_BURST 48
657 1.29.6.5 nathanw #define PRIO_WRITE_REQ 16
658 1.29.6.5 nathanw
659 1.29.6.5 nathanw struct bufq_prio {
660 1.29.6.5 nathanw TAILQ_HEAD(, buf) bq_read, bq_write; /* actual list of buffers */
661 1.29.6.5 nathanw struct buf *bq_write_next; /* next request in bq_write */
662 1.29.6.5 nathanw struct buf *bq_next; /* current request */
663 1.29.6.5 nathanw int bq_read_burst; /* # of consecutive reads */
664 1.29.6.5 nathanw };
665 1.29.6.5 nathanw
666 1.29.6.5 nathanw
667 1.29.6.5 nathanw /*
668 1.29.6.5 nathanw * Check if two buf's are in ascending order.
669 1.29.6.5 nathanw */
670 1.29.6.5 nathanw static __inline int
671 1.29.6.5 nathanw buf_inorder(struct buf *bp, struct buf *bq, int sortby)
672 1.29.6.5 nathanw {
673 1.29.6.5 nathanw int r;
674 1.29.6.5 nathanw
675 1.29.6.5 nathanw if (bp == NULL || bq == NULL)
676 1.29.6.5 nathanw return(bq == NULL);
677 1.29.6.5 nathanw
678 1.29.6.5 nathanw if (sortby == BUFQ_SORT_CYLINDER)
679 1.29.6.5 nathanw r = bp->b_cylinder - bq->b_cylinder;
680 1.29.6.5 nathanw else
681 1.29.6.5 nathanw r = 0;
682 1.29.6.5 nathanw
683 1.29.6.5 nathanw if (r == 0)
684 1.29.6.5 nathanw r = bp->b_rawblkno - bq->b_rawblkno;
685 1.29.6.5 nathanw
686 1.29.6.5 nathanw return(r <= 0);
687 1.29.6.5 nathanw }
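/*
 * For example (illustrative only): under BUFQ_SORT_CYLINDER a request at
 * cylinder 10, rawblkno 500 is in order before one at cylinder 10,
 * rawblkno 700 (equal cylinders fall through to the rawblkno comparison)
 * and before anything at cylinder 11; under BUFQ_SORT_RAWBLOCK only the
 * rawblkno comparison is made.  Note that a NULL bq counts as in order,
 * while a NULL bp does not.
 */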
688 1.29.6.5 nathanw
689 1.29.6.5 nathanw
690 1.29.6.5 nathanw /*
691 1.29.6.5 nathanw * First-come first-served sort for disks.
692 1.29.6.5 nathanw *
693 1.29.6.5 nathanw * Requests are appended to the queue without any reordering.
694 1.29.6.5 nathanw */
695 1.29.6.5 nathanw static void
696 1.29.6.5 nathanw bufq_fcfs_put(struct bufq_state *bufq, struct buf *bp)
697 1.29.6.5 nathanw {
698 1.29.6.5 nathanw struct bufq_fcfs *fcfs = bufq->bq_private;
699 1.29.6.5 nathanw
700 1.29.6.5 nathanw TAILQ_INSERT_TAIL(&fcfs->bq_head, bp, b_actq);
701 1.29.6.5 nathanw }
702 1.29.6.5 nathanw
703 1.29.6.5 nathanw static struct buf *
704 1.29.6.5 nathanw bufq_fcfs_get(struct bufq_state *bufq, int remove)
705 1.29.6.5 nathanw {
706 1.29.6.5 nathanw struct bufq_fcfs *fcfs = bufq->bq_private;
707 1.29.6.5 nathanw struct buf *bp;
708 1.29.6.5 nathanw
709 1.29.6.5 nathanw bp = TAILQ_FIRST(&fcfs->bq_head);
710 1.29.6.5 nathanw
711 1.29.6.5 nathanw if (bp != NULL && remove)
712 1.29.6.5 nathanw TAILQ_REMOVE(&fcfs->bq_head, bp, b_actq);
713 1.29.6.5 nathanw
714 1.29.6.5 nathanw return(bp);
715 1.29.6.5 nathanw }
716 1.29.6.5 nathanw
717 1.29.6.5 nathanw
718 1.29.6.5 nathanw /*
719 1.29.6.5 nathanw * Seek sort for disks.
720 1.29.6.5 nathanw *
721 1.29.6.5 nathanw * There are actually two queues, sorted in ascending order. The first
722 1.29.6.5 nathanw * queue holds those requests which are positioned after the current block;
723 1.29.6.5 nathanw * the second holds requests which came in after their position was passed.
724 1.29.6.5 nathanw * Thus we implement a one-way scan, retracting after reaching the end of
725 1.29.6.5 nathanw * the drive to the first request on the second queue, at which time it
726 1.29.6.5 nathanw * becomes the first queue.
727 1.29.6.5 nathanw *
728 1.29.6.5 nathanw * A one-way scan is natural because of the way UNIX read-ahead blocks are
729 1.29.6.5 nathanw * allocated.
730 1.29.6.5 nathanw */
731 1.29.6.5 nathanw static void
732 1.29.6.5 nathanw bufq_disksort_put(struct bufq_state *bufq, struct buf *bp)
733 1.29.6.5 nathanw {
734 1.29.6.5 nathanw struct bufq_disksort *disksort = bufq->bq_private;
735 1.29.6.5 nathanw struct buf *bq, *nbq;
736 1.29.6.5 nathanw int sortby;
737 1.29.6.5 nathanw
738 1.29.6.5 nathanw sortby = bufq->bq_flags & BUFQ_SORT_MASK;
739 1.29.6.5 nathanw
740 1.29.6.5 nathanw bq = TAILQ_FIRST(&disksort->bq_head);
741 1.29.6.5 nathanw
742 1.29.6.5 nathanw /*
743 1.29.6.5 nathanw * If the queue is empty it's easy; we just go on the end.
744 1.29.6.5 nathanw */
745 1.29.6.5 nathanw if (bq == NULL) {
746 1.29.6.5 nathanw TAILQ_INSERT_TAIL(&disksort->bq_head, bp, b_actq);
747 1.29.6.5 nathanw return;
748 1.29.6.5 nathanw }
749 1.29.6.5 nathanw
750 1.29.6.5 nathanw /*
751 1.29.6.5 nathanw * If we lie before the currently active request, then we
752 1.29.6.5 nathanw * must locate the second request list and add ourselves to it.
753 1.29.6.5 nathanw */
754 1.29.6.5 nathanw if (buf_inorder(bp, bq, sortby)) {
755 1.29.6.5 nathanw while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
756 1.29.6.5 nathanw /*
757 1.29.6.5 nathanw * Check for an ``inversion'' in the normally ascending
758 1.29.6.5 nathanw * block numbers, indicating the start of the second
759 1.29.6.5 nathanw * request list.
760 1.29.6.5 nathanw */
761 1.29.6.5 nathanw if (buf_inorder(nbq, bq, sortby)) {
762 1.29.6.5 nathanw /*
763 1.29.6.5 nathanw * Search the second request list for the first
764 1.29.6.5 nathanw * request at a larger block number. We go
765 1.29.6.5 nathanw * before that; if there is no such request, we
766 1.29.6.5 nathanw * go at the end.
767 1.29.6.5 nathanw */
768 1.29.6.5 nathanw do {
769 1.29.6.5 nathanw if (buf_inorder(bp, nbq, sortby))
770 1.29.6.5 nathanw goto insert;
771 1.29.6.5 nathanw bq = nbq;
772 1.29.6.5 nathanw } while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL);
773 1.29.6.5 nathanw goto insert; /* after last */
774 1.29.6.5 nathanw }
775 1.29.6.5 nathanw bq = nbq;
776 1.29.6.5 nathanw }
777 1.29.6.5 nathanw /*
778 1.29.6.5 nathanw * No inversions... we will go after the last, and
779 1.29.6.5 nathanw * be the first request in the second request list.
780 1.29.6.5 nathanw */
781 1.29.6.5 nathanw goto insert;
782 1.29.6.5 nathanw }
783 1.29.6.5 nathanw /*
784 1.29.6.5 nathanw * Request is at/after the current request...
785 1.29.6.5 nathanw * sort in the first request list.
786 1.29.6.5 nathanw */
787 1.29.6.5 nathanw while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
788 1.29.6.5 nathanw /*
789 1.29.6.5 nathanw * We want to go after the current request if there is an
790 1.29.6.5 nathanw * inversion after it (i.e. it is the end of the first
791 1.29.6.5 nathanw * request list), or if the next request sorts after
792 1.29.6.5 nathanw * our request.
793 1.29.6.5 nathanw */
794 1.29.6.5 nathanw if (buf_inorder(nbq, bq, sortby) ||
795 1.29.6.5 nathanw buf_inorder(bp, nbq, sortby))
796 1.29.6.5 nathanw goto insert;
797 1.29.6.5 nathanw bq = nbq;
798 1.29.6.5 nathanw }
799 1.29.6.5 nathanw /*
800 1.29.6.5 nathanw * Neither a second list nor a larger request... we go at the end of
801 1.29.6.5 nathanw * the first list, which is the same as the end of the whole shebang.
802 1.29.6.5 nathanw */
803 1.29.6.5 nathanw insert: TAILQ_INSERT_AFTER(&disksort->bq_head, bq, bp, b_actq);
804 1.29.6.5 nathanw }
805 1.29.6.5 nathanw
806 1.29.6.5 nathanw static struct buf *
807 1.29.6.5 nathanw bufq_disksort_get(struct bufq_state *bufq, int remove)
808 1.29.6.5 nathanw {
809 1.29.6.5 nathanw struct bufq_disksort *disksort = bufq->bq_private;
810 1.29.6.5 nathanw struct buf *bp;
811 1.29.6.5 nathanw
812 1.29.6.5 nathanw bp = TAILQ_FIRST(&disksort->bq_head);
813 1.29.6.5 nathanw
814 1.29.6.5 nathanw if (bp != NULL && remove)
815 1.29.6.5 nathanw TAILQ_REMOVE(&disksort->bq_head, bp, b_actq);
816 1.29.6.5 nathanw
817 1.29.6.5 nathanw return(bp);
818 1.29.6.5 nathanw }
819 1.29.6.5 nathanw
820 1.29.6.5 nathanw
821 1.29.6.5 nathanw /*
822 1.29.6.5 nathanw * Seek sort for disks.
823 1.29.6.5 nathanw *
824 1.29.6.5 nathanw * There are two queues. The first queue holds read requests; the second
825 1.29.6.5 nathanw * holds write requests. The read queue is first-come first-served; the
826 1.29.6.5 nathanw * write queue is sorted in ascending block order.
827 1.29.6.5 nathanw * The read queue is processed first. After PRIO_READ_BURST consecutive
828 1.29.6.5 nathanw * read requests while the write queue is non-empty, PRIO_WRITE_REQ
829 1.29.6.5 nathanw * requests from the write queue will be processed.
830 1.29.6.5 nathanw */
831 1.29.6.5 nathanw static void
832 1.29.6.5 nathanw bufq_prio_put(struct bufq_state *bufq, struct buf *bp)
833 1.29.6.5 nathanw {
834 1.29.6.5 nathanw struct bufq_prio *prio = bufq->bq_private;
835 1.29.6.5 nathanw struct buf *bq;
836 1.29.6.5 nathanw int sortby;
837 1.29.6.5 nathanw
838 1.29.6.5 nathanw sortby = bufq->bq_flags & BUFQ_SORT_MASK;
839 1.29.6.5 nathanw
840 1.29.6.5 nathanw /*
841 1.29.6.5 nathanw * If it's a read request append it to the list.
842 1.29.6.5 nathanw */
843 1.29.6.5 nathanw if ((bp->b_flags & B_READ) == B_READ) {
844 1.29.6.5 nathanw TAILQ_INSERT_TAIL(&prio->bq_read, bp, b_actq);
845 1.29.6.5 nathanw return;
846 1.29.6.5 nathanw }
847 1.29.6.5 nathanw
848 1.29.6.5 nathanw bq = TAILQ_FIRST(&prio->bq_write);
849 1.29.6.5 nathanw
850 1.29.6.5 nathanw /*
851 1.29.6.5 nathanw * If the write list is empty, simply append it to the list.
852 1.29.6.5 nathanw */
853 1.29.6.5 nathanw if (bq == NULL) {
854 1.29.6.5 nathanw TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
855 1.29.6.5 nathanw prio->bq_write_next = bp;
856 1.29.6.5 nathanw return;
857 1.29.6.5 nathanw }
858 1.29.6.5 nathanw
859 1.29.6.5 nathanw /*
860 1.29.6.5 nathanw * If we lie after the next request, insert after this request.
861 1.29.6.5 nathanw */
862 1.29.6.5 nathanw if (buf_inorder(prio->bq_write_next, bp, sortby))
863 1.29.6.5 nathanw bq = prio->bq_write_next;
864 1.29.6.5 nathanw
865 1.29.6.5 nathanw /*
866 1.29.6.5 nathanw * Search for the first request at a larger block number.
867 1.29.6.5 nathanw * We go before this request if it exists.
868 1.29.6.5 nathanw */
869 1.29.6.5 nathanw while (bq != NULL && buf_inorder(bq, bp, sortby))
870 1.29.6.5 nathanw bq = TAILQ_NEXT(bq, b_actq);
871 1.29.6.5 nathanw
872 1.29.6.5 nathanw if (bq != NULL)
873 1.29.6.5 nathanw TAILQ_INSERT_BEFORE(bq, bp, b_actq);
874 1.29.6.5 nathanw else
875 1.29.6.5 nathanw TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
876 1.29.6.5 nathanw }
877 1.29.6.5 nathanw
878 1.29.6.5 nathanw static struct buf *
879 1.29.6.5 nathanw bufq_prio_get(struct bufq_state *bufq, int remove)
880 1.29.6.5 nathanw {
881 1.29.6.5 nathanw struct bufq_prio *prio = bufq->bq_private;
882 1.29.6.5 nathanw struct buf *bp;
883 1.29.6.5 nathanw
884 1.29.6.5 nathanw /*
885 1.29.6.5 nathanw * If no current request, get next from the lists.
886 1.29.6.5 nathanw */
887 1.29.6.5 nathanw if (prio->bq_next == NULL) {
888 1.29.6.5 nathanw /*
889 1.29.6.5 nathanw * If at least one list is empty, select the other.
890 1.29.6.5 nathanw */
891 1.29.6.5 nathanw
892 1.29.6.5 nathanw if (TAILQ_FIRST(&prio->bq_read) == NULL) {
893 1.29.6.5 nathanw prio->bq_next = prio->bq_write_next;
894 1.29.6.5 nathanw prio->bq_read_burst = 0;
895 1.29.6.5 nathanw } else if (prio->bq_write_next == NULL) {
896 1.29.6.5 nathanw prio->bq_next = TAILQ_FIRST(&prio->bq_read);
897 1.29.6.5 nathanw prio->bq_read_burst = 0;
898 1.29.6.5 nathanw } else {
899 1.29.6.5 nathanw /*
900 1.29.6.5 nathanw * Both lists have requests. Select the read list up
901 1.29.6.5 nathanw * to PRIO_READ_BURST times, then select the write
902 1.29.6.5 nathanw * list PRIO_WRITE_REQ times.
903 1.29.6.5 nathanw */
904 1.29.6.5 nathanw
905 1.29.6.5 nathanw if (prio->bq_read_burst++ < PRIO_READ_BURST)
906 1.29.6.5 nathanw prio->bq_next = TAILQ_FIRST(&prio->bq_read);
907 1.29.6.5 nathanw else if (prio->bq_read_burst <
908 1.29.6.5 nathanw PRIO_READ_BURST + PRIO_WRITE_REQ)
909 1.29.6.5 nathanw prio->bq_next = prio->bq_write_next;
910 1.29.6.5 nathanw else {
911 1.29.6.5 nathanw prio->bq_next = TAILQ_FIRST(&prio->bq_read);
912 1.29.6.5 nathanw prio->bq_read_burst = 0;
913 1.29.6.5 nathanw }
914 1.29.6.5 nathanw }
915 1.29.6.5 nathanw }
916 1.29.6.5 nathanw
917 1.29.6.5 nathanw bp = prio->bq_next;
918 1.29.6.5 nathanw
919 1.29.6.5 nathanw if (prio->bq_next != NULL && remove) {
920 1.29.6.5 nathanw if ((prio->bq_next->b_flags & B_READ) == B_READ)
921 1.29.6.5 nathanw TAILQ_REMOVE(&prio->bq_read, prio->bq_next, b_actq);
922 1.29.6.5 nathanw else {
923 1.29.6.5 nathanw TAILQ_REMOVE(&prio->bq_write, prio->bq_next, b_actq);
924 1.29.6.5 nathanw /*
925 1.29.6.5 nathanw * Advance the write pointer.
926 1.29.6.5 nathanw */
927 1.29.6.5 nathanw prio->bq_write_next =
928 1.29.6.5 nathanw TAILQ_NEXT(prio->bq_write_next, b_actq);
929 1.29.6.5 nathanw if (prio->bq_write_next == NULL)
930 1.29.6.5 nathanw prio->bq_write_next =
931 1.29.6.5 nathanw TAILQ_FIRST(&prio->bq_write);
932 1.29.6.5 nathanw }
933 1.29.6.5 nathanw
934 1.29.6.5 nathanw prio->bq_next = NULL;
935 1.29.6.5 nathanw }
936 1.29.6.5 nathanw
937 1.29.6.5 nathanw return(bp);
938 1.29.6.5 nathanw }
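/*
 * For example (illustrative only): with both queues non-empty the logic
 * above hands out up to PRIO_READ_BURST (48) reads, then PRIO_WRITE_REQ
 * (16) writes starting at bq_write_next, and then clears the burst
 * counter and begins a new read burst.
 */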
939 1.29.6.5 nathanw
940 1.29.6.5 nathanw /*
941 1.29.6.5 nathanw * Create a device buffer queue.
942 1.29.6.5 nathanw */
943 1.29.6.5 nathanw void
944 1.29.6.5 nathanw bufq_alloc(struct bufq_state *bufq, int flags)
945 1.29.6.5 nathanw {
946 1.29.6.5 nathanw struct bufq_fcfs *fcfs;
947 1.29.6.5 nathanw struct bufq_disksort *disksort;
948 1.29.6.5 nathanw struct bufq_prio *prio;
949 1.29.6.5 nathanw
950 1.29.6.5 nathanw bufq->bq_flags = flags;
951 1.29.6.5 nathanw
952 1.29.6.5 nathanw switch (flags & BUFQ_SORT_MASK) {
953 1.29.6.5 nathanw case BUFQ_SORT_RAWBLOCK:
954 1.29.6.5 nathanw case BUFQ_SORT_CYLINDER:
955 1.29.6.5 nathanw break;
956 1.29.6.5 nathanw case 0:
957 1.29.6.5 nathanw if ((flags & BUFQ_METHOD_MASK) == BUFQ_FCFS)
958 1.29.6.5 nathanw break;
959 1.29.6.5 nathanw /* FALLTHROUGH */
960 1.29.6.5 nathanw default:
961 1.29.6.5 nathanw panic("bufq_alloc: sort out of range");
962 1.29.6.5 nathanw }
963 1.29.6.5 nathanw
964 1.29.6.5 nathanw switch (flags & BUFQ_METHOD_MASK) {
965 1.29.6.5 nathanw case BUFQ_FCFS:
966 1.29.6.5 nathanw bufq->bq_get = bufq_fcfs_get;
967 1.29.6.5 nathanw bufq->bq_put = bufq_fcfs_put;
968 1.29.6.5 nathanw MALLOC(bufq->bq_private, struct bufq_fcfs *,
969 1.29.6.5 nathanw sizeof(struct bufq_fcfs), M_DEVBUF, M_ZERO);
970 1.29.6.5 nathanw fcfs = (struct bufq_fcfs *)bufq->bq_private;
971 1.29.6.5 nathanw TAILQ_INIT(&fcfs->bq_head);
972 1.29.6.5 nathanw break;
973 1.29.6.5 nathanw case BUFQ_DISKSORT:
974 1.29.6.5 nathanw bufq->bq_get = bufq_disksort_get;
975 1.29.6.5 nathanw bufq->bq_put = bufq_disksort_put;
976 1.29.6.5 nathanw MALLOC(bufq->bq_private, struct bufq_disksort *,
977 1.29.6.5 nathanw sizeof(struct bufq_disksort), M_DEVBUF, M_ZERO);
978 1.29.6.5 nathanw disksort = (struct bufq_disksort *)bufq->bq_private;
979 1.29.6.5 nathanw TAILQ_INIT(&disksort->bq_head);
980 1.29.6.5 nathanw break;
981 1.29.6.5 nathanw case BUFQ_READ_PRIO:
982 1.29.6.5 nathanw bufq->bq_get = bufq_prio_get;
983 1.29.6.5 nathanw bufq->bq_put = bufq_prio_put;
984 1.29.6.5 nathanw MALLOC(bufq->bq_private, struct bufq_prio *,
985 1.29.6.5 nathanw sizeof(struct bufq_prio), M_DEVBUF, M_ZERO);
986 1.29.6.5 nathanw prio = (struct bufq_prio *)bufq->bq_private;
987 1.29.6.5 nathanw TAILQ_INIT(&prio->bq_read);
988 1.29.6.5 nathanw TAILQ_INIT(&prio->bq_write);
989 1.29.6.5 nathanw break;
990 1.29.6.5 nathanw default:
991 1.29.6.5 nathanw panic("bufq_alloc: method out of range");
992 1.29.6.5 nathanw }
993 1.29.6.5 nathanw }
994 1.29.6.5 nathanw
995 1.29.6.5 nathanw /*
996 1.29.6.5 nathanw * Destroy a device buffer queue.
997 1.29.6.5 nathanw */
998 1.29.6.5 nathanw void
999 1.29.6.5 nathanw bufq_free(struct bufq_state *bufq)
1000 1.29.6.5 nathanw {
1001 1.29.6.5 nathanw KASSERT(bufq->bq_private != NULL);
1002 1.29.6.5 nathanw KASSERT(BUFQ_PEEK(bufq) == NULL);
1003 1.29.6.5 nathanw
1004 1.29.6.5 nathanw FREE(bufq->bq_private, M_DEVBUF);
1005 1.29.6.5 nathanw bufq->bq_get = NULL;
1006 1.29.6.5 nathanw bufq->bq_put = NULL;
1007 1.11 mycroft }
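/*
 * Illustrative driver usage of this interface (a sketch, not part of
 * this file; "sc" and its sc_bufq member are hypothetical, and
 * BUFQ_PUT/BUFQ_GET are assumed to be the sys/buf.h wrappers around the
 * bq_put/bq_get methods installed above, like the BUFQ_PEEK used in
 * bufq_free()):
 *
 *	bufq_alloc(&sc->sc_bufq, BUFQ_DISKSORT | BUFQ_SORT_RAWBLOCK);
 *	...
 *	BUFQ_PUT(&sc->sc_bufq, bp);	(in the strategy routine)
 *	bp = BUFQ_GET(&sc->sc_bufq);	(when starting the next transfer)
 *	...
 *	bufq_free(&sc->sc_bufq);	(once the queue has drained)
 */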