/*	$NetBSD: subr_disk.c,v 1.46 2002/11/01 15:20:03 simonb Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *        The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *        @(#)ufs_disksubr.c        8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_disk.c,v 1.46 2002/11/01 15:20:03 simonb Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/syslog.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/sysctl.h>

/*
 * A global list of all disks attached to the system.  May grow or
 * shrink over time.
 */
struct disklist_head disklist;	/* TAILQ_HEAD */
int disk_count;			/* number of drives in global disklist */
struct simplelock disklist_slock = SIMPLELOCK_INITIALIZER;

/*
 * Compute checksum for disk label.
 */
u_int
dkcksum(struct disklabel *lp)
{
        u_short *start, *end;
        u_short sum = 0;

        start = (u_short *)lp;
        end = (u_short *)&lp->d_partitions[lp->d_npartitions];
        while (start < end)
                sum ^= *start++;
        return (sum);
}
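
/*
 * Example (not compiled): by convention, d_checksum is chosen so that the
 * XOR of every 16-bit word in the label, d_checksum included, comes out to
 * zero.  A machine-dependent readdisklabel() would therefore validate a
 * candidate label roughly as sketched below; "dlp", "lp" and the message
 * strings are illustrative only.
 */
#if 0
        if (dlp->d_magic != DISKMAGIC || dlp->d_magic2 != DISKMAGIC)
                msg = "no disk label";
        else if (dkcksum(dlp) != 0)
                msg = "disk label corrupted";
        else
                *lp = *dlp;                /* accept the label */

        /* When writing a label back, regenerate the checksum: */
        lp->d_checksum = 0;
        lp->d_checksum = dkcksum(lp);
#endif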

/*
 * Disk error is the preface to plaintive error messages
 * about failing disk transfers.  It prints messages of the form

hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)

 * if the offset of the error in the transfer and a disk label
 * are both available.  blkdone should be -1 if the position of the error
 * is unknown; the disklabel pointer may be null from drivers that have not
 * been converted to use them.  The message is printed with printf
 * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
 * The message should be completed (with at least a newline) with printf
 * or addlog, respectively.  There is no trailing space.
 */
void
diskerr(const struct buf *bp, const char *dname, const char *what, int pri,
    int blkdone, const struct disklabel *lp)
{
        int unit = DISKUNIT(bp->b_dev), part = DISKPART(bp->b_dev);
        void (*pr)(const char *, ...);
        char partname = 'a' + part;
        int sn;

        if (pri != LOG_PRINTF) {
                static const char fmt[] = "";
                log(pri, fmt);
                pr = addlog;
        } else
                pr = printf;
        (*pr)("%s%d%c: %s %sing fsbn ", dname, unit, partname, what,
            bp->b_flags & B_READ ? "read" : "writ");
        sn = bp->b_blkno;
        if (bp->b_bcount <= DEV_BSIZE)
                (*pr)("%d", sn);
        else {
                if (blkdone >= 0) {
                        sn += blkdone;
                        (*pr)("%d of ", sn);
                }
                (*pr)("%d-%d", bp->b_blkno,
                    bp->b_blkno + (bp->b_bcount - 1) / DEV_BSIZE);
        }
        if (lp && (blkdone >= 0 || bp->b_bcount <= lp->d_secsize)) {
                sn += lp->d_partitions[part].p_offset;
                (*pr)(" (%s%d bn %d; cn %d", dname, unit, sn,
                    sn / lp->d_secpercyl);
                sn %= lp->d_secpercyl;
                (*pr)(" tn %d sn %d)", sn / lp->d_nsectors,
                    sn % lp->d_nsectors);
        }
}
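
/*
 * Example (not compiled): a driver's completion path might report an
 * unrecoverable transfer error like this.  The device name "xd" and the
 * "sc->sc_dk" softc member are placeholders.
 */
#if 0
        /* Position of the failure unknown, so pass -1 for blkdone. */
        diskerr(bp, "xd", "hard error", LOG_PRINTF, -1, sc->sc_dk.dk_label);
        printf("\n");                /* diskerr() leaves the line open */
#endif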

/*
 * Initialize the disklist.  Called by main() before autoconfiguration.
 */
void
disk_init(void)
{

        TAILQ_INIT(&disklist);
        disk_count = 0;
}

/*
 * Searches the disklist for the disk corresponding to the
 * name provided.
 */
struct disk *
disk_find(char *name)
{
        struct disk *diskp;

        if ((name == NULL) || (disk_count <= 0))
                return (NULL);

        simple_lock(&disklist_slock);
        for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
            diskp = TAILQ_NEXT(diskp, dk_link))
                if (strcmp(diskp->dk_name, name) == 0) {
                        simple_unlock(&disklist_slock);
                        return (diskp);
                }
        simple_unlock(&disklist_slock);

        return (NULL);
}

/*
 * Attach a disk.
 */
void
disk_attach(struct disk *diskp)
{
        int s;

        /*
         * Allocate and initialize the disklabel structures.  Note that
         * it's not safe to sleep here, since we're probably going to be
         * called during autoconfiguration.
         */
        diskp->dk_label = malloc(sizeof(struct disklabel), M_DEVBUF, M_NOWAIT);
        diskp->dk_cpulabel = malloc(sizeof(struct cpu_disklabel), M_DEVBUF,
            M_NOWAIT);
        if ((diskp->dk_label == NULL) || (diskp->dk_cpulabel == NULL))
                panic("disk_attach: can't allocate storage for disklabel");

        memset(diskp->dk_label, 0, sizeof(struct disklabel));
        memset(diskp->dk_cpulabel, 0, sizeof(struct cpu_disklabel));

        /*
         * Set the attached timestamp.
         */
        s = splclock();
        diskp->dk_attachtime = mono_time;
        splx(s);

        /*
         * Link into the disklist.
         */
        simple_lock(&disklist_slock);
        TAILQ_INSERT_TAIL(&disklist, diskp, dk_link);
        simple_unlock(&disklist_slock);
        ++disk_count;
}

/*
 * Detach a disk.
 */
void
disk_detach(struct disk *diskp)
{

        /*
         * Remove from the disklist.
         */
        if (--disk_count < 0)
                panic("disk_detach: disk_count < 0");
        simple_lock(&disklist_slock);
        TAILQ_REMOVE(&disklist, diskp, dk_link);
        simple_unlock(&disklist_slock);

        /*
         * Free the space used by the disklabel structures.
         */
        free(diskp->dk_label, M_DEVBUF);
        free(diskp->dk_cpulabel, M_DEVBUF);
}

/*
 * Increment a disk's busy counter.  If the counter is going from
 * 0 to 1, set the timestamp.
 */
void
disk_busy(struct disk *diskp)
{
        int s;

        /*
         * XXX We'd like to use something as accurate as microtime(),
         * but that doesn't depend on the system TOD clock.
         */
        if (diskp->dk_busy++ == 0) {
                s = splclock();
                diskp->dk_timestamp = mono_time;
                splx(s);
        }
}

/*
 * Decrement a disk's busy counter, increment the byte count, total busy
 * time, and reset the timestamp.
 */
void
disk_unbusy(struct disk *diskp, long bcount, int read)
{
        int s;
        struct timeval dv_time, diff_time;

        if (diskp->dk_busy-- == 0) {
                printf("%s: dk_busy < 0\n", diskp->dk_name);
                panic("disk_unbusy");
        }

        s = splclock();
        dv_time = mono_time;
        splx(s);

        timersub(&dv_time, &diskp->dk_timestamp, &diff_time);
        timeradd(&diskp->dk_time, &diff_time, &diskp->dk_time);

        diskp->dk_timestamp = dv_time;
        if (bcount > 0) {
                if (read) {
                        diskp->dk_rbytes += bcount;
                        diskp->dk_rxfer++;
                } else {
                        diskp->dk_wbytes += bcount;
                        diskp->dk_wxfer++;
                }
        }
}
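
/*
 * Example (not compiled): a driver brackets each transfer with
 * disk_busy()/disk_unbusy() so the accounting above feeds iostat(8).
 * The "sc->sc_dk" softc member is a placeholder.
 */
#if 0
        /* In the start/strategy path, just before issuing the transfer: */
        disk_busy(&sc->sc_dk);

        /* In the completion path, with the number of bytes transferred: */
        disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid,
            (bp->b_flags & B_READ) != 0);
#endif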

/*
 * Reset the metrics counters on the given disk.  Note that we cannot
 * reset the busy counter, as it may cause a panic in disk_unbusy().
 * We also must avoid playing with the timestamp information, as it
 * may skew any pending transfer results.
 */
void
disk_resetstat(struct disk *diskp)
{
        int s = splbio(), t;

        diskp->dk_rxfer = 0;
        diskp->dk_rbytes = 0;
        diskp->dk_wxfer = 0;
        diskp->dk_wbytes = 0;

        t = splclock();
        diskp->dk_attachtime = mono_time;
        splx(t);

        timerclear(&diskp->dk_time);

        splx(s);
}

int
sysctl_disknames(void *vwhere, size_t *sizep)
{
        char buf[DK_DISKNAMELEN + 1];
        char *where = vwhere;
        struct disk *diskp;
        size_t needed, left, slen;
        int error, first;

        first = 1;
        error = 0;
        needed = 0;
        left = *sizep;

        simple_lock(&disklist_slock);
        for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
            diskp = TAILQ_NEXT(diskp, dk_link)) {
                if (where == NULL)
                        needed += strlen(diskp->dk_name) + 1;
                else {
                        memset(buf, 0, sizeof(buf));
                        if (first) {
                                strncpy(buf, diskp->dk_name, sizeof(buf));
                                first = 0;
                        } else {
                                buf[0] = ' ';
                                strncpy(buf + 1, diskp->dk_name,
                                    sizeof(buf) - 1);
                        }
                        buf[DK_DISKNAMELEN] = '\0';
                        slen = strlen(buf);
                        if (left < slen + 1)
                                break;
                        /* +1 to copy out the trailing NUL byte */
                        error = copyout(buf, where, slen + 1);
                        if (error)
                                break;
                        where += slen;
                        needed += slen;
                        left -= slen;
                }
        }
        simple_unlock(&disklist_slock);
        *sizep = needed;
        return (error);
}

int
sysctl_diskstats(int *name, u_int namelen, void *vwhere, size_t *sizep)
{
        struct disk_sysctl sdisk;
        struct disk *diskp;
        char *where = vwhere;
        size_t tocopy, left;
        int error;

        if (where == NULL) {
                if (namelen == 0)
                        *sizep = disk_count * sizeof(sdisk);
                else
                        *sizep = disk_count * name[0];
                return (0);
        }

        if (namelen == 0)
                tocopy = sizeof(sdisk);
        else
                tocopy = name[0];

        error = 0;
        left = *sizep;
        memset(&sdisk, 0, sizeof(sdisk));
        *sizep = 0;

        simple_lock(&disklist_slock);
        TAILQ_FOREACH(diskp, &disklist, dk_link) {
                if (left < tocopy)
                        break;
                strncpy(sdisk.dk_name, diskp->dk_name, sizeof(sdisk.dk_name));
                sdisk.dk_xfer = diskp->dk_rxfer + diskp->dk_wxfer;
                sdisk.dk_rxfer = diskp->dk_rxfer;
                sdisk.dk_wxfer = diskp->dk_wxfer;
                sdisk.dk_seek = diskp->dk_seek;
                sdisk.dk_bytes = diskp->dk_rbytes + diskp->dk_wbytes;
                sdisk.dk_rbytes = diskp->dk_rbytes;
                sdisk.dk_wbytes = diskp->dk_wbytes;
                sdisk.dk_attachtime_sec = diskp->dk_attachtime.tv_sec;
                sdisk.dk_attachtime_usec = diskp->dk_attachtime.tv_usec;
                sdisk.dk_timestamp_sec = diskp->dk_timestamp.tv_sec;
                sdisk.dk_timestamp_usec = diskp->dk_timestamp.tv_usec;
                sdisk.dk_time_sec = diskp->dk_time.tv_sec;
                sdisk.dk_time_usec = diskp->dk_time.tv_usec;
                sdisk.dk_busy = diskp->dk_busy;

                error = copyout(&sdisk, where, min(tocopy, sizeof(sdisk)));
                if (error)
                        break;
                where += tocopy;
                *sizep += tocopy;
                left -= tocopy;
        }
        simple_unlock(&disklist_slock);
        return (error);
}
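
/*
 * Example (userland, not part of this file): the two helpers above back
 * sysctl nodes that a statistics tool can query.  This sketch assumes the
 * hw.diskstats node (CTL_HW, HW_DISKSTATS) and the struct disk_sysctl
 * layout from <sys/disk.h>; it would be built against <sys/param.h>,
 * <sys/sysctl.h>, <err.h>, <stdio.h> and <stdlib.h>.
 */
#if 0
        int mib[2] = { CTL_HW, HW_DISKSTATS };
        struct disk_sysctl *stats;
        size_t len, i;

        if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
                err(1, "sysctl: sizing hw.diskstats");
        if ((stats = malloc(len)) == NULL)
                err(1, "malloc");
        if (sysctl(mib, 2, stats, &len, NULL, 0) == -1)
                err(1, "sysctl: hw.diskstats");
        for (i = 0; i < len / sizeof(*stats); i++)
                printf("%s: %llu bytes read, %llu bytes written\n",
                    stats[i].dk_name,
                    (unsigned long long)stats[i].dk_rbytes,
                    (unsigned long long)stats[i].dk_wbytes);
        free(stats);
#endif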

struct bufq_fcfs {
        TAILQ_HEAD(, buf) bq_head;	/* actual list of buffers */
};

struct bufq_disksort {
        TAILQ_HEAD(, buf) bq_head;	/* actual list of buffers */
};

#define PRIO_READ_BURST		48
#define PRIO_WRITE_REQ		16

struct bufq_prio {
        TAILQ_HEAD(, buf) bq_read, bq_write; /* actual list of buffers */
        struct buf *bq_write_next;	/* next request in bq_write */
        struct buf *bq_next;		/* current request */
        int bq_read_burst;		/* # of consecutive reads */
};


/*
 * Check if two buf's are in ascending order.
 */
static __inline int
buf_inorder(struct buf *bp, struct buf *bq, int sortby)
{
        int r;

        if (bp == NULL || bq == NULL)
                return (bq == NULL);

        if (sortby == BUFQ_SORT_CYLINDER)
                r = bp->b_cylinder - bq->b_cylinder;
        else
                r = 0;

        if (r == 0)
                r = bp->b_rawblkno - bq->b_rawblkno;

        return (r <= 0);
}


/*
 * First-come first-served sort for disks.
 *
 * Requests are appended to the queue without any reordering.
 */
static void
bufq_fcfs_put(struct bufq_state *bufq, struct buf *bp)
{
        struct bufq_fcfs *fcfs = bufq->bq_private;

        TAILQ_INSERT_TAIL(&fcfs->bq_head, bp, b_actq);
}

static struct buf *
bufq_fcfs_get(struct bufq_state *bufq, int remove)
{
        struct bufq_fcfs *fcfs = bufq->bq_private;
        struct buf *bp;

        bp = TAILQ_FIRST(&fcfs->bq_head);

        if (bp != NULL && remove)
                TAILQ_REMOVE(&fcfs->bq_head, bp, b_actq);

        return (bp);
}

/*
 * Seek sort for disks.
 *
 * There are actually two queues, sorted in ascending order.  The first
 * queue holds those requests which are positioned after the current block;
 * the second holds requests which came in after their position was passed.
 * Thus we implement a one-way scan, retracting after reaching the end of
 * the drive to the first request on the second queue, at which time it
 * becomes the first queue.
 *
 * A one-way scan is natural because of the way UNIX read-ahead blocks are
 * allocated.
 */
static void
bufq_disksort_put(struct bufq_state *bufq, struct buf *bp)
{
        struct bufq_disksort *disksort = bufq->bq_private;
        struct buf *bq, *nbq;
        int sortby;

        sortby = bufq->bq_flags & BUFQ_SORT_MASK;

        bq = TAILQ_FIRST(&disksort->bq_head);

        /*
         * If the queue is empty it's easy; we just go on the end.
         */
        if (bq == NULL) {
                TAILQ_INSERT_TAIL(&disksort->bq_head, bp, b_actq);
                return;
        }

        /*
         * If we lie before the currently active request, then we
         * must locate the second request list and add ourselves to it.
         */
        if (buf_inorder(bp, bq, sortby)) {
                while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
                        /*
                         * Check for an ``inversion'' in the normally ascending
                         * block numbers, indicating the start of the second
                         * request list.
                         */
                        if (buf_inorder(nbq, bq, sortby)) {
                                /*
                                 * Search the second request list for the first
                                 * request at a larger block number.  We go
                                 * after that; if there is no such request, we
                                 * go at the end.
                                 */
                                do {
                                        if (buf_inorder(bp, nbq, sortby))
                                                goto insert;
                                        bq = nbq;
                                } while ((nbq =
                                    TAILQ_NEXT(bq, b_actq)) != NULL);
                                goto insert;		/* after last */
                        }
                        bq = nbq;
                }
                /*
                 * No inversions... we will go after the last, and
                 * be the first request in the second request list.
                 */
                goto insert;
        }
        /*
         * Request is at/after the current request...
         * sort in the first request list.
         */
        while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
                /*
                 * We want to go after the current request if there is an
                 * inversion after it (i.e. it is the end of the first
                 * request list), or if the next request is a larger cylinder
                 * than our request.
                 */
                if (buf_inorder(nbq, bq, sortby) ||
                    buf_inorder(bp, nbq, sortby))
                        goto insert;
                bq = nbq;
        }
        /*
         * Neither a second list nor a larger request... we go at the end of
         * the first list, which is the same as the end of the whole shebang.
         */
insert: TAILQ_INSERT_AFTER(&disksort->bq_head, bq, bp, b_actq);
}

static struct buf *
bufq_disksort_get(struct bufq_state *bufq, int remove)
{
        struct bufq_disksort *disksort = bufq->bq_private;
        struct buf *bp;

        bp = TAILQ_FIRST(&disksort->bq_head);

        if (bp != NULL && remove)
                TAILQ_REMOVE(&disksort->bq_head, bp, b_actq);

        return (bp);
}

/*
 * Seek sort for disks.
 *
 * There are two queues.  The first queue holds read requests; the second
 * holds write requests.  The read queue is first-come first-served; the
 * write queue is sorted in ascending block order.
 * The read queue is processed first.  After PRIO_READ_BURST consecutive
 * read requests with a non-empty write queue, PRIO_WRITE_REQ requests from
 * the write queue will be processed.
 */
static void
bufq_prio_put(struct bufq_state *bufq, struct buf *bp)
{
        struct bufq_prio *prio = bufq->bq_private;
        struct buf *bq;
        int sortby;

        sortby = bufq->bq_flags & BUFQ_SORT_MASK;

        /*
         * If it's a read request append it to the list.
         */
        if ((bp->b_flags & B_READ) == B_READ) {
                TAILQ_INSERT_TAIL(&prio->bq_read, bp, b_actq);
                return;
        }

        bq = TAILQ_FIRST(&prio->bq_write);

        /*
         * If the write list is empty, simply append it to the list.
         */
        if (bq == NULL) {
                TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
                prio->bq_write_next = bp;
                return;
        }

        /*
         * If we lie after the next request, insert after this request.
         */
        if (buf_inorder(prio->bq_write_next, bp, sortby))
                bq = prio->bq_write_next;

        /*
         * Search for the first request at a larger block number.
         * We go before this request if it exists.
         */
        while (bq != NULL && buf_inorder(bq, bp, sortby))
                bq = TAILQ_NEXT(bq, b_actq);

        if (bq != NULL)
                TAILQ_INSERT_BEFORE(bq, bp, b_actq);
        else
                TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
}

static struct buf *
bufq_prio_get(struct bufq_state *bufq, int remove)
{
        struct bufq_prio *prio = bufq->bq_private;
        struct buf *bp;

        /*
         * If no current request, get next from the lists.
         */
        if (prio->bq_next == NULL) {
                /*
                 * If at least one list is empty, select the other.
                 */
                if (TAILQ_FIRST(&prio->bq_read) == NULL) {
                        prio->bq_next = prio->bq_write_next;
                        prio->bq_read_burst = 0;
                } else if (prio->bq_write_next == NULL) {
                        prio->bq_next = TAILQ_FIRST(&prio->bq_read);
                        prio->bq_read_burst = 0;
                } else {
                        /*
                         * Both lists have requests.  Select the read list up
                         * to PRIO_READ_BURST times, then select the write
                         * list PRIO_WRITE_REQ times.
                         */
                        if (prio->bq_read_burst++ < PRIO_READ_BURST)
                                prio->bq_next = TAILQ_FIRST(&prio->bq_read);
                        else if (prio->bq_read_burst <
                            PRIO_READ_BURST + PRIO_WRITE_REQ)
                                prio->bq_next = prio->bq_write_next;
                        else {
                                prio->bq_next = TAILQ_FIRST(&prio->bq_read);
                                prio->bq_read_burst = 0;
                        }
                }
        }

        bp = prio->bq_next;

        if (bp != NULL && remove) {
                if ((bp->b_flags & B_READ) == B_READ)
                        TAILQ_REMOVE(&prio->bq_read, bp, b_actq);
                else {
                        /*
                         * Advance the write pointer before removing
                         * bp since it is actually prio->bq_write_next.
                         */
                        prio->bq_write_next =
                            TAILQ_NEXT(prio->bq_write_next, b_actq);
                        TAILQ_REMOVE(&prio->bq_write, bp, b_actq);
                        if (prio->bq_write_next == NULL)
                                prio->bq_write_next =
                                    TAILQ_FIRST(&prio->bq_write);
                }

                prio->bq_next = NULL;
        }

        return (bp);
}

/*
 * Create a device buffer queue.
 */
void
bufq_alloc(struct bufq_state *bufq, int flags)
{
        struct bufq_fcfs *fcfs;
        struct bufq_disksort *disksort;
        struct bufq_prio *prio;

        bufq->bq_flags = flags;

        switch (flags & BUFQ_SORT_MASK) {
        case BUFQ_SORT_RAWBLOCK:
        case BUFQ_SORT_CYLINDER:
                break;
        case 0:
                if ((flags & BUFQ_METHOD_MASK) == BUFQ_FCFS)
                        break;
                /* FALLTHROUGH */
        default:
                panic("bufq_alloc: sort out of range");
        }

        switch (flags & BUFQ_METHOD_MASK) {
        case BUFQ_FCFS:
                bufq->bq_get = bufq_fcfs_get;
                bufq->bq_put = bufq_fcfs_put;
                MALLOC(bufq->bq_private, struct bufq_fcfs *,
                    sizeof(struct bufq_fcfs), M_DEVBUF, M_ZERO);
                fcfs = (struct bufq_fcfs *)bufq->bq_private;
                TAILQ_INIT(&fcfs->bq_head);
                break;
        case BUFQ_DISKSORT:
                bufq->bq_get = bufq_disksort_get;
                bufq->bq_put = bufq_disksort_put;
                MALLOC(bufq->bq_private, struct bufq_disksort *,
                    sizeof(struct bufq_disksort), M_DEVBUF, M_ZERO);
                disksort = (struct bufq_disksort *)bufq->bq_private;
                TAILQ_INIT(&disksort->bq_head);
                break;
        case BUFQ_READ_PRIO:
                bufq->bq_get = bufq_prio_get;
                bufq->bq_put = bufq_prio_put;
                MALLOC(bufq->bq_private, struct bufq_prio *,
                    sizeof(struct bufq_prio), M_DEVBUF, M_ZERO);
                prio = (struct bufq_prio *)bufq->bq_private;
                TAILQ_INIT(&prio->bq_read);
                TAILQ_INIT(&prio->bq_write);
                break;
        default:
                panic("bufq_alloc: method out of range");
        }
}

/*
 * Destroy a device buffer queue.
 */
void
bufq_free(struct bufq_state *bufq)
{

        KASSERT(bufq->bq_private != NULL);
        KASSERT(BUFQ_PEEK(bufq) == NULL);

        FREE(bufq->bq_private, M_DEVBUF);
        bufq->bq_get = NULL;
        bufq->bq_put = NULL;
}
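
/*
 * Example (not compiled): a driver embeds a struct bufq_state in its softc,
 * allocates the queue at attach time and drives it from its strategy and
 * start routines.  The BUFQ_PUT()/BUFQ_GET() names are assumed to be the
 * <sys/buf.h> wrappers around bq_put/bq_get, and "sc"/"xxstart" are
 * placeholders.
 */
#if 0
        /* attach: sort by raw block number */
        bufq_alloc(&sc->sc_bufq, BUFQ_DISKSORT | BUFQ_SORT_RAWBLOCK);

        /* strategy: queue the request, then kick the hardware */
        BUFQ_PUT(&sc->sc_bufq, bp);
        xxstart(sc);

        /* start/interrupt: take the next request off the queue */
        bp = BUFQ_GET(&sc->sc_bufq);

        /* detach: the queue must be empty before it is freed */
        bufq_free(&sc->sc_bufq);
#endif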