/*	$NetBSD: subr_disk.c,v 1.61 2004/09/25 03:30:44 thorpej Exp $	*/

/*-
 * Copyright (c) 1996, 1997, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_disksubr.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_disk.c,v 1.61 2004/09/25 03:30:44 thorpej Exp $");

#include "opt_compat_netbsd.h"
#include "opt_bufq.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/syslog.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/sysctl.h>
#include <lib/libkern/libkern.h>

/*
 * A global list of all disks attached to the system.  May grow or
 * shrink over time.
 */
struct disklist_head disklist;	/* TAILQ_HEAD */
int disk_count;			/* number of drives in global disklist */
struct simplelock disklist_slock = SIMPLELOCK_INITIALIZER;

#ifdef NEW_BUFQ_STRATEGY
int bufq_disk_default_strat = BUFQ_READ_PRIO;
#else /* NEW_BUFQ_STRATEGY */
int bufq_disk_default_strat = BUFQ_DISKSORT;
#endif /* NEW_BUFQ_STRATEGY */

/*
 * Compute checksum for disk label.
 */
u_int
dkcksum(struct disklabel *lp)
{
	u_short *start, *end;
	u_short sum = 0;

	start = (u_short *)lp;
	end = (u_short *)&lp->d_partitions[lp->d_npartitions];
	while (start < end)
		sum ^= *start++;
	return (sum);
}
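
/*
 * Example (illustrative only): readdisklabel() implementations typically
 * accept an on-disk label when its magic numbers match and dkcksum()
 * over it is zero; writers arrange for that by folding the checksum in:
 *
 *	lp->d_checksum = 0;
 *	lp->d_checksum = dkcksum(lp);	(dkcksum(lp) now yields 0)
 */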

/*
 * Disk error is the preface to plaintive error messages
 * about failing disk transfers.  It prints messages of the form

hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)

 * if the offset of the error in the transfer and a disk label
 * are both available.  blkdone should be -1 if the position of the error
 * is unknown; the disklabel pointer may be null from drivers that have not
 * been converted to use them.  The message is printed with printf
 * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
 * The message should be completed (with at least a newline) with printf
 * or addlog, respectively.  There is no trailing space.
 */
#ifndef PRIdaddr
#define PRIdaddr PRId64
#endif
void
diskerr(const struct buf *bp, const char *dname, const char *what, int pri,
    int blkdone, const struct disklabel *lp)
{
	int unit = DISKUNIT(bp->b_dev), part = DISKPART(bp->b_dev);
	void (*pr)(const char *, ...);
	char partname = 'a' + part;
	daddr_t sn;

	if (/*CONSTCOND*/0)
		/* Compiler will error if the format is wrong... */
		printf("%" PRIdaddr, bp->b_blkno);

	if (pri != LOG_PRINTF) {
		static const char fmt[] = "";
		log(pri, fmt);
		pr = addlog;
	} else
		pr = printf;
	(*pr)("%s%d%c: %s %sing fsbn ", dname, unit, partname, what,
	    bp->b_flags & B_READ ? "read" : "writ");
	sn = bp->b_blkno;
	if (bp->b_bcount <= DEV_BSIZE)
		(*pr)("%" PRIdaddr, sn);
	else {
		if (blkdone >= 0) {
			sn += blkdone;
			(*pr)("%" PRIdaddr " of ", sn);
		}
		(*pr)("%" PRIdaddr "-%" PRIdaddr "", bp->b_blkno,
		    bp->b_blkno + (bp->b_bcount - 1) / DEV_BSIZE);
	}
	if (lp && (blkdone >= 0 || bp->b_bcount <= lp->d_secsize)) {
		sn += lp->d_partitions[part].p_offset;
		(*pr)(" (%s%d bn %" PRIdaddr "; cn %" PRIdaddr "",
		    dname, unit, sn, sn / lp->d_secpercyl);
		sn %= lp->d_secpercyl;
		(*pr)(" tn %" PRIdaddr " sn %" PRIdaddr ")",
		    sn / lp->d_nsectors, sn % lp->d_nsectors);
	}
}
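
/*
 * Example (illustrative only; "sc" is a hypothetical softc): a driver
 * reports a failed transfer and supplies the terminating newline itself:
 *
 *	diskerr(bp, "sd", "hard error", LOG_PRINTF, -1,
 *	    sc->sc_dk.dk_label);
 *	printf("\n");
 */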

/*
 * Initialize the disklist.  Called by main() before autoconfiguration.
 */
void
disk_init(void)
{

	TAILQ_INIT(&disklist);
	disk_count = 0;
}

/*
 * Searches the disklist for the disk corresponding to the
 * name provided.
 */
struct disk *
disk_find(char *name)
{
	struct disk *diskp;

	if ((name == NULL) || (disk_count <= 0))
		return (NULL);

	simple_lock(&disklist_slock);
	for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
	    diskp = TAILQ_NEXT(diskp, dk_link))
		if (strcmp(diskp->dk_name, name) == 0) {
			simple_unlock(&disklist_slock);
			return (diskp);
		}
	simple_unlock(&disklist_slock);

	return (NULL);
}

/*
 * Attach a disk.
 */
void
disk_attach(struct disk *diskp)
{
	int s;

	/*
	 * Allocate and initialize the disklabel structures.  Note that
	 * it's not safe to sleep here, since we're probably going to be
	 * called during autoconfiguration.
	 */
	diskp->dk_label = malloc(sizeof(struct disklabel), M_DEVBUF, M_NOWAIT);
	diskp->dk_cpulabel = malloc(sizeof(struct cpu_disklabel), M_DEVBUF,
	    M_NOWAIT);
	if ((diskp->dk_label == NULL) || (diskp->dk_cpulabel == NULL))
		panic("disk_attach: can't allocate storage for disklabel");

	memset(diskp->dk_label, 0, sizeof(struct disklabel));
	memset(diskp->dk_cpulabel, 0, sizeof(struct cpu_disklabel));

	/*
	 * Initialize the wedge-related locks and other fields.
	 */
	lockinit(&diskp->dk_rawlock, PRIBIO, "dkrawlk", 0, 0);
	lockinit(&diskp->dk_openlock, PRIBIO, "dkoplk", 0, 0);
	LIST_INIT(&diskp->dk_wedges);
	diskp->dk_nwedges = 0;

	/*
	 * Set the attached timestamp.
	 */
	s = splclock();
	diskp->dk_attachtime = mono_time;
	splx(s);

	/*
	 * Link into the disklist.
	 */
	simple_lock(&disklist_slock);
	TAILQ_INSERT_TAIL(&disklist, diskp, dk_link);
	simple_unlock(&disklist_slock);
	++disk_count;
}
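
/*
 * Example (illustrative only; "sc" and "xxdkdriver" are hypothetical):
 * a driver typically fills in the name and driver hooks before attaching:
 *
 *	sc->sc_dk.dk_name = sc->sc_dev.dv_xname;
 *	sc->sc_dk.dk_driver = &xxdkdriver;
 *	disk_attach(&sc->sc_dk);
 */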

/*
 * Detach a disk.
 */
void
disk_detach(struct disk *diskp)
{

	(void) lockmgr(&diskp->dk_openlock, LK_DRAIN, NULL);

	/*
	 * Remove from the disklist.
	 */
	if (--disk_count < 0)
		panic("disk_detach: disk_count < 0");
	simple_lock(&disklist_slock);
	TAILQ_REMOVE(&disklist, diskp, dk_link);
	simple_unlock(&disklist_slock);

	/*
	 * Free the space used by the disklabel structures.
	 */
	free(diskp->dk_label, M_DEVBUF);
	free(diskp->dk_cpulabel, M_DEVBUF);
}

/*
 * Increment a disk's busy counter.  If the counter is going from
 * 0 to 1, set the timestamp.
 */
void
disk_busy(struct disk *diskp)
{
	int s;

	/*
	 * XXX We'd like to use something as accurate as microtime(),
	 * but that doesn't depend on the system TOD clock.
	 */
	if (diskp->dk_busy++ == 0) {
		s = splclock();
		diskp->dk_timestamp = mono_time;
		splx(s);
	}
}

/*
 * Decrement a disk's busy counter, increment the byte count, total busy
 * time, and reset the timestamp.
 */
void
disk_unbusy(struct disk *diskp, long bcount, int read)
{
	int s;
	struct timeval dv_time, diff_time;

	if (diskp->dk_busy-- == 0) {
		printf("%s: dk_busy < 0\n", diskp->dk_name);
		panic("disk_unbusy");
	}

	s = splclock();
	dv_time = mono_time;
	splx(s);

	timersub(&dv_time, &diskp->dk_timestamp, &diff_time);
	timeradd(&diskp->dk_time, &diff_time, &diskp->dk_time);

	diskp->dk_timestamp = dv_time;
	if (bcount > 0) {
		if (read) {
			diskp->dk_rbytes += bcount;
			diskp->dk_rxfer++;
		} else {
			diskp->dk_wbytes += bcount;
			diskp->dk_wxfer++;
		}
	}
}
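
/*
 * Example (illustrative only; "sc" is a hypothetical softc): these two
 * calls bracket each transfer, charging the bytes actually moved on
 * completion:
 *
 *	disk_busy(&sc->sc_dk);
 *	(start the transfer)
 *	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid,
 *	    bp->b_flags & B_READ);
 *	biodone(bp);
 */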

/*
 * Reset the metrics counters on the given disk.  Note that we cannot
 * reset the busy counter, as it may cause a panic in disk_unbusy().
 * We also must avoid playing with the timestamp information, as it
 * may skew any pending transfer results.
 */
void
disk_resetstat(struct disk *diskp)
{
	int s = splbio(), t;

	diskp->dk_rxfer = 0;
	diskp->dk_rbytes = 0;
	diskp->dk_wxfer = 0;
	diskp->dk_wbytes = 0;

	t = splclock();
	diskp->dk_attachtime = mono_time;
	splx(t);

	timerclear(&diskp->dk_time);

	splx(s);
}

int
sysctl_hw_disknames(SYSCTLFN_ARGS)
{
	char buf[DK_DISKNAMELEN + 1];
	char *where = oldp;
	struct disk *diskp;
	size_t needed, left, slen;
	int error, first;

	if (newp != NULL)
		return (EPERM);
	if (namelen != 0)
		return (EINVAL);

	first = 1;
	error = 0;
	needed = 0;
	left = *oldlenp;

	simple_lock(&disklist_slock);
	for (diskp = TAILQ_FIRST(&disklist); diskp != NULL;
	    diskp = TAILQ_NEXT(diskp, dk_link)) {
		if (where == NULL)
			needed += strlen(diskp->dk_name) + 1;
		else {
			memset(buf, 0, sizeof(buf));
			if (first) {
				strncpy(buf, diskp->dk_name, sizeof(buf));
				first = 0;
			} else {
				buf[0] = ' ';
				strncpy(buf + 1, diskp->dk_name,
				    sizeof(buf) - 1);
			}
			buf[DK_DISKNAMELEN] = '\0';
			slen = strlen(buf);
			if (left < slen + 1)
				break;
			/* +1 to copy out the trailing NUL byte */
			error = copyout(buf, where, slen + 1);
			if (error)
				break;
			where += slen;
			needed += slen;
			left -= slen;
		}
	}
	simple_unlock(&disklist_slock);
	*oldlenp = needed;
	return (error);
}
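
/*
 * Example (illustrative only): from userland the list built above is
 * read through the hw.disknames node, e.g. with error handling omitted:
 *
 *	char names[1024];
 *	size_t len = sizeof(names);
 *	sysctlbyname("hw.disknames", names, &len, NULL, 0);
 */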

int
sysctl_hw_diskstats(SYSCTLFN_ARGS)
{
	struct disk_sysctl sdisk;
	struct disk *diskp;
	char *where = oldp;
	size_t tocopy, left;
	int error;

	if (newp != NULL)
		return (EPERM);

	/*
	 * The original hw.diskstats call was broken and did not require
	 * userland to pass in the size of its struct disk_sysctl.  This
	 * was fixed after NetBSD 1.6 was released, so applications
	 * that do not pass in the size are given an error, unless
	 * we care about 1.6 compatibility.
	 */
	if (namelen == 0)
#ifdef COMPAT_16
		tocopy = offsetof(struct disk_sysctl, dk_rxfer);
#else
		return (EINVAL);
#endif
	else
		tocopy = name[0];

	if (where == NULL) {
		*oldlenp = disk_count * tocopy;
		return (0);
	}

	error = 0;
	left = *oldlenp;
	memset(&sdisk, 0, sizeof(sdisk));
	*oldlenp = 0;

	simple_lock(&disklist_slock);
	TAILQ_FOREACH(diskp, &disklist, dk_link) {
		if (left < tocopy)
			break;
		strncpy(sdisk.dk_name, diskp->dk_name, sizeof(sdisk.dk_name));
		sdisk.dk_xfer = diskp->dk_rxfer + diskp->dk_wxfer;
		sdisk.dk_rxfer = diskp->dk_rxfer;
		sdisk.dk_wxfer = diskp->dk_wxfer;
		sdisk.dk_seek = diskp->dk_seek;
		sdisk.dk_bytes = diskp->dk_rbytes + diskp->dk_wbytes;
		sdisk.dk_rbytes = diskp->dk_rbytes;
		sdisk.dk_wbytes = diskp->dk_wbytes;
		sdisk.dk_attachtime_sec = diskp->dk_attachtime.tv_sec;
		sdisk.dk_attachtime_usec = diskp->dk_attachtime.tv_usec;
		sdisk.dk_timestamp_sec = diskp->dk_timestamp.tv_sec;
		sdisk.dk_timestamp_usec = diskp->dk_timestamp.tv_usec;
		sdisk.dk_time_sec = diskp->dk_time.tv_sec;
		sdisk.dk_time_usec = diskp->dk_time.tv_usec;
		sdisk.dk_busy = diskp->dk_busy;

		error = copyout(&sdisk, where, min(tocopy, sizeof(sdisk)));
		if (error)
			break;
		where += tocopy;
		*oldlenp += tocopy;
		left -= tocopy;
	}
	simple_unlock(&disklist_slock);
	return (error);
}

struct bufq_fcfs {
	TAILQ_HEAD(, buf) bq_head;	/* actual list of buffers */
};

struct bufq_disksort {
	TAILQ_HEAD(, buf) bq_head;	/* actual list of buffers */
};

#define	PRIO_READ_BURST		48
#define	PRIO_WRITE_REQ		16

struct bufq_prio {
	TAILQ_HEAD(, buf) bq_read, bq_write; /* actual list of buffers */
	struct buf *bq_write_next;	/* next request in bq_write */
	struct buf *bq_next;		/* current request */
	int bq_read_burst;		/* # of consecutive reads */
};


static __inline int buf_inorder(const struct buf *, const struct buf *, int);

/*
 * Check if two buf's are in ascending order.  A NULL buf sorts after
 * everything else: buf_inorder(bp, NULL) is true for any bp, and
 * buf_inorder(NULL, bq) is false for any non-NULL bq.
 */
static __inline int
buf_inorder(const struct buf *bp, const struct buf *bq, int sortby)
{

	if (bp == NULL || bq == NULL)
		return (bq == NULL);

	if (sortby == BUFQ_SORT_CYLINDER) {
		if (bp->b_cylinder != bq->b_cylinder)
			return bp->b_cylinder < bq->b_cylinder;
		else
			return bp->b_rawblkno < bq->b_rawblkno;
	} else
		return bp->b_rawblkno < bq->b_rawblkno;
}


/*
 * First-come first-served sort for disks.
 *
 * Requests are appended to the queue without any reordering.
 */
static void
bufq_fcfs_put(struct bufq_state *bufq, struct buf *bp)
{
	struct bufq_fcfs *fcfs = bufq->bq_private;

	TAILQ_INSERT_TAIL(&fcfs->bq_head, bp, b_actq);
}

static struct buf *
bufq_fcfs_get(struct bufq_state *bufq, int remove)
{
	struct bufq_fcfs *fcfs = bufq->bq_private;
	struct buf *bp;

	bp = TAILQ_FIRST(&fcfs->bq_head);

	if (bp != NULL && remove)
		TAILQ_REMOVE(&fcfs->bq_head, bp, b_actq);

	return (bp);
}


/*
 * Seek sort for disks.
 *
 * There are actually two queues, sorted in ascending order.  The first
 * queue holds those requests which are positioned after the current block;
 * the second holds requests which came in after their position was passed.
 * Thus we implement a one-way scan, retracting after reaching the end of
 * the drive to the first request on the second queue, at which time it
 * becomes the first queue.
 *
 * A one-way scan is natural because of the way UNIX read-ahead blocks are
 * allocated.
 */
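/*
 * Worked example (illustrative): with the head at block 50 and the queue
 * holding 60 70 90 | 10 20 (the "|" marks the inversion separating the
 * two lists), a new request for block 80 sorts into the first list,
 * giving 60 70 80 90 | 10 20, while a request for block 30 sorts into
 * the second list, giving 60 70 90 | 10 20 30.
 */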
static void
bufq_disksort_put(struct bufq_state *bufq, struct buf *bp)
{
	struct bufq_disksort *disksort = bufq->bq_private;
	struct buf *bq, *nbq;
	int sortby;

	sortby = bufq->bq_flags & BUFQ_SORT_MASK;

	bq = TAILQ_FIRST(&disksort->bq_head);

	/*
	 * If the queue is empty it's easy; we just go on the end.
	 */
	if (bq == NULL) {
		TAILQ_INSERT_TAIL(&disksort->bq_head, bp, b_actq);
		return;
	}

	/*
	 * If we lie before the currently active request, then we
	 * must locate the second request list and add ourselves to it.
	 */
	if (buf_inorder(bp, bq, sortby)) {
		while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
			/*
			 * Check for an ``inversion'' in the normally ascending
			 * block numbers, indicating the start of the second
			 * request list.
			 */
			if (buf_inorder(nbq, bq, sortby)) {
				/*
				 * Search the second request list for the first
				 * request at a larger block number.  We go
				 * after that; if there is no such request, we
				 * go at the end.
				 */
				do {
					if (buf_inorder(bp, nbq, sortby))
						goto insert;
					bq = nbq;
				} while ((nbq =
				    TAILQ_NEXT(bq, b_actq)) != NULL);
				goto insert;		/* after last */
			}
			bq = nbq;
		}
		/*
		 * No inversions... we will go after the last, and
		 * be the first request in the second request list.
		 */
		goto insert;
	}
	/*
	 * Request is at/after the current request...
	 * sort in the first request list.
	 */
	while ((nbq = TAILQ_NEXT(bq, b_actq)) != NULL) {
		/*
		 * We want to go after the current request if there is an
		 * inversion after it (i.e. it is the end of the first
		 * request list), or if the next request is a larger cylinder
		 * than our request.
		 */
		if (buf_inorder(nbq, bq, sortby) ||
		    buf_inorder(bp, nbq, sortby))
			goto insert;
		bq = nbq;
	}
	/*
	 * Neither a second list nor a larger request... we go at the end of
	 * the first list, which is the same as the end of the whole shebang.
	 */
insert:	TAILQ_INSERT_AFTER(&disksort->bq_head, bq, bp, b_actq);
}

static struct buf *
bufq_disksort_get(struct bufq_state *bufq, int remove)
{
	struct bufq_disksort *disksort = bufq->bq_private;
	struct buf *bp;

	bp = TAILQ_FIRST(&disksort->bq_head);

	if (bp != NULL && remove)
		TAILQ_REMOVE(&disksort->bq_head, bp, b_actq);

	return (bp);
}


/*
 * Seek sort for disks.
 *
 * There are two queues.  The first queue holds read requests; the second
 * holds write requests.  The read queue is first-come first-served; the
 * write queue is sorted in ascending block order.
 * The read queue is processed first.  After PRIO_READ_BURST consecutive
 * read requests with a non-empty write queue, PRIO_WRITE_REQ requests
 * from the write queue will be processed.
 */
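/*
 * Worked example (illustrative): while both queues stay non-empty, the
 * get side below serves up to PRIO_READ_BURST (48) reads, then
 * PRIO_WRITE_REQ (16) writes, then resets the burst count, so a steady
 * stream of reads cannot starve writes indefinitely.
 */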
static void
bufq_prio_put(struct bufq_state *bufq, struct buf *bp)
{
	struct bufq_prio *prio = bufq->bq_private;
	struct buf *bq;
	int sortby;

	sortby = bufq->bq_flags & BUFQ_SORT_MASK;

	/*
	 * If it's a read request append it to the list.
	 */
	if ((bp->b_flags & B_READ) == B_READ) {
		TAILQ_INSERT_TAIL(&prio->bq_read, bp, b_actq);
		return;
	}

	bq = TAILQ_FIRST(&prio->bq_write);

	/*
	 * If the write list is empty, simply append it to the list.
	 */
	if (bq == NULL) {
		TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
		prio->bq_write_next = bp;
		return;
	}

	/*
	 * If we lie after the next request, insert after this request.
	 */
	if (buf_inorder(prio->bq_write_next, bp, sortby))
		bq = prio->bq_write_next;

	/*
	 * Search for the first request at a larger block number.
	 * We go before this request if it exists.
	 */
	while (bq != NULL && buf_inorder(bq, bp, sortby))
		bq = TAILQ_NEXT(bq, b_actq);

	if (bq != NULL)
		TAILQ_INSERT_BEFORE(bq, bp, b_actq);
	else
		TAILQ_INSERT_TAIL(&prio->bq_write, bp, b_actq);
}

static struct buf *
bufq_prio_get(struct bufq_state *bufq, int remove)
{
	struct bufq_prio *prio = bufq->bq_private;
	struct buf *bp;

	/*
	 * If no current request, get next from the lists.
	 */
	if (prio->bq_next == NULL) {
		/*
		 * If at least one list is empty, select the other.
		 */
		if (TAILQ_FIRST(&prio->bq_read) == NULL) {
			prio->bq_next = prio->bq_write_next;
			prio->bq_read_burst = 0;
		} else if (prio->bq_write_next == NULL) {
			prio->bq_next = TAILQ_FIRST(&prio->bq_read);
			prio->bq_read_burst = 0;
		} else {
			/*
			 * Both lists have requests.  Select the read list up
			 * to PRIO_READ_BURST times, then select the write
			 * list PRIO_WRITE_REQ times.
			 */
			if (prio->bq_read_burst++ < PRIO_READ_BURST)
				prio->bq_next = TAILQ_FIRST(&prio->bq_read);
			else if (prio->bq_read_burst <
			    PRIO_READ_BURST + PRIO_WRITE_REQ)
				prio->bq_next = prio->bq_write_next;
			else {
				prio->bq_next = TAILQ_FIRST(&prio->bq_read);
				prio->bq_read_burst = 0;
			}
		}
	}

	bp = prio->bq_next;

	if (bp != NULL && remove) {
		if ((bp->b_flags & B_READ) == B_READ)
			TAILQ_REMOVE(&prio->bq_read, bp, b_actq);
		else {
			/*
			 * Advance the write pointer before removing
			 * bp since it is actually prio->bq_write_next.
			 */
			prio->bq_write_next =
			    TAILQ_NEXT(prio->bq_write_next, b_actq);
			TAILQ_REMOVE(&prio->bq_write, bp, b_actq);
			if (prio->bq_write_next == NULL)
				prio->bq_write_next =
				    TAILQ_FIRST(&prio->bq_write);
		}

		prio->bq_next = NULL;
	}

	return (bp);
}


/*
 * Cyclical scan (CSCAN).
 *
 * Two lists are kept: requests at or after the last serviced position go
 * on the active list; requests behind it go on the inactive list.  When
 * the active list empties, the head has swept to the end, and the lists
 * are swapped.
 */
TAILQ_HEAD(bqhead, buf);
struct cscan_queue {
	struct bqhead cq_head[2];	/* actual lists of buffers */
	int cq_idx;			/* current list index */
	int cq_lastcylinder;		/* b_cylinder of the last request */
	daddr_t cq_lastrawblkno;	/* b_rawblkno of the last request */
};

static int __inline cscan_empty(const struct cscan_queue *);
static void cscan_put(struct cscan_queue *, struct buf *, int);
static struct buf *cscan_get(struct cscan_queue *, int);
static void cscan_init(struct cscan_queue *);

static __inline int
cscan_empty(const struct cscan_queue *q)
{

	return TAILQ_EMPTY(&q->cq_head[0]) && TAILQ_EMPTY(&q->cq_head[1]);
}

static void
cscan_put(struct cscan_queue *q, struct buf *bp, int sortby)
{
	struct buf tmp;
	struct buf *it;
	struct bqhead *bqh;
	int idx;

	tmp.b_cylinder = q->cq_lastcylinder;
	tmp.b_rawblkno = q->cq_lastrawblkno;

	if (buf_inorder(bp, &tmp, sortby))
		idx = 1 - q->cq_idx;
	else
		idx = q->cq_idx;

	bqh = &q->cq_head[idx];

	TAILQ_FOREACH(it, bqh, b_actq)
		if (buf_inorder(bp, it, sortby))
			break;

	if (it != NULL)
		TAILQ_INSERT_BEFORE(it, bp, b_actq);
	else
		TAILQ_INSERT_TAIL(bqh, bp, b_actq);
}

static struct buf *
cscan_get(struct cscan_queue *q, int remove)
{
	int idx = q->cq_idx;
	struct bqhead *bqh;
	struct buf *bp;

	bqh = &q->cq_head[idx];
	bp = TAILQ_FIRST(bqh);

	if (bp == NULL) {
		/* switch queue */
		idx = 1 - idx;
		bqh = &q->cq_head[idx];
		bp = TAILQ_FIRST(bqh);
	}

	KDASSERT((bp != NULL && !cscan_empty(q)) ||
	    (bp == NULL && cscan_empty(q)));

	if (bp != NULL && remove) {
		q->cq_idx = idx;
		TAILQ_REMOVE(bqh, bp, b_actq);

		q->cq_lastcylinder = bp->b_cylinder;
		q->cq_lastrawblkno =
		    bp->b_rawblkno + (bp->b_bcount >> DEV_BSHIFT);
	}

	return (bp);
}

static void
cscan_init(struct cscan_queue *q)
{

	TAILQ_INIT(&q->cq_head[0]);
	TAILQ_INIT(&q->cq_head[1]);
}


/*
 * Per-priority CSCAN.
 *
 * XXX probably we should have a way to raise the
 * priority of on-queue requests.
 */
#define	PRIOCSCAN_NQUEUE	3

struct priocscan_queue {
	struct cscan_queue q_queue;
	int q_burst;
};

struct bufq_priocscan {
	struct priocscan_queue bq_queue[PRIOCSCAN_NQUEUE];

#if 0
	/*
	 * XXX using a "global" head position can reduce positioning time
	 * when switching between queues, although it might hurt fairness.
	 */
	daddr_t bq_lastrawblkno;
	int bq_lastcylinder;
#endif
};

/*
 * How many requests to serve from one queue while requests are
 * pending on the other queues.
 *
 * XXX tune
 */
const int priocscan_burst[] = {
	64, 16, 4
};

static void bufq_priocscan_put(struct bufq_state *, struct buf *);
static struct buf *bufq_priocscan_get(struct bufq_state *, int);
static void bufq_priocscan_init(struct bufq_state *);
static __inline struct cscan_queue *bufq_priocscan_selectqueue(
    struct bufq_priocscan *, const struct buf *);

static __inline struct cscan_queue *
bufq_priocscan_selectqueue(struct bufq_priocscan *q, const struct buf *bp)
{
	static const int priocscan_priomap[] = {
		[BPRIO_TIMENONCRITICAL] = 2,
		[BPRIO_TIMELIMITED] = 1,
		[BPRIO_TIMECRITICAL] = 0
	};

	return &q->bq_queue[priocscan_priomap[BIO_GETPRIO(bp)]].q_queue;
}
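
/*
 * Example (illustrative only): the priority consulted above is tagged on
 * the buffer by upper layers before the buffer is queued, e.g.:
 *
 *	BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);	(latency-sensitive I/O)
 *	BIO_SETPRIO(bp, BPRIO_TIMENONCRITICAL);	(bulk background I/O)
 */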

static void
bufq_priocscan_put(struct bufq_state *bufq, struct buf *bp)
{
	struct bufq_priocscan *q = bufq->bq_private;
	struct cscan_queue *cq;
	const int sortby = bufq->bq_flags & BUFQ_SORT_MASK;

	cq = bufq_priocscan_selectqueue(q, bp);
	cscan_put(cq, bp, sortby);
}

static struct buf *
bufq_priocscan_get(struct bufq_state *bufq, int remove)
{
	struct bufq_priocscan *q = bufq->bq_private;
	struct priocscan_queue *pq, *npq;
	struct priocscan_queue *first; /* first non-empty queue */
	const struct priocscan_queue *epq;
	const struct cscan_queue *cq;
	struct buf *bp;
	boolean_t single; /* true if there's only one non-empty queue */

	pq = &q->bq_queue[0];
	epq = pq + PRIOCSCAN_NQUEUE;
	for (; pq < epq; pq++) {
		cq = &pq->q_queue;
		if (!cscan_empty(cq))
			break;
	}
	if (pq == epq) {
		/* there are no requests */
		return NULL;
	}

	first = pq;
	single = TRUE;
	for (npq = first + 1; npq < epq; npq++) {
		cq = &npq->q_queue;
		if (!cscan_empty(cq)) {
			single = FALSE;
			if (pq->q_burst > 0)
				break;
			pq = npq;
		}
	}
	if (single) {
		/*
		 * there's only one non-empty queue.  just serve it.
		 */
		pq = first;
	} else if (pq->q_burst > 0) {
		/*
		 * XXX account only by number of requests.  is it good enough?
		 */
		pq->q_burst--;
	} else {
		/*
		 * no queue was selected due to burst counts
		 */
		int i;
#ifdef DEBUG
		for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
			pq = &q->bq_queue[i];
			cq = &pq->q_queue;
			if (!cscan_empty(cq) && pq->q_burst)
				panic("%s: inconsistent", __func__);
		}
#endif /* DEBUG */

		/*
		 * reset burst counts
		 */
		for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
			pq = &q->bq_queue[i];
			pq->q_burst = priocscan_burst[i];
		}

		/*
		 * serve first non-empty queue.
		 */
		pq = first;
	}

	KDASSERT(!cscan_empty(&pq->q_queue));
	bp = cscan_get(&pq->q_queue, remove);
	KDASSERT(bp != NULL);
	KDASSERT(&pq->q_queue == bufq_priocscan_selectqueue(q, bp));

	return bp;
}

static void
bufq_priocscan_init(struct bufq_state *bufq)
{
	struct bufq_priocscan *q;
	int i;

	bufq->bq_get = bufq_priocscan_get;
	bufq->bq_put = bufq_priocscan_put;
	bufq->bq_private = malloc(sizeof(struct bufq_priocscan),
	    M_DEVBUF, M_ZERO);

	q = bufq->bq_private;
	for (i = 0; i < PRIOCSCAN_NQUEUE; i++) {
		struct cscan_queue *cq = &q->bq_queue[i].q_queue;

		cscan_init(cq);
	}
}


/*
 * Create a device buffer queue.
 */
void
bufq_alloc(struct bufq_state *bufq, int flags)
{
	struct bufq_fcfs *fcfs;
	struct bufq_disksort *disksort;
	struct bufq_prio *prio;

	bufq->bq_flags = flags;

	switch (flags & BUFQ_SORT_MASK) {
	case BUFQ_SORT_RAWBLOCK:
	case BUFQ_SORT_CYLINDER:
		break;
	case 0:
		if ((flags & BUFQ_METHOD_MASK) == BUFQ_FCFS)
			break;
		/* FALLTHROUGH */
	default:
		panic("bufq_alloc: sort out of range");
	}

	switch (flags & BUFQ_METHOD_MASK) {
	case BUFQ_FCFS:
		bufq->bq_get = bufq_fcfs_get;
		bufq->bq_put = bufq_fcfs_put;
		MALLOC(bufq->bq_private, struct bufq_fcfs *,
		    sizeof(struct bufq_fcfs), M_DEVBUF, M_ZERO);
		fcfs = (struct bufq_fcfs *)bufq->bq_private;
		TAILQ_INIT(&fcfs->bq_head);
		break;
	case BUFQ_DISKSORT:
		bufq->bq_get = bufq_disksort_get;
		bufq->bq_put = bufq_disksort_put;
		MALLOC(bufq->bq_private, struct bufq_disksort *,
		    sizeof(struct bufq_disksort), M_DEVBUF, M_ZERO);
		disksort = (struct bufq_disksort *)bufq->bq_private;
		TAILQ_INIT(&disksort->bq_head);
		break;
	case BUFQ_READ_PRIO:
		bufq->bq_get = bufq_prio_get;
		bufq->bq_put = bufq_prio_put;
		MALLOC(bufq->bq_private, struct bufq_prio *,
		    sizeof(struct bufq_prio), M_DEVBUF, M_ZERO);
		prio = (struct bufq_prio *)bufq->bq_private;
		TAILQ_INIT(&prio->bq_read);
		TAILQ_INIT(&prio->bq_write);
		break;
	case BUFQ_PRIOCSCAN:
		bufq_priocscan_init(bufq);
		break;
	default:
		panic("bufq_alloc: method out of range");
	}
}
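
/*
 * Example (illustrative only; "sc" is a hypothetical softc): a driver
 * embeds a bufq_state in its softc, allocates it at attach time, and
 * drives it with the BUFQ_PUT()/BUFQ_GET() macros:
 *
 *	bufq_alloc(&sc->sc_q, bufq_disk_default_strat | BUFQ_SORT_RAWBLOCK);
 *	...
 *	BUFQ_PUT(&sc->sc_q, bp);	(enqueue, in the strategy routine)
 *	bp = BUFQ_GET(&sc->sc_q);	(dequeue, in the start routine)
 */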

/*
 * Destroy a device buffer queue.
 */
void
bufq_free(struct bufq_state *bufq)
{

	KASSERT(bufq->bq_private != NULL);
	KASSERT(BUFQ_PEEK(bufq) == NULL);

	FREE(bufq->bq_private, M_DEVBUF);
	bufq->bq_get = NULL;
	bufq->bq_put = NULL;
}

/*
 * Bounds checking against the media size, used for the raw partition.
 * The sector size passed in should currently always be DEV_BSIZE,
 * and the media size the size of the device in DEV_BSIZE sectors.
 */
int
bounds_check_with_mediasize(struct buf *bp, int secsize, u_int64_t mediasize)
{
	int sz;

	sz = howmany(bp->b_bcount, secsize);

	if (bp->b_blkno + sz > mediasize) {
		sz = mediasize - bp->b_blkno;
		if (sz == 0) {
			/* If exactly at end of disk, return EOF. */
			bp->b_resid = bp->b_bcount;
			goto done;
		}
		if (sz < 0) {
			/* If past end of disk, return EINVAL. */
			bp->b_error = EINVAL;
			goto bad;
		}
		/* Otherwise, truncate request. */
		bp->b_bcount = sz << DEV_BSHIFT;
	}

	return 1;

bad:
	bp->b_flags |= B_ERROR;
done:
	return 0;
}
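
/*
 * Example (illustrative only; "sc_size", in DEV_BSIZE sectors, is a
 * hypothetical softc field): a strategy routine would use this as:
 *
 *	if (bounds_check_with_mediasize(bp, DEV_BSIZE, sc->sc_size) <= 0)
 *		goto done;	(b_error or b_resid is already set)
 */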