/* $NetBSD: udf_strat_rmw.c,v 1.31 2023/06/27 09:58:50 reinoud Exp $ */

/*
 * Copyright (c) 2006, 2008 Reinoud Zandijk
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
#ifndef lint
__KERNEL_RCSID(0, "$NetBSD: udf_strat_rmw.c,v 1.31 2023/06/27 09:58:50 reinoud Exp $");
#endif /* not lint */


#if defined(_KERNEL_OPT)
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <miscfs/genfs/genfs_node.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/kthread.h>
#include <dev/clock_subr.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/udf_mount.h>

#include "udf.h"
#include "udf_subr.h"
#include "udf_bswap.h"


#define VTOI(vnode) ((struct udf_node *) (vnode)->v_data)
#define PRIV(ump) ((struct strat_private *) (ump)->strategy_private)
#define BTOE(buf) ((struct udf_eccline *) ((buf)->b_private))
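/*
 * Overview (summary of the code below): this is the read-modify-write disc
 * strategy.  Client buffers are not sent to disc directly but gathered in
 * "eccline" packet caches, one per ECC packet of at most
 * UDF_MAX_PACKET_SIZE (64) sectors.  Per-sector bitmaps record which
 * sectors are present, still to be read in, dirty or in error, and a
 * dedicated kernel thread moves the lines between bufq queues (waiting,
 * reading, writing, seqwriting, idle, free) and issues whole packets at
 * once, turning partial packet updates into read-modify-write cycles.
 */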
/* --------------------------------------------------------------------- */

#define UDF_MAX_PACKET_SIZE	64	/* DON'T change this */

/* scheduler states */
#define UDF_SHED_WAITING	1	/* waiting on timeout */
#define UDF_SHED_READING	2
#define UDF_SHED_WRITING	3
#define UDF_SHED_SEQWRITING	4
#define UDF_SHED_IDLE		5	/* refcnt'd    */
#define UDF_SHED_FREE		6	/* recyclable  */
#define UDF_SHED_MAX		6+1

/* flags */
#define ECC_LOCKED		0x01	/* prevent access   */
#define ECC_WANTED		0x02	/* trying access    */
#define ECC_SEQWRITING		0x04	/* sequential queue */
#define ECC_FLOATING		0x08	/* not queued yet   */

#define ECC_WAITTIME		10


TAILQ_HEAD(ecclineq, udf_eccline);
struct udf_eccline {
	struct udf_mount	 *ump;
	uint64_t		  present;	/* preserve these */
	uint64_t		  readin;	/* bitmap	  */
	uint64_t		  dirty;	/* bitmap	  */
	uint64_t		  error;	/* bitmap	  */
	uint32_t		  refcnt;

	struct timespec		  wait_time;
	uint32_t		  flags;
	uint32_t		  start_sector;	/* physical */

	const char		 *fname;
	int			  sline;

	struct buf		 *buf;
	void			 *blob;

	struct buf		 *bufs[UDF_MAX_PACKET_SIZE];
	uint32_t		  bufs_bpos[UDF_MAX_PACKET_SIZE];
	int			  bufs_len[UDF_MAX_PACKET_SIZE];

	int			  queued_on;	/* on which BUFQ list */
	LIST_ENTRY(udf_eccline)	  hashchain;	/* on sector lookup   */
};
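/*
 * Bitmap convention: sector `s' of a line occupies bit
 * (1ULL << (s - start_sector)) in the present/readin/dirty/error bitmaps.
 * For example, assuming a packet size of 32 sectors, sector 100 lives in
 * the line starting at sector 96 and is tracked by bit 1ULL << 4.  The
 * refcnt field counts active holds (such as node descriptors handed out
 * from the blob) that must keep the line from being recycled.
 */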
struct strat_private {
	lwp_t			 *queue_lwp;
	kcondvar_t		  discstrat_cv;		/* to wait on	    */
	kmutex_t		  discstrat_mutex;	/* disc strategy    */
	kmutex_t		  seqwrite_mutex;	/* protect mappings */

	int			  thread_running;	/* thread control */
	int			  run_thread;		/* thread control */
	int			  thread_finished;	/* thread control */
	int			  cur_queue;

	int			  num_floating;
	int			  num_queued[UDF_SHED_MAX];
	struct bufq_state	 *queues[UDF_SHED_MAX];
	struct timespec		  last_queued[UDF_SHED_MAX];
	struct disk_strategy	  old_strategy_setting;

	struct pool		  eccline_pool;
	struct pool		  ecclineblob_pool;
	LIST_HEAD(, udf_eccline)  eccline_hash[UDF_ECCBUF_HASHSIZE];
};

/* --------------------------------------------------------------------- */

#define UDF_LOCK_ECCLINE(eccline) udf_lock_eccline(eccline, __FILE__, __LINE__)
#define UDF_UNLOCK_ECCLINE(eccline) udf_unlock_eccline(eccline, __FILE__, __LINE__)

/* can be called with or without discstrat lock */
static void
udf_lock_eccline(struct udf_eccline *eccline, const char *fname, int sline)
{
	struct strat_private *priv = PRIV(eccline->ump);
	int waslocked, ret;

	KASSERT(mutex_owned(&priv->discstrat_mutex));

	waslocked = mutex_owned(&priv->discstrat_mutex);
	if (!waslocked)
		mutex_enter(&priv->discstrat_mutex);

	/* wait until it is unlocked first */
	eccline->refcnt++;
	while (eccline->flags & ECC_LOCKED) {
		DPRINTF(ECCLINE, ("waiting for lock at %s:%d\n",
			fname, sline));
		DPRINTF(ECCLINE, ("was locked at %s:%d\n",
			eccline->fname, eccline->sline));
		eccline->flags |= ECC_WANTED;
		ret = cv_timedwait(&priv->discstrat_cv, &priv->discstrat_mutex,
			hz/8);
		if (ret == EWOULDBLOCK)
			DPRINTF(LOCKING, ("eccline lock held, waiting for "
				"release"));
	}
	eccline->flags |= ECC_LOCKED;
	eccline->flags &= ~ECC_WANTED;
	eccline->refcnt--;

	eccline->fname = fname;
	eccline->sline = sline;

	if (!waslocked)
		mutex_exit(&priv->discstrat_mutex);
}


/* can be called with or without discstrat lock */
static void
udf_unlock_eccline(struct udf_eccline *eccline, const char *fname, int sline)
{
	struct strat_private *priv = PRIV(eccline->ump);
	int waslocked;

	KASSERT(mutex_owned(&priv->discstrat_mutex));

	waslocked = mutex_owned(&priv->discstrat_mutex);
	if (!waslocked)
		mutex_enter(&priv->discstrat_mutex);

	eccline->flags &= ~ECC_LOCKED;
	cv_broadcast(&priv->discstrat_cv);

	if (!waslocked)
		mutex_exit(&priv->discstrat_mutex);
}
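/*
 * The eccline lock is thus a sleep lock built on discstrat_cv: the holder
 * sets ECC_LOCKED, contenders mark ECC_WANTED and sleep on the cv, and
 * unlocking clears ECC_LOCKED and broadcasts.  The fname/sline pair
 * records the last lock site for the DPRINTFs above.  A minimal usage
 * sketch (hypothetical caller):
 *
 *	UDF_LOCK_ECCLINE(eccline);
 *	eccline->dirty |= bit;		// modify line state
 *	UDF_UNLOCK_ECCLINE(eccline);
 */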
/* NOTE discstrat_mutex should be held! */
static void
udf_dispose_eccline(struct udf_eccline *eccline)
{
	struct strat_private *priv = PRIV(eccline->ump);

	KASSERT(mutex_owned(&priv->discstrat_mutex));

	DPRINTF(ECCLINE, ("dispose eccline with start sector %d, "
		"present %0"PRIx64"\n", eccline->start_sector,
		eccline->present));

	KASSERT(eccline->refcnt == 0);
	KASSERT(eccline->dirty == 0);
	KASSERT(eccline->queued_on == 0);
	KASSERT(eccline->flags & ECC_FLOATING);
	KASSERT(eccline->flags & ECC_LOCKED);

	LIST_REMOVE(eccline, hashchain);
	priv->num_floating--;

	putiobuf(eccline->buf);
	pool_put(&priv->ecclineblob_pool, eccline->blob);
	pool_put(&priv->eccline_pool, eccline);
}


/* NOTE discstrat_mutex should be held! */
static void
udf_push_eccline(struct udf_eccline *eccline, int newqueue)
{
	struct strat_private *priv = PRIV(eccline->ump);

	KASSERT(mutex_owned(&priv->discstrat_mutex));

	DPRINTF(PARANOIA, ("DEBUG: buf %p pushed on queue %d\n", eccline->buf, newqueue));

	KASSERT(eccline->queued_on == 0);
	KASSERT(eccline->flags & ECC_FLOATING);

	/* set buffer block numbers to make sure it is queued correctly */
	eccline->buf->b_lblkno   = eccline->start_sector;
	eccline->buf->b_blkno    = eccline->start_sector;
	eccline->buf->b_rawblkno = eccline->start_sector;

	vfs_timestamp(&priv->last_queued[newqueue]);
	eccline->flags &= ~ECC_FLOATING;
	priv->num_floating--;
	eccline->queued_on = newqueue;
	priv->num_queued[newqueue]++;
	bufq_put(priv->queues[newqueue], eccline->buf);

	UDF_UNLOCK_ECCLINE(eccline);

	/* XXX tickle disc strategy statemachine */
	if (newqueue != UDF_SHED_IDLE)
		cv_signal(&priv->discstrat_cv);
}


static struct udf_eccline *
udf_peek_eccline(struct strat_private *priv, int queued_on)
{
	struct udf_eccline *eccline;
	struct buf *buf;

	KASSERT(mutex_owned(&priv->discstrat_mutex));

	for(;;) {
		buf = bufq_peek(priv->queues[queued_on]);
		/* could have been a race, but we'll revisit later */
		if (buf == NULL)
			return NULL;

		eccline = BTOE(buf);
		UDF_LOCK_ECCLINE(eccline);

		/* might have changed before we obtained the lock */
		if (eccline->queued_on == queued_on)
			break;

		UDF_UNLOCK_ECCLINE(eccline);
	}

	KASSERT(eccline->queued_on == queued_on);
	KASSERT((eccline->flags & ECC_FLOATING) == 0);

	DPRINTF(PARANOIA, ("DEBUG: buf %p peeked at queue %d\n",
		eccline->buf, queued_on));

	return eccline;
}


static struct udf_eccline *
udf_pop_eccline(struct strat_private *priv, int queued_on)
{
	struct udf_eccline *eccline;
	struct buf *buf;

	KASSERT(mutex_owned(&priv->discstrat_mutex));

	for(;;) {
		buf = bufq_get(priv->queues[queued_on]);
		if (buf == NULL) {
			// KASSERT(priv->num_queued[queued_on] == 0);
			return NULL;
		}

		eccline = BTOE(buf);
		UDF_LOCK_ECCLINE(eccline);

		/* might have changed before we obtained the lock */
		if (eccline->queued_on == queued_on)
			break;

		UDF_UNLOCK_ECCLINE(eccline);
	}

	KASSERT(eccline->queued_on == queued_on);
	KASSERT((eccline->flags & ECC_FLOATING) == 0);

	priv->num_queued[queued_on]--;
	eccline->queued_on = 0;

	eccline->flags |= ECC_FLOATING;
	priv->num_floating++;

	DPRINTF(PARANOIA, ("DEBUG: buf %p popped from queue %d\n",
		eccline->buf, queued_on));

	return eccline;
}
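/*
 * Accounting invariant kept by the queue helpers: a line is either on
 * exactly one bufq (queued_on != 0, counted in num_queued[]) or it is
 * "floating" (ECC_FLOATING set, counted in num_floating) while some
 * caller operates on it.  udf_pop_eccline() and udf_unqueue_eccline()
 * move a line from queued to floating and return it locked;
 * udf_push_eccline() does the reverse and unlocks it again.
 */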
static void
udf_unqueue_eccline(struct strat_private *priv, struct udf_eccline *eccline)
{
	struct buf *ret __diagused;

	UDF_LOCK_ECCLINE(eccline);
	if (eccline->queued_on == 0) {
		KASSERT(eccline->flags & ECC_FLOATING);
		return;
	}

	ret = bufq_cancel(priv->queues[eccline->queued_on], eccline->buf);
	KASSERT(ret == eccline->buf);

	priv->num_queued[eccline->queued_on]--;
	eccline->queued_on = 0;

	eccline->flags |= ECC_FLOATING;
	priv->num_floating++;
}
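/*
 * udf_geteccline() returns the locked, floating line that caches the
 * packet containing `sector', allocating or recycling one when it is not
 * in the hash table yet.  The lookup is simple arithmetic; for example,
 * assuming a packet size of 32 sectors, sector 100 gives
 * line_offset = 100 % 32 = 4, start_sector = 96 and hash line
 * (96 / 32) & UDF_ECCBUF_HASHMASK.
 */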
static struct udf_eccline *
udf_geteccline(struct udf_mount *ump, uint32_t sector, int flags)
{
	struct strat_private *priv = PRIV(ump);
	struct udf_eccline *eccline;
	uint32_t start_sector, lb_size, blobsize;
	uint8_t *eccline_blob;
	int line, line_offset;
	int num_busy;

	mutex_enter(&priv->discstrat_mutex);

	/* lookup in our line cache hashtable */
	line_offset  = sector % ump->packet_size;
	start_sector = sector - line_offset;
	line = (start_sector/ump->packet_size) & UDF_ECCBUF_HASHMASK;

	KASSERT(priv->thread_running);

retry:
	DPRINTF(ECCLINE, ("get line sector %d, line %d\n", sector, line));
	LIST_FOREACH(eccline, &priv->eccline_hash[line], hashchain) {
		if (eccline->start_sector == start_sector) {
			DPRINTF(ECCLINE, ("\tfound eccline, start_sector %d\n",
				eccline->start_sector));
			udf_unqueue_eccline(priv, eccline);

			mutex_exit(&priv->discstrat_mutex);
			return eccline;
		}
	}

	/* not found in eccline cache */
	DPRINTF(ECCLINE, ("\tnot found in eccline cache\n"));

	lb_size  = udf_rw32(ump->logical_vol->lb_size);
	blobsize = ump->packet_size * lb_size;

	/* don't allow too many pending requests */
	DPRINTF(ECCLINE, ("\tallocating new eccline\n"));
	num_busy = (priv->num_queued[UDF_SHED_SEQWRITING] + priv->num_floating);
	if ((flags & ECC_SEQWRITING) && (num_busy > UDF_ECCLINE_MAXBUSY)) {
		cv_timedwait(&priv->discstrat_cv,
			&priv->discstrat_mutex, hz/8);
		goto retry;
	}

	eccline_blob = pool_get(&priv->ecclineblob_pool, PR_NOWAIT);
	eccline = pool_get(&priv->eccline_pool, PR_NOWAIT);
	if ((eccline_blob == NULL) || (eccline == NULL)) {
		if (eccline_blob)
			pool_put(&priv->ecclineblob_pool, eccline_blob);
		if (eccline)
			pool_put(&priv->eccline_pool, eccline);

		/* out of memory for now; cannibalise freelist */
		eccline = udf_pop_eccline(priv, UDF_SHED_FREE);
		if (eccline == NULL) {
			/* serious trouble; wait and retry */
			cv_timedwait(&priv->discstrat_cv,
				&priv->discstrat_mutex, hz/8);
			goto retry;
		}

		/* push back line if we're waiting for it or it is locked */
		if (eccline->flags & ECC_WANTED) {
			/* we won a race, but someone else needed it */
			udf_push_eccline(eccline, UDF_SHED_FREE);
			goto retry;
		}

		/* unlink this entry */
		LIST_REMOVE(eccline, hashchain);
		KASSERT(eccline->flags & ECC_FLOATING);
		KASSERT(eccline->queued_on == 0);

		eccline_blob = eccline->blob;
		eccline->flags = ECC_FLOATING | ECC_LOCKED;
	} else {
		eccline->flags = ECC_FLOATING | ECC_LOCKED;
		priv->num_floating++;
	}

	eccline->queued_on = 0;
	eccline->blob = eccline_blob;
	eccline->buf  = getiobuf(NULL, true);
	eccline->buf->b_private = eccline;	/* IMPORTANT */

	/* initialise eccline blob */
	/* XXX memset expensive and strictly not needed XXX */
	memset(eccline->blob, 0, blobsize);

	eccline->ump = ump;
	eccline->present = eccline->readin = eccline->dirty = 0;
	eccline->error = 0;
	eccline->refcnt = 0;
	memset(eccline->bufs, 0, UDF_MAX_PACKET_SIZE * sizeof(struct buf *));

	eccline->start_sector    = start_sector;
	eccline->buf->b_lblkno   = start_sector;
	eccline->buf->b_blkno    = start_sector;
	eccline->buf->b_rawblkno = start_sector;

	LIST_INSERT_HEAD(&priv->eccline_hash[line], eccline, hashchain);

	/*
	 * TODO possible optimisation for checking overlap with partitions
	 * to get a clue on future eccline usage
	 */

	KASSERT(eccline->refcnt == 0);
	KASSERT(eccline->flags & ECC_FLOATING);
	KASSERT(eccline->flags & ECC_LOCKED);
	mutex_exit(&priv->discstrat_mutex);

	return eccline;
}
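/*
 * udf_puteccline() is the dual of udf_geteccline(): it releases a line by
 * requeueing it according to its state, in increasing priority: FREE when
 * nothing refers to it, IDLE while still held or wanted, READING when
 * sectors are marked for readin, and for dirty lines WAITING with an
 * ECC_WAITTIME grace period, promoted directly to (SEQ)WRITING once all
 * sectors of the packet are present so no read-modify-write pass is
 * needed.
 */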
static void
udf_puteccline(struct udf_eccline *eccline)
{
	struct strat_private *priv = PRIV(eccline->ump);
	struct udf_mount *ump = eccline->ump;
	uint64_t allbits = ((uint64_t) 1 << ump->packet_size)-1;
	int new_queue;

	mutex_enter(&priv->discstrat_mutex);

	DPRINTF(ECCLINE, ("put eccline start sector %d, refcnt %d\n",
		eccline->start_sector, eccline->refcnt));

	KASSERT(eccline->flags & ECC_LOCKED);
	KASSERT(eccline->flags & ECC_FLOATING);

	/* clear all read bits that are already read in */
	if (eccline->readin & eccline->present)
		eccline->readin &= (~eccline->present) & allbits;

	/* if we have active nodes we don't set it on seqwriting */
	if (eccline->refcnt > 1)
		eccline->flags &= ~ECC_SEQWRITING;

	/* select state */
	new_queue = UDF_SHED_FREE;
	if (eccline->refcnt > 0)
		new_queue = UDF_SHED_IDLE;
	if (eccline->flags & ECC_WANTED)
		new_queue = UDF_SHED_IDLE;
	if (eccline->readin)
		new_queue = UDF_SHED_READING;
	if (eccline->dirty) {
		new_queue = UDF_SHED_WAITING;
		vfs_timestamp(&eccline->wait_time);
		eccline->wait_time.tv_sec += ECC_WAITTIME;

		if (eccline->present == allbits) {
			new_queue = UDF_SHED_WRITING;
			if (eccline->flags & ECC_SEQWRITING)
				new_queue = UDF_SHED_SEQWRITING;
		}
	}
	udf_push_eccline(eccline, new_queue);

	mutex_exit(&priv->discstrat_mutex);
}

/* --------------------------------------------------------------------- */
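/*
 * The following four functions implement the node descriptor interface of
 * the strategy.  Descriptors live inside an eccline blob and hold a
 * reference on their line.  Sketch of the expected calling pattern
 * (simplified, error handling omitted):
 *
 *	create:	hand out zeroed blob space, line refcnt++
 *	read:	wait for the sector to be read in, line refcnt++
 *	write:	revalidate the tag and mark the sector dirty
 *	free:	drop the hold again, line refcnt--
 */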
static int
udf_create_nodedscr_rmw(struct udf_strat_args *args)
{
	union dscrptr   **dscrptr = &args->dscr;
	struct udf_mount *ump = args->ump;
	struct long_ad   *icb = args->icb;
	struct udf_eccline *eccline;
	uint64_t bit;
	uint32_t sectornr, lb_size, dummy;
	uint8_t *mem;
	int error, eccsect;

	error = udf_translate_vtop(ump, icb, &sectornr, &dummy);
	if (error)
		return error;

	lb_size = udf_rw32(ump->logical_vol->lb_size);

	/* get our eccline */
	eccline = udf_geteccline(ump, sectornr, 0);
	eccsect = sectornr - eccline->start_sector;

	bit = (uint64_t) 1 << eccsect;
	eccline->readin  &= ~bit;	/* just in case */
	eccline->present |=  bit;
	eccline->dirty   &= ~bit;	/* Err... euhm... clean? */

	eccline->refcnt++;

	/* clear space */
	mem = ((uint8_t *) eccline->blob) + eccsect * lb_size;
	memset(mem, 0, lb_size);

	udf_puteccline(eccline);

	*dscrptr = (union dscrptr *) mem;
	return 0;
}


static void
udf_free_nodedscr_rmw(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct long_ad   *icb = args->icb;
	struct udf_eccline *eccline;
	uint64_t bit;
	uint32_t sectornr, dummy;
	int error, eccsect;

	error = udf_translate_vtop(ump, icb, &sectornr, &dummy);
	if (error)
		return;

	/* get our eccline */
	eccline = udf_geteccline(ump, sectornr, 0);
	eccsect = sectornr - eccline->start_sector;

	bit = (uint64_t) 1 << eccsect;
	KASSERT(eccline->present & bit);

	eccline->readin &= ~bit;	/* just in case */
	/* XXX eccline->dirty? */

	KASSERT(eccline->refcnt >= 1);
	eccline->refcnt--;

	udf_puteccline(eccline);
}
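/*
 * Reading a node descriptor may have to wait for disc I/O: when the
 * sector is not present yet, its bit is marked for readin, a reference is
 * taken so the line cannot be recycled, and present|error is polled under
 * discstrat_mutex with cv_timedwait() until the read callback has filled
 * the bit in.
 */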
static int
udf_read_nodedscr_rmw(struct udf_strat_args *args)
{
	union dscrptr   **dscrptr = &args->dscr;
	struct udf_mount *ump = args->ump;
	struct long_ad   *icb = args->icb;
	struct strat_private *priv;
	struct udf_eccline *eccline;
	uint64_t bit;
	uint32_t sectornr, dummy;
	uint8_t *pos;
	int sector_size = ump->discinfo.sector_size;
	int lb_size __diagused = udf_rw32(ump->logical_vol->lb_size);
	int i, error, dscrlen, eccsect;

	KASSERT(sector_size == lb_size);
	error = udf_translate_vtop(ump, icb, &sectornr, &dummy);
	if (error)
		return error;

	/* get our eccline */
	eccline = udf_geteccline(ump, sectornr, 0);
	eccsect = sectornr - eccline->start_sector;

	bit = (uint64_t) 1 << eccsect;
	if ((eccline->present & bit) == 0) {
		/* mark bit for readin */
		eccline->readin |= bit;
		eccline->refcnt++;	/* prevent recycling */
		KASSERT(eccline->bufs[eccsect] == NULL);
		udf_puteccline(eccline);

		/* wait for completion */
		priv = PRIV(eccline->ump);
		mutex_enter(&priv->discstrat_mutex);
		while (((eccline->present | eccline->error) & bit) == 0) {
			error = cv_timedwait(&priv->discstrat_cv,
				&priv->discstrat_mutex,
				hz/8);
			if (error == EWOULDBLOCK)
				DPRINTF(LOCKING, ("eccline waiting for read\n"));
		}
		mutex_exit(&priv->discstrat_mutex);

		/* reget our line */
		eccline = udf_geteccline(ump, sectornr, 0);
		KASSERT(eccline->refcnt >= 1);
		eccline->refcnt--;	/* undo refcnt */

		if (eccline->error & bit) {
			*dscrptr = NULL;
			udf_puteccline(eccline);
			return EIO;		/* XXX error code */
		}
	}

	*dscrptr = (union dscrptr *)
		(((uint8_t *) eccline->blob) + eccsect * sector_size);

	/* code from read_phys_descr */
	/* check if it is a valid tag */
	error = udf_check_tag(*dscrptr);
	if (error) {
		/* check if it is an empty block */
		pos = (uint8_t *) *dscrptr;
		for (i = 0; i < sector_size; i++, pos++) {
			if (*pos) break;
		}
		if (i == sector_size) {
			/* return no error but with no dscrptr */
			error = 0;
		}
		*dscrptr = NULL;
		udf_puteccline(eccline);
		return error;
	}

	/* calculate descriptor size */
	dscrlen = udf_tagsize(*dscrptr, sector_size);
	error = udf_check_tag_payload(*dscrptr, dscrlen);
	if (error) {
		*dscrptr = NULL;
		udf_puteccline(eccline);
		return error;
	}

	/* we have a hold since it has a node descriptor */
	eccline->refcnt++;
	udf_puteccline(eccline);

	return 0;
}
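/*
 * Writing a node descriptor only stages it: the tag location is refreshed
 * from the icb, the tag checksum and CRC are recomputed and the sector is
 * marked dirty; the actual disc write happens later when the scheduler
 * thread issues the line.
 */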
static int
udf_write_nodedscr_rmw(struct udf_strat_args *args)
{
	union dscrptr    *dscrptr = args->dscr;
	struct udf_mount *ump = args->ump;
	struct long_ad   *icb = args->icb;
	struct udf_node  *udf_node = args->udf_node;
	struct udf_eccline *eccline;
	uint64_t bit;
	uint32_t sectornr, logsectornr, dummy;
	// int waitfor  = args->waitfor;
	int sector_size = ump->discinfo.sector_size;
	int lb_size __diagused = udf_rw32(ump->logical_vol->lb_size);
	int error, eccsect;

	KASSERT(sector_size == lb_size);
	sectornr = 0;
	error = udf_translate_vtop(ump, icb, &sectornr, &dummy);
	if (error)
		return error;

	/* get our eccline */
	eccline = udf_geteccline(ump, sectornr, 0);
	eccsect = sectornr - eccline->start_sector;

	bit = (uint64_t) 1 << eccsect;

	/* old callback still pending? */
	if (eccline->bufs[eccsect]) {
		DPRINTF(WRITE, ("udf_write_nodedscr_rmw: writing descriptor"
				" over buffer?\n"));
		nestiobuf_done(eccline->bufs[eccsect],
				eccline->bufs_len[eccsect],
				0);
		eccline->bufs[eccsect] = NULL;
	}

	/* set sector number in the descriptor and validate */
	dscrptr = (union dscrptr *)
		(((uint8_t *) eccline->blob) + eccsect * sector_size);
	KASSERT(dscrptr == args->dscr);

	logsectornr = udf_rw32(icb->loc.lb_num);
	dscrptr->tag.tag_loc = udf_rw32(logsectornr);
	udf_validate_tag_and_crc_sums(dscrptr);

	udf_fixup_node_internals(ump, (uint8_t *) dscrptr, UDF_C_NODE);

	/* set our flags */
	KASSERT(eccline->present & bit);
	eccline->dirty |= bit;

	KASSERT(udf_tagsize(dscrptr, sector_size) <= sector_size);

	udf_node->outstanding_nodedscr--;
	if (udf_node->outstanding_nodedscr == 0) {
		/* XXX still using wakeup! */
		UDF_UNLOCK_NODE(udf_node, 0);
		cv_broadcast(&udf_node->node_lock);
	}
	udf_puteccline(eccline);

	/* XXX waitfor not used */
	return 0;
}
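/*
 * udf_queuebuf_rmw() accepts the nested buffers from the upper layers and
 * folds them into ecclines.  Reads go to the reading queue.  Writes whose
 * disc address is already fixed (UDF_C_ABSOLUTE, UDF_C_DSCR, UDF_C_NODE)
 * go to the writing queue; all other writes take the sequential-writing
 * path where disc addresses are still to be allocated.
 */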
static void
udf_queuebuf_rmw(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;
	struct buf *buf = args->nestbuf;
	struct desc_tag *tag;
	struct strat_private *priv = PRIV(ump);
	struct udf_eccline *eccline;
	struct long_ad *node_ad_cpy;
	uint64_t bit, *lmapping, *pmapping, *lmappos, *pmappos, blknr;
	uint32_t buf_len, len, sectors, sectornr, our_sectornr;
	uint32_t bpos;
	uint16_t vpart_num;
	uint8_t *fidblk, *src, *dst;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int eccsect, what, queue, error;

	KASSERT(ump);
	KASSERT(buf);
	KASSERT(buf->b_iodone == nestiobuf_iodone);

	blknr        = buf->b_blkno;
	our_sectornr = blknr / blks;

	what = buf->b_udf_c_type;
	queue = UDF_SHED_READING;
	if ((buf->b_flags & B_READ) == 0) {
		/* writing */
		queue = UDF_SHED_SEQWRITING;
		if (what == UDF_C_ABSOLUTE)
			queue = UDF_SHED_WRITING;
		if (what == UDF_C_DSCR)
			queue = UDF_SHED_WRITING;
		if (what == UDF_C_NODE)
			queue = UDF_SHED_WRITING;
	}

	if (queue == UDF_SHED_READING) {
		DPRINTF(SHEDULE, ("\nudf_queuebuf_rmw READ %p : sector %d type %d,"
			"b_resid %d, b_bcount %d, b_bufsize %d\n",
			buf, (uint32_t) buf->b_blkno / blks, buf->b_udf_c_type,
			buf->b_resid, buf->b_bcount, buf->b_bufsize));

		/* mark bits for reading */
		buf_len = buf->b_bcount;
		sectornr = our_sectornr;
		eccline = udf_geteccline(ump, sectornr, 0);
		eccsect = sectornr - eccline->start_sector;
		bpos = 0;
		while (buf_len) {
			len = MIN(buf_len, sector_size);
			if ((eccsect < 0) || (eccsect >= ump->packet_size)) {
				udf_puteccline(eccline);
				eccline = udf_geteccline(ump, sectornr, 0);
				eccsect = sectornr - eccline->start_sector;
			}
			bit = (uint64_t) 1 << eccsect;
			error = eccline->error & bit ? EIO : 0;
			if (eccline->present & bit) {
				src = (uint8_t *) eccline->blob +
					eccsect * sector_size;
				dst = (uint8_t *) buf->b_data + bpos;
				if (!error)
					memcpy(dst, src, len);
				nestiobuf_done(buf, len, error);
			} else {
				eccline->readin |= bit;
				KASSERT(eccline->bufs[eccsect] == NULL);
				eccline->bufs[eccsect] = buf;
				eccline->bufs_bpos[eccsect] = bpos;
				eccline->bufs_len[eccsect] = len;
			}
			bpos += sector_size;
			eccsect++;
			sectornr++;
			buf_len -= len;
		}
		udf_puteccline(eccline);
		return;
	}
	if (queue == UDF_SHED_WRITING) {
		DPRINTF(SHEDULE, ("\nudf_queuebuf_rmw WRITE %p : sector %d "
			"type %d, b_resid %d, b_bcount %d, b_bufsize %d\n",
			buf, (uint32_t) buf->b_blkno / blks, buf->b_udf_c_type,
			buf->b_resid, buf->b_bcount, buf->b_bufsize));

		/* if we have FIDs fixup using buffer's sector number(s) */
		if (buf->b_udf_c_type == UDF_C_FIDS)
			panic("UDF_C_FIDS in SHED_WRITING!\n");

		udf_fixup_node_internals(ump, buf->b_data, buf->b_udf_c_type);

		/* copy parts into the bufs and set for writing */
		buf_len = buf->b_bcount;
		sectornr = our_sectornr;
		eccline = udf_geteccline(ump, sectornr, 0);
		eccsect = sectornr - eccline->start_sector;
		bpos = 0;
		while (buf_len) {
			len = MIN(buf_len, sector_size);
			if ((eccsect < 0) || (eccsect >= ump->packet_size)) {
				udf_puteccline(eccline);
				eccline = udf_geteccline(ump, sectornr, 0);
				eccsect = sectornr - eccline->start_sector;
			}
			bit = (uint64_t) 1 << eccsect;
			KASSERT((eccline->readin & bit) == 0);
			eccline->present |= bit;
			eccline->dirty   |= bit;
			if (eccline->bufs[eccsect]) {
				/* old callback still pending */
				nestiobuf_done(eccline->bufs[eccsect],
						eccline->bufs_len[eccsect],
						0);
				eccline->bufs[eccsect] = NULL;
			}

			src = (uint8_t *) buf->b_data + bpos;
			dst = (uint8_t *) eccline->blob + eccsect * sector_size;
			if (len != sector_size)
				memset(dst, 0, sector_size);
			memcpy(dst, src, len);

			/* note that it is finished for this extent */
			eccline->bufs[eccsect] = NULL;
			nestiobuf_done(buf, len, 0);

			bpos += sector_size;
			eccsect++;
			sectornr++;
			buf_len -= len;
		}
		udf_puteccline(eccline);
		return;

	}
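	/*
	 * What follows is the sequential-write path: the buffer has no disc
	 * address yet, so it is allocated late (udf_late_allocate_buf), FID
	 * and tag internals are fixed up against the new logical addresses,
	 * the logical mapping is translated to physical sectors and only
	 * then is the data copied into seqwriting ecclines.
	 */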
	/* sequential writing */
	KASSERT(queue == UDF_SHED_SEQWRITING);
	DPRINTF(SHEDULE, ("\nudf_queuebuf_rmw SEQWRITE %p : sector XXXX "
		"type %d, b_resid %d, b_bcount %d, b_bufsize %d\n",
		buf, buf->b_udf_c_type, buf->b_resid, buf->b_bcount,
		buf->b_bufsize));
	/*
	 * Buffers should not have been allocated to disc addresses yet on
	 * this queue. Note that a buffer can get multiple extents allocated.
	 * Note that it *looks* like the normal writing but it is different
	 * in the details.
	 *
	 * lmapping contains lb_num relative to base partition.
	 *
	 * XXX should we try to claim/organize the allocated memory to
	 * block-aligned pieces?
	 */
	mutex_enter(&priv->seqwrite_mutex);

	lmapping    = ump->la_lmapping;
	node_ad_cpy = ump->la_node_ad_cpy;

	/* logically allocate buf and map it in the file */
	udf_late_allocate_buf(ump, buf, lmapping, node_ad_cpy, &vpart_num);

	/* if we have FIDs, fixup using the new allocation table */
	if (buf->b_udf_c_type == UDF_C_FIDS) {
		buf_len = buf->b_bcount;
		bpos = 0;
		lmappos = lmapping;
		while (buf_len) {
			sectornr = *lmappos++;
			len = MIN(buf_len, sector_size);
			fidblk = (uint8_t *) buf->b_data + bpos;
			udf_fixup_fid_block(fidblk, sector_size,
				0, len, sectornr);
			bpos += len;
			buf_len -= len;
		}
	}
	if (buf->b_udf_c_type == UDF_C_METADATA_SBM) {
		if (buf->b_lblkno == 0) {
			/* update the tag location inside */
			tag = (struct desc_tag *) buf->b_data;
			tag->tag_loc = udf_rw32(*lmapping);
			udf_validate_tag_and_crc_sums(buf->b_data);
		}
	}
	udf_fixup_node_internals(ump, buf->b_data, buf->b_udf_c_type);

	/*
	 * Translate new mappings in lmapping to pmappings.
	 * pmapping will contain lb_nums as used for disc addressing.
	 */
	pmapping = ump->la_pmapping;
	sectors = (buf->b_bcount + sector_size -1) / sector_size;
	udf_translate_vtop_list(ump, sectors, vpart_num, lmapping, pmapping);

	/* copy parts into the bufs and set for writing */
	pmappos = pmapping;
	buf_len = buf->b_bcount;
	sectornr = *pmappos++;
	eccline = udf_geteccline(ump, sectornr, ECC_SEQWRITING);
	eccsect = sectornr - eccline->start_sector;
	bpos = 0;
	while (buf_len) {
		len = MIN(buf_len, sector_size);
		eccsect = sectornr - eccline->start_sector;
		if ((eccsect < 0) || (eccsect >= ump->packet_size)) {
			eccline->flags |= ECC_SEQWRITING;
			udf_puteccline(eccline);
			eccline = udf_geteccline(ump, sectornr, ECC_SEQWRITING);
			eccsect = sectornr - eccline->start_sector;
		}
		bit = (uint64_t) 1 << eccsect;
		KASSERT((eccline->readin & bit) == 0);
		eccline->present |= bit;
		eccline->dirty   |= bit;
		eccline->bufs[eccsect] = NULL;

		src = (uint8_t *) buf->b_data + bpos;
		dst = (uint8_t *)
			eccline->blob + eccsect * sector_size;
		if (len != sector_size)
			memset(dst, 0, sector_size);
		memcpy(dst, src, len);

		/* note that it is finished for this extent */
		nestiobuf_done(buf, len, 0);

		bpos += sector_size;
		sectornr = *pmappos++;
		buf_len -= len;
	}
	eccline->flags |= ECC_SEQWRITING;
	udf_puteccline(eccline);
	mutex_exit(&priv->seqwrite_mutex);
}

/* --------------------------------------------------------------------- */

static void
udf_sync_caches_rmw(struct udf_strat_args *args)
{
	struct udf_mount *ump = args->ump;

	udf_mmc_synchronise_caches(ump);
}

/* --------------------------------------------------------------------- */
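/*
 * The two iodone callbacks below run when a packet-sized buffer
 * completes.  They fold the result back into the line's bitmaps, complete
 * any nested client buffers that were waiting on individual sectors (read
 * case) and requeue the line through udf_puteccline().
 */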
static void
udf_shedule_read_callback(struct buf *buf)
{
	struct udf_eccline *eccline = BTOE(buf);
	struct udf_mount *ump = eccline->ump;
	uint64_t bit;
	uint8_t *src, *dst;
	int sector_size = ump->discinfo.sector_size;
	int error, i, len;

	DPRINTF(ECCLINE, ("read callback called on buf %p\n", buf));

	/* post process read action */
	KASSERT(eccline->flags & ECC_LOCKED);
	error = buf->b_error;
	for (i = 0; i < ump->packet_size; i++) {
		bit = (uint64_t) 1 << i;
		src = (uint8_t *) buf->b_data   + i * sector_size;
		dst = (uint8_t *) eccline->blob + i * sector_size;
		if (eccline->present & bit)
			continue;
		eccline->present |= bit;
		if (error)
			eccline->error |= bit;
		if (eccline->bufs[i]) {
			dst = (uint8_t *) eccline->bufs[i]->b_data +
				eccline->bufs_bpos[i];
			len = eccline->bufs_len[i];
			if (!error)
				memcpy(dst, src, len);
			nestiobuf_done(eccline->bufs[i], len, error);
			eccline->bufs[i] = NULL;
		}

	}
	KASSERT(buf->b_data == eccline->blob);
	KASSERT(eccline->present == ((uint64_t) 1 << ump->packet_size)-1);

	/*
	 * XXX TODO what to do on read errors? read in all sectors
	 * synchronously and allocate a sparable entry?
	 */

	udf_puteccline(eccline);
	DPRINTF(ECCLINE, ("read callback finished\n"));
}


static void
udf_shedule_write_callback(struct buf *buf)
{
	struct udf_eccline *eccline = BTOE(buf);
	struct udf_mount *ump = eccline->ump;
	uint64_t bit;
	int error, i;

	DPRINTF(ECCLINE, ("write callback called on buf %p\n", buf));

	/* post process write action */
	KASSERT(eccline->flags & ECC_LOCKED);
	error = buf->b_error;
	for (i = 0; i < ump->packet_size; i++) {
		bit = (uint64_t) 1 << i;
		if ((eccline->dirty & bit) == 0)
			continue;
		if (error) {
			eccline->error |= bit;
		} else {
			eccline->dirty &= ~bit;
		}

		KASSERT(eccline->bufs[i] == 0);
	}
	KASSERT(eccline->dirty == 0);
	KASSERT(error == 0);

	/*
	 * XXX TODO on write errors allocate a sparable entry and reissue
	 */

	udf_puteccline(eccline);
	DPRINTF(ECCLINE, ("write callback finished\n"));
}
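/*
 * udf_issue_eccline() turns a line into actual I/O.  For a read with some
 * sectors already present it issues one nested buffer per missing sector
 * (nestiobuf_setup) so cached sectors are not read again; full-line reads
 * and all writes go out as a single packet-sized buffer.  Note the mutex
 * dance: discstrat_mutex is dropped around VOP_STRATEGY() since the
 * device strategy routine may sleep.
 */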
static void
udf_issue_eccline(struct udf_eccline *eccline, int queued_on)
{
	struct udf_mount *ump = eccline->ump;
	struct strat_private *priv = PRIV(ump);
	struct buf *buf, *nestbuf;
	uint64_t bit, allbits = ((uint64_t) 1 << ump->packet_size)-1;
	uint32_t start;
	int sector_size = ump->discinfo.sector_size;
	int blks = sector_size / DEV_BSIZE;
	int i;

	KASSERT(eccline->flags & ECC_LOCKED);

	if (queued_on == UDF_SHED_READING) {
		DPRINTF(SHEDULE, ("udf_issue_eccline reading : "));
		/* read all bits that are not yet present */
		eccline->readin = (~eccline->present) & allbits;
		KASSERT(eccline->readin);
		start = eccline->start_sector;
		buf = eccline->buf;
		buf->b_flags    = B_READ | B_ASYNC;
		SET(buf->b_cflags, BC_BUSY);	/* mark buffer busy */
		buf->b_oflags   = 0;
		buf->b_iodone   = udf_shedule_read_callback;
		buf->b_data     = eccline->blob;
		buf->b_bcount   = ump->packet_size * sector_size;
		buf->b_resid    = buf->b_bcount;
		buf->b_bufsize  = buf->b_bcount;
		buf->b_private  = eccline;
		BIO_SETPRIO(buf, BPRIO_DEFAULT);
		buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = start * blks;
		buf->b_proc     = NULL;

		if (eccline->present != 0) {
			for (i = 0; i < ump->packet_size; i++) {
				bit = (uint64_t) 1 << i;
				if (eccline->present & bit) {
					nestiobuf_done(buf, sector_size, 0);
					continue;
				}
				nestbuf = getiobuf(NULL, true);
				nestiobuf_setup(buf, nestbuf, i * sector_size,
					sector_size);
				/* adjust blocknumber to read */
				nestbuf->b_blkno = buf->b_blkno + i*blks;
				nestbuf->b_rawblkno = buf->b_rawblkno + i*blks;

				DPRINTF(SHEDULE, ("sector %d ", start + i));

				/* mutex dance since it could lock */
				mutex_exit(&priv->discstrat_mutex);
				/* call asynchronous */
				VOP_STRATEGY(ump->devvp, nestbuf);
				mutex_enter(&priv->discstrat_mutex);
			}
			DPRINTF(SHEDULE, ("\n"));
			return;
		}
	} else {
		/* write or seqwrite */
		DPRINTF(SHEDULE, ("udf_issue_eccline writing or seqwriting : "));
		DPRINTF(SHEDULE, ("\n\tpresent %"PRIx64", readin %"PRIx64", "
			"dirty %"PRIx64"\n\t", eccline->present, eccline->readin,
			eccline->dirty));
		KASSERT(eccline->present == allbits);

		start = eccline->start_sector;
		buf = eccline->buf;
		buf->b_flags    = B_WRITE | B_ASYNC;
		SET(buf->b_cflags, BC_BUSY);	/* mark buffer busy */
		buf->b_oflags   = 0;
		buf->b_iodone   = udf_shedule_write_callback;
		buf->b_data     = eccline->blob;
		buf->b_bcount   = ump->packet_size * sector_size;
		buf->b_resid    = buf->b_bcount;
		buf->b_bufsize  = buf->b_bcount;
		buf->b_private  = eccline;
		BIO_SETPRIO(buf, BPRIO_DEFAULT);
		buf->b_lblkno   = buf->b_blkno = buf->b_rawblkno = start * blks;
		buf->b_proc     = NULL;
	}

	/* mutex dance since it could lock */
	mutex_exit(&priv->discstrat_mutex);
	/* call asynchronous */
	DPRINTF(SHEDULE, ("sector %d for %d\n",
		start, ump->packet_size));
	VOP_STRATEGY(ump->devvp, buf);
	mutex_enter(&priv->discstrat_mutex);
}
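/*
 * The scheduler thread below is a small state machine.  Each round it
 * first walks the WAITING queue, requeueing dirty lines whose grace time
 * has expired (or all of them while reads are pending), then trims the
 * FREE list down to UDF_ECCLINE_MAXFREE, and finally issues lines from
 * the current queue.  It only switches between the reading, writing and
 * seqwriting queues when the current queue has been empty for a while, so
 * the disc is kept in one mode as long as possible.
 */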
static void
udf_discstrat_thread(void *arg)
{
	struct udf_mount *ump = (struct udf_mount *) arg;
	struct strat_private *priv = PRIV(ump);
	struct udf_eccline *eccline;
	struct timespec now, *last;
	uint64_t allbits = ((uint64_t) 1 << ump->packet_size)-1;
	int new_queue, wait, work;

	work = 1;
	priv->thread_running = 1;
	cv_broadcast(&priv->discstrat_cv);

	mutex_enter(&priv->discstrat_mutex);
	priv->num_floating = 0;
	while (priv->run_thread || work || priv->num_floating) {
		/* get our time */
		vfs_timestamp(&now);

		/* maintenance: handle eccline state machine */
		for(;;) {
			/* only peek at it */
			eccline = udf_peek_eccline(priv, UDF_SHED_WAITING);
			if (eccline == NULL)
				break;

			/* if not reading, wait until the time has come */
			if ((priv->cur_queue != UDF_SHED_READING) &&
			    (eccline->wait_time.tv_sec - now.tv_sec > 0)) {
				UDF_UNLOCK_ECCLINE(eccline);
				/* all others are later, so break off */
				break;
			}

			/* release */
			UDF_UNLOCK_ECCLINE(eccline);

			/* do get it */
			eccline = udf_pop_eccline(priv, UDF_SHED_WAITING);

			/* requeue according to state */
			new_queue = UDF_SHED_FREE;	/* unlikely */
			if (eccline->refcnt > 0)
				new_queue = UDF_SHED_IDLE;
			if (eccline->flags & ECC_WANTED)
				new_queue = UDF_SHED_IDLE;
			if (eccline->readin)
				new_queue = UDF_SHED_READING;
			if (eccline->dirty) {
				new_queue = UDF_SHED_READING;
				if (eccline->present == allbits) {
					new_queue = UDF_SHED_WRITING;
					if (eccline->flags & ECC_SEQWRITING)
						new_queue = UDF_SHED_SEQWRITING;
				}
			}
			udf_push_eccline(eccline, new_queue);
		}

		/* maintenance: free excess ecclines */
		while (priv->num_queued[UDF_SHED_FREE] > UDF_ECCLINE_MAXFREE) {
			eccline = udf_pop_eccline(priv, UDF_SHED_FREE);
			KASSERT(eccline);
			KASSERT(eccline->refcnt == 0);
			if (eccline->flags & ECC_WANTED) {
				/* we won the race, but we don't want to win */
				DPRINTF(ECCLINE, ("Tried removing, pushed back to free list\n"));
				udf_push_eccline(eccline, UDF_SHED_IDLE);
			} else {
				DPRINTF(ECCLINE, ("Removing entry from free list\n"));
				udf_dispose_eccline(eccline);
			}
		}
		/* process the current selected queue */
		/* get our time */
		vfs_timestamp(&now);
		last = &priv->last_queued[priv->cur_queue];

		/* get our line */
		eccline = udf_pop_eccline(priv, priv->cur_queue);
		if (eccline) {
			wait = 0;
			new_queue = priv->cur_queue;
			DPRINTF(ECCLINE, ("UDF_ISSUE_ECCLINE\n"));

			udf_issue_eccline(eccline, priv->cur_queue);
		} else {
			/* don't switch too quickly */
			if (now.tv_sec - last->tv_sec < 2) {
				/* wait some time */
				cv_timedwait(&priv->discstrat_cv,
					&priv->discstrat_mutex, hz);
				/* we assume there is work to be done */
				work = 1;
				continue;
			}

			/* XXX select on queue lengths ? */
			wait = 1;
			/* check if we can/should switch */
			new_queue = priv->cur_queue;
			if (bufq_peek(priv->queues[UDF_SHED_READING]))
				new_queue = UDF_SHED_READING;
			if (bufq_peek(priv->queues[UDF_SHED_WRITING]))
				new_queue = UDF_SHED_WRITING;
			if (bufq_peek(priv->queues[UDF_SHED_SEQWRITING]))
				new_queue = UDF_SHED_SEQWRITING;
		}

		/* give room */
		mutex_exit(&priv->discstrat_mutex);

		if (new_queue != priv->cur_queue) {
			wait = 0;
			DPRINTF(SHEDULE, ("switching from %d to %d\n",
				priv->cur_queue, new_queue));
			priv->cur_queue = new_queue;
		}
		mutex_enter(&priv->discstrat_mutex);

		/* wait for more if needed */
		if (wait)
			cv_timedwait(&priv->discstrat_cv,
				&priv->discstrat_mutex, hz/4);	/* /8 */

		work  = (bufq_peek(priv->queues[UDF_SHED_WAITING]) != NULL);
		work |= (bufq_peek(priv->queues[UDF_SHED_READING]) != NULL);
		work |= (bufq_peek(priv->queues[UDF_SHED_WRITING]) != NULL);
		work |= (bufq_peek(priv->queues[UDF_SHED_SEQWRITING]) != NULL);

		DPRINTF(PARANOIA, ("work : (%d, %d, %d) -> work %d, float %d\n",
			(bufq_peek(priv->queues[UDF_SHED_READING]) != NULL),
			(bufq_peek(priv->queues[UDF_SHED_WRITING]) != NULL),
			(bufq_peek(priv->queues[UDF_SHED_SEQWRITING]) != NULL),
			work, priv->num_floating));
	}

	mutex_exit(&priv->discstrat_mutex);

	/* tear down remaining ecclines */
	mutex_enter(&priv->discstrat_mutex);
	KASSERT(bufq_peek(priv->queues[UDF_SHED_WAITING]) == NULL);
	KASSERT(bufq_peek(priv->queues[UDF_SHED_IDLE]) == NULL);
	KASSERT(bufq_peek(priv->queues[UDF_SHED_READING]) == NULL);
	KASSERT(bufq_peek(priv->queues[UDF_SHED_WRITING]) == NULL);
	KASSERT(bufq_peek(priv->queues[UDF_SHED_SEQWRITING]) == NULL);

	KASSERT(priv->num_queued[UDF_SHED_WAITING] == 0);
	KASSERT(priv->num_queued[UDF_SHED_IDLE] == 0);
	KASSERT(priv->num_queued[UDF_SHED_READING] == 0);
	KASSERT(priv->num_queued[UDF_SHED_WRITING] == 0);
	KASSERT(priv->num_queued[UDF_SHED_SEQWRITING] == 0);

	eccline = udf_pop_eccline(priv, UDF_SHED_FREE);
	while (eccline) {
		udf_dispose_eccline(eccline);
		eccline = udf_pop_eccline(priv, UDF_SHED_FREE);
	}
	KASSERT(priv->num_queued[UDF_SHED_FREE] == 0);
	mutex_exit(&priv->discstrat_mutex);

	priv->thread_running  = 0;
	priv->thread_finished = 1;
	cv_broadcast(&priv->discstrat_cv);
1362 1.31 reinoud 
1363 1.1 reinoud kthread_exit(0);
1364 1.1 reinoud /* not reached */
1365 1.1 reinoud }
1366 1.1 reinoud 
1367 1.1 reinoud /* --------------------------------------------------------------------- */
1368 1.1 reinoud 
1369 1.1 reinoud /*
1370 1.1 reinoud * Buffer memory pool allocator.
1371 1.1 reinoud */
1372 1.1 reinoud 
1373 1.1 reinoud static void *
1374 1.1 reinoud ecclinepool_page_alloc(struct pool *pp, int flags)
1375 1.1 reinoud {
1376 1.1 reinoud return (void *)uvm_km_alloc(kernel_map,
1377 1.1 reinoud MAXBSIZE, MAXBSIZE,
1378 1.1 reinoud ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
1379 1.1 reinoud | UVM_KMF_WIRED /* UVM_KMF_PAGABLE? */);
1380 1.1 reinoud }
1381 1.1 reinoud 
1382 1.1 reinoud static void
1383 1.1 reinoud ecclinepool_page_free(struct pool *pp, void *v)
1384 1.1 reinoud {
1385 1.1 reinoud uvm_km_free(kernel_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
1386 1.1 reinoud }
1387 1.1 reinoud 
1388 1.1 reinoud static struct pool_allocator ecclinepool_allocator = {
1389 1.1 reinoud .pa_alloc = ecclinepool_page_alloc,
1390 1.1 reinoud .pa_free = ecclinepool_page_free,
1391 1.1 reinoud .pa_pagesz = MAXBSIZE,
1392 1.1 reinoud };
1393 1.1 reinoud 
1394 1.1 reinoud 
1395 1.1 reinoud static void
1396 1.1 reinoud udf_discstrat_init_rmw(struct udf_strat_args *args)
1397 1.1 reinoud {
1398 1.1 reinoud struct udf_mount *ump = args->ump;
1399 1.1 reinoud struct strat_private *priv = PRIV(ump);
1400 1.1 reinoud uint32_t lb_size, blobsize, hashline;
1401 1.1 reinoud int i;
1402 1.1 reinoud 
1403 1.1 reinoud KASSERT(ump);
1404 1.1 reinoud KASSERT(ump->logical_vol);
1405 1.1 reinoud KASSERT(priv == NULL);
1406 1.1 reinoud 
1407 1.1 reinoud lb_size = udf_rw32(ump->logical_vol->lb_size);
1408 1.1 reinoud blobsize = ump->packet_size * lb_size;
1409 1.1 reinoud KASSERT(lb_size > 0);
1410 1.1 reinoud KASSERT(ump->packet_size <= 64);
1411 1.1 reinoud 
1412 1.1 reinoud /* initialise our memory space */
1413 1.1 reinoud ump->strategy_private = malloc(sizeof(struct strat_private),
1414 1.1 reinoud M_UDFTEMP, M_WAITOK);
1415 1.1 reinoud priv = ump->strategy_private;
1416 1.1 reinoud memset(priv, 0, sizeof(struct strat_private));
1417 1.1 reinoud 
1418 1.1 reinoud /* initialise locks */
1419 1.1 reinoud cv_init(&priv->discstrat_cv, "udfstrat");
1420 1.15 reinoud mutex_init(&priv->discstrat_mutex, MUTEX_DEFAULT, IPL_NONE);
1421 1.1 reinoud mutex_init(&priv->seqwrite_mutex, MUTEX_DEFAULT, IPL_NONE);
1422 1.1 reinoud 
1423 1.1 reinoud /* initialise struct eccline pool */
1424 1.1 reinoud pool_init(&priv->eccline_pool, sizeof(struct udf_eccline),
1425 1.1 reinoud 0, 0, 0, "udf_eccline_pool", NULL, IPL_NONE);
1426 1.1 reinoud 
1427 1.1 reinoud /* initialise eccline blob pool */
1428 1.12 reinoud ecclinepool_allocator.pa_pagesz = blobsize;
1429 1.1 reinoud pool_init(&priv->ecclineblob_pool, blobsize,
1430 1.12 reinoud 0, 0, 0, "udf_eccline_blob", &ecclinepool_allocator, IPL_NONE);
1431 1.1 reinoud 
1432 1.1 reinoud /* initialise main queues */
1433 1.1 reinoud for (i = 0; i < UDF_SHED_MAX; i++) {
1434 1.1 reinoud priv->num_queued[i] = 0;
1435 1.1 reinoud vfs_timestamp(&priv->last_queued[i]);
1436 1.1 reinoud }
1437 1.11 reinoud bufq_alloc(&priv->queues[UDF_SHED_WAITING], "fcfs",
1438 1.11 reinoud BUFQ_SORT_RAWBLOCK);
1439 1.1 reinoud bufq_alloc(&priv->queues[UDF_SHED_READING], "disksort",
1440 1.1 reinoud BUFQ_SORT_RAWBLOCK);
1441 1.1 reinoud bufq_alloc(&priv->queues[UDF_SHED_WRITING], "disksort",
1442 1.1 reinoud BUFQ_SORT_RAWBLOCK);
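/*
 * The reading and writing queues use the "disksort" discipline with
 * BUFQ_SORT_RAWBLOCK, so queued packets are elevator-sorted on their raw
 * (physical) block number to reduce seeking; "fcfs" keeps the waiting
 * queue in plain arrival order. A consumer drains such a queue with
 * bufq_get(), roughly as follows (illustrative sketch only, not part of
 * the original file):
 *
 *	struct buf *bp;
 *	while ((bp = bufq_get(priv->queues[UDF_SHED_READING])) != NULL)
 *		... start the I/O described by bp ...
 *
 * The sequential-writing queue allocated below passes no sort flag,
 * presumably because its packets must go out in queueing order.
 */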
1443 1.1 reinoud bufq_alloc(&priv->queues[UDF_SHED_SEQWRITING], "disksort", 0);
1444 1.1 reinoud 
1445 1.1 reinoud /* initialise administrative queues */
1446 1.1 reinoud bufq_alloc(&priv->queues[UDF_SHED_IDLE], "fcfs", 0);
1447 1.1 reinoud bufq_alloc(&priv->queues[UDF_SHED_FREE], "fcfs", 0);
1448 1.1 reinoud 
1449 1.1 reinoud for (hashline = 0; hashline < UDF_ECCBUF_HASHSIZE; hashline++) {
1450 1.1 reinoud LIST_INIT(&priv->eccline_hash[hashline]);
1451 1.1 reinoud }
1452 1.1 reinoud 
1453 1.1 reinoud /* create our disk strategy thread */
1454 1.1 reinoud priv->cur_queue = UDF_SHED_READING;
1455 1.1 reinoud priv->thread_finished = 0;
1456 1.13 reinoud priv->thread_running = 0;
1457 1.1 reinoud priv->run_thread = 1;
1458 1.1 reinoud if (kthread_create(PRI_NONE, 0 /* KTHREAD_MPSAFE */, NULL /* cpu_info */,
1459 1.1 reinoud udf_discstrat_thread, ump, &priv->queue_lwp,
1460 1.1 reinoud "%s", "udf_rw")) {
1461 1.1 reinoud panic("fork udf_rw");
1462 1.1 reinoud }
1463 1.13 reinoud 
1464 1.13 reinoud /* wait for thread to spin up */
1465 1.31 reinoud mutex_enter(&priv->discstrat_mutex);
1466 1.13 reinoud while (!priv->thread_running) {
1467 1.31 reinoud cv_timedwait(&priv->discstrat_cv, &priv->discstrat_mutex, hz);
1468 1.13 reinoud }
1469 1.31 reinoud mutex_exit(&priv->discstrat_mutex);
1470 1.1 reinoud }
1471 1.1 reinoud 
1472 1.1 reinoud 
1473 1.1 reinoud static void
1474 1.1 reinoud udf_discstrat_finish_rmw(struct udf_strat_args *args)
1475 1.1 reinoud {
1476 1.1 reinoud struct udf_mount *ump = args->ump;
1477 1.1 reinoud struct strat_private *priv = PRIV(ump);
1478 1.1 reinoud 
1479 1.1 reinoud if (ump == NULL)
1480 1.1 reinoud return;
1481 1.1 reinoud 
1482 1.1 reinoud /* stop our scheduling thread */
1483 1.1 reinoud KASSERT(priv->run_thread == 1);
1484 1.1 reinoud priv->run_thread = 0;
1485 1.31 reinoud 
1486 1.31 reinoud mutex_enter(&priv->discstrat_mutex);
1487 1.1 reinoud while (!priv->thread_finished) {
1488 1.31 reinoud cv_broadcast(&priv->discstrat_cv);
1489 1.31 reinoud cv_timedwait(&priv->discstrat_cv, &priv->discstrat_mutex, hz);
1490 1.1 reinoud }
1491 1.31 reinoud mutex_exit(&priv->discstrat_mutex);
1492 1.31 reinoud 
1493 1.1 reinoud /* kthread should be finished now */
1494 1.31 reinoud cv_destroy(&priv->discstrat_cv);
1495 1.31 reinoud mutex_destroy(&priv->discstrat_mutex);
1496 1.31 reinoud mutex_destroy(&priv->seqwrite_mutex);
1497 1.1 reinoud 
1498 1.1 reinoud /* cleanup our pools */
1499 1.1 reinoud pool_destroy(&priv->eccline_pool);
1500 1.1 reinoud pool_destroy(&priv->ecclineblob_pool);
1501 1.1 reinoud 
1502 1.1 reinoud /* free our private space */
1503 1.1 reinoud free(ump->strategy_private, M_UDFTEMP);
1504 1.1 reinoud ump->strategy_private = NULL;
1505 1.1 reinoud }
1506 1.1 reinoud 
1507 1.1 reinoud /* --------------------------------------------------------------------- */
1508 1.1 reinoud 
1509 1.1 reinoud struct udf_strategy udf_strat_rmw =
1510 1.1 reinoud {
1511 1.5 reinoud udf_create_nodedscr_rmw,
1512 1.5 reinoud udf_free_nodedscr_rmw,
1513 1.5 reinoud udf_read_nodedscr_rmw,
1514 1.5 reinoud udf_write_nodedscr_rmw,
1515 1.1 reinoud udf_queuebuf_rmw,
1516 1.28 reinoud udf_sync_caches_rmw,
1517 1.1 reinoud udf_discstrat_init_rmw,
1518 1.1 reinoud udf_discstrat_finish_rmw
1519 1.1 reinoud };
1520 1.1 reinoud 
1521
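/*
 * udf_strat_rmw is the public face of this file: a struct udf_strategy
 * vector holding the node descriptor operations (create/free/read/write),
 * buffer queueing, cache synchronisation and the init/finish hooks, in
 * that order. Everything else above is static, so the eccline machinery
 * is reachable only through these entry points, presumably selected by
 * the generic UDF code when a mount calls for the read-modify-write
 * strategy.
 */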