/*	$NetBSD: chfs_wbuf.c,v 1.4.4.2 2012/04/17 00:08:55 yamt Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <dev/flash/flash.h>
#include <sys/uio.h>
#include "chfs.h"

#define DBG_WBUF 1		/* XXX unused, but should be */

#define PAD(x) (((x)+3)&~3)

#define EB_ADDRESS(x) ( rounddown((x), chmp->chm_ebh->eb_size) )

#define PAGE_DIV(x) ( rounddown((x), chmp->chm_wbuf_pagesize) )
#define PAGE_MOD(x) ( (x) % (chmp->chm_wbuf_pagesize) )

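/*
 * Worked examples for the helpers above.  The page and eraseblock sizes
 * used here are illustrative only; the real values come from the flash
 * driver via chmp->chm_wbuf_pagesize and chmp->chm_ebh->eb_size.
 *
 *   PAD(5)  = 8, PAD(8) = 8          (round up to a 4 byte boundary)
 *   with chm_wbuf_pagesize == 2048:
 *     PAGE_DIV(5000) = 4096          (start of the containing page)
 *     PAGE_MOD(5000) = 904           (offset within that page)
 *   with eb_size == 131072:
 *     EB_ADDRESS(140000) = 131072    (start of the containing eraseblock)
 */
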
/* Padding modes for chfs_flush_wbuf(). */
enum {
	WBUF_NOPAD,
	WBUF_SETPAD
};

/**
 * chfs_flush_wbuf - write wbuf to the flash
 * @chmp: super block info
 * @pad: padding (WBUF_NOPAD / WBUF_SETPAD)
 * Returns zero on success.
 */
static int
chfs_flush_wbuf(struct chfs_mount *chmp, int pad)
{
	int ret;
	size_t retlen;
	struct chfs_node_ref *nref;
	struct chfs_flash_padding_node *padnode;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_sizes));
	KASSERT(rw_write_held(&chmp->chm_lock_wbuf));
	KASSERT(pad == WBUF_SETPAD || pad == WBUF_NOPAD);

	if (pad == WBUF_SETPAD) {
		/* Pad the buffer to a full page with a padding node. */
		chmp->chm_wbuf_len = PAD(chmp->chm_wbuf_len);
		memset(chmp->chm_wbuf + chmp->chm_wbuf_len, 0,
		    chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len);

		padnode = (void *)(chmp->chm_wbuf + chmp->chm_wbuf_len);
		padnode->magic = htole16(CHFS_FS_MAGIC_BITMASK);
		padnode->type = htole16(CHFS_NODETYPE_PADDING);
		padnode->length = htole32(chmp->chm_wbuf_pagesize
		    - chmp->chm_wbuf_len);
		padnode->hdr_crc = htole32(crc32(0, (uint8_t *)padnode,
			sizeof(*padnode)-4));

		/* Reference the padding node as an already obsolete node. */
		nref = chfs_alloc_node_ref(chmp->chm_nextblock);
		nref->nref_offset = chmp->chm_wbuf_ofs + chmp->chm_wbuf_len;
		nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
		    CHFS_OBSOLETE_NODE_MASK;
		chmp->chm_wbuf_len = chmp->chm_wbuf_pagesize;

		/* Account the padded area as wasted, not free, space. */
		chfs_change_size_free(chmp, chmp->chm_nextblock,
		    -padnode->length);
		chfs_change_size_wasted(chmp, chmp->chm_nextblock,
		    padnode->length);
	}

	/* Write out the buffer, then reset it for the next page. */
	ret = chfs_write_leb(chmp, chmp->chm_nextblock->lnr, chmp->chm_wbuf,
	    chmp->chm_wbuf_ofs, chmp->chm_wbuf_len, &retlen);
	if (ret) {
		return ret;
	}

	memset(chmp->chm_wbuf, 0xff, chmp->chm_wbuf_pagesize);
	chmp->chm_wbuf_ofs += chmp->chm_wbuf_pagesize;
	chmp->chm_wbuf_len = 0;

	return 0;
}

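
/*
 * Illustration (derived from chfs_flush_wbuf() above) of the buffer
 * content just before a WBUF_SETPAD flush reaches the flash:
 *
 *   0              wbuf_len (rounded up by PAD())        wbuf_pagesize
 *   +--------------+--------------------------+----------------------+
 *   | buffered     | chfs_flash_padding_node  | zero fill            |
 *   | node data    | header                   |                      |
 *   +--------------+--------------------------+----------------------+
 *
 * The padding area gets an obsolete node reference, and its size is
 * moved from the free to the wasted counter of the eraseblock.
 */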

/**
 * chfs_fill_wbuf - write to the wbuf
 * @chmp: super block info
 * @buf: buffer
 * @len: buffer length
 * Returns the number of bytes copied into the wbuf; returns zero without
 * copying anything if the wbuf is empty and the request covers at least
 * a whole page, so the caller can write that data to the flash directly.
 */
static size_t
chfs_fill_wbuf(struct chfs_mount *chmp, const u_char *buf, size_t len)
{
	/* A page-sized (or larger) write into an empty wbuf is not
	 * buffered at all; the caller writes it directly to the flash. */
	if (len && !chmp->chm_wbuf_len && (len >= chmp->chm_wbuf_pagesize)) {
		return 0;
	}
	/* Copy at most as much as still fits into the current page. */
	if (len > (chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len)) {
		len = chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len;
	}
	memcpy(chmp->chm_wbuf + chmp->chm_wbuf_len, buf, len);

	chmp->chm_wbuf_len += (int) len;
	return len;
}
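
/*
 * Examples of the chfs_fill_wbuf() contract, assuming an illustrative
 * chm_wbuf_pagesize of 2048 bytes:
 *
 *   chm_wbuf_len == 1500, len == 1000 -> copies and returns 548
 *                                        (the buffer page is now full)
 *   chm_wbuf_len == 0,    len == 4096 -> copies nothing, returns 0
 *                                        (the caller writes such full
 *                                        pages with chfs_write_leb())
 *   chm_wbuf_len == 0,    len == 300  -> copies and returns 300
 */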

/**
 * chfs_write_wbuf - write to wbuf and then the flash
 * @chmp: super block info
 * @invecs: io vectors
 * @count: num of vectors
 * @to: offset of target
 * @retlen: bytes written
 * Returns zero on success.
 */
int
chfs_write_wbuf(struct chfs_mount *chmp, const struct iovec *invecs, long count,
    off_t to, size_t *retlen)
{
	int invec, ret = 0;
	size_t wbuf_retlen, donelen = 0;
	int outvec_to = to;

	int lnr = chmp->chm_nextblock->lnr;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_sizes));
	KASSERT(!rw_write_held(&chmp->chm_lock_wbuf));

	rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);

	//dbg("1. wbuf ofs: %zu, len: %zu\n", chmp->chm_wbuf_ofs, chmp->chm_wbuf_len);

	/* Initialize the wbuf on its first use. */
	if (chmp->chm_wbuf_ofs == 0xffffffff) {
		chmp->chm_wbuf_ofs = PAGE_DIV(to);
		chmp->chm_wbuf_len = PAGE_MOD(to);
		memset(chmp->chm_wbuf, 0xff, chmp->chm_wbuf_pagesize);
	}

	//dbg("2. wbuf ofs: %zu, len: %zu\n", chmp->chm_wbuf_ofs, chmp->chm_wbuf_len);

	/* The write crosses into another eraseblock: flush what we have,
	 * padded to a full page, and move the wbuf to the new position. */
	if (EB_ADDRESS(to) != EB_ADDRESS(chmp->chm_wbuf_ofs)) {
		if (chmp->chm_wbuf_len) {
			ret = chfs_flush_wbuf(chmp, WBUF_SETPAD);
			if (ret)
				goto outerr;
		}
		chmp->chm_wbuf_ofs = PAGE_DIV(to);
		chmp->chm_wbuf_len = PAGE_MOD(to);
	}

	//dbg("3. wbuf ofs: %zu, len: %zu\n", chmp->chm_wbuf_ofs, chmp->chm_wbuf_len);

	if (to != PAD(chmp->chm_wbuf_ofs + chmp->chm_wbuf_len)) {
		dbg("to: %llu != %zu\n", (unsigned long long)to,
			PAD(chmp->chm_wbuf_ofs + chmp->chm_wbuf_len));
		dbg("Non-contiguous write\n");
		panic("BUG\n");
	}

	/* adjust alignment offset */
	if (chmp->chm_wbuf_len != PAGE_MOD(to)) {
		chmp->chm_wbuf_len = PAGE_MOD(to);
		/* take care of alignment to the next page */
		if (!chmp->chm_wbuf_len) {
			chmp->chm_wbuf_len += chmp->chm_wbuf_pagesize;
			ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
			if (ret)
				goto outerr;
		}
	}

	for (invec = 0; invec < count; invec++) {
		int vlen = invecs[invec].iov_len;
		u_char *v = invecs[invec].iov_base;

		//dbg("invec:%d len:%d\n", invec, vlen);

		/* Fill up the current page of the wbuf first. */
		wbuf_retlen = chfs_fill_wbuf(chmp, v, vlen);
		if (chmp->chm_wbuf_len == chmp->chm_wbuf_pagesize) {
			ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
			if (ret) {
				goto outerr;
			}
		}
		vlen -= wbuf_retlen;
		outvec_to += wbuf_retlen;
		v += wbuf_retlen;
		donelen += wbuf_retlen;
		/* Write any remaining whole pages directly to the flash. */
		if (vlen >= chmp->chm_wbuf_pagesize) {
			ret = chfs_write_leb(chmp, lnr, v, outvec_to, PAGE_DIV(vlen), &wbuf_retlen);
			//dbg("fd->write: %zu\n", wbuf_retlen);
			if (ret)
				goto outerr;
			vlen -= wbuf_retlen;
			outvec_to += wbuf_retlen;
			chmp->chm_wbuf_ofs = outvec_to;
			v += wbuf_retlen;
			donelen += wbuf_retlen;
		}
		/* Buffer the tail of the vector for a later write. */
		wbuf_retlen = chfs_fill_wbuf(chmp, v, vlen);
		if (chmp->chm_wbuf_len == chmp->chm_wbuf_pagesize) {
			ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
			if (ret)
				goto outerr;
		}

		// if we write the last vector, we flush with padding
		/*if (invec == count-1) {
		  ret = chfs_flush_wbuf(chmp, WBUF_SETPAD);
		  if (ret)
		  goto outerr;
		  }*/
		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
	}
	*retlen = donelen;
	rw_exit(&chmp->chm_lock_wbuf);
	return ret;

outerr:
	*retlen = 0;
	/* Drop the wbuf lock on the error path, too. */
	rw_exit(&chmp->chm_lock_wbuf);
	return ret;
}
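
#if 0
/*
 * Purely illustrative, hypothetical caller of chfs_write_wbuf(); it is
 * not part of CHFS.  It only sketches the locking protocol asserted
 * above and how a node header and its data are handed over as iovecs.
 */
static int
example_write_node(struct chfs_mount *chmp, void *hdr, size_t hdrlen,
    void *data, size_t datalen, off_t ofs, size_t *retlen)
{
	struct iovec vec[2];
	int err;

	vec[0].iov_base = hdr;
	vec[0].iov_len = hdrlen;
	vec[1].iov_base = data;
	vec[1].iov_len = datalen;

	/* The caller is expected to hold chm_lock_mountfields already. */
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	mutex_enter(&chmp->chm_lock_sizes);
	err = chfs_write_wbuf(chmp, vec, 2, ofs, retlen);
	mutex_exit(&chmp->chm_lock_sizes);

	return err;
}
#endif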

/**
 * chfs_flush_pending_wbuf - write the pending content of the wbuf
 * to the flash, padded to a full page
 * @chmp: super block info
 * Returns zero on success.
 */
int
chfs_flush_pending_wbuf(struct chfs_mount *chmp)
{
	//dbg("flush pending wbuf\n");
	int err;
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	mutex_enter(&chmp->chm_lock_sizes);
	rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);
	err = chfs_flush_wbuf(chmp, WBUF_SETPAD);
	rw_exit(&chmp->chm_lock_wbuf);
	mutex_exit(&chmp->chm_lock_sizes);
	return err;
}