chfs_wbuf.c revision 1.4.4.3 1 1.4.4.3 yamt /* $NetBSD: chfs_wbuf.c,v 1.4.4.3 2012/10/30 17:22:58 yamt Exp $ */
2 1.4.4.2 yamt
3 1.4.4.2 yamt /*-
4 1.4.4.2 yamt * Copyright (c) 2010 Department of Software Engineering,
5 1.4.4.2 yamt * University of Szeged, Hungary
6 1.4.4.2 yamt * Copyright (C) 2010 Tamas Toth <ttoth (at) inf.u-szeged.hu>
7 1.4.4.2 yamt * Copyright (C) 2010 Adam Hoka <ahoka (at) NetBSD.org>
8 1.4.4.2 yamt * All rights reserved.
9 1.4.4.2 yamt *
10 1.4.4.2 yamt * This code is derived from software contributed to The NetBSD Foundation
11 1.4.4.2 yamt * by the Department of Software Engineering, University of Szeged, Hungary
12 1.4.4.2 yamt *
13 1.4.4.2 yamt * Redistribution and use in source and binary forms, with or without
14 1.4.4.2 yamt * modification, are permitted provided that the following conditions
15 1.4.4.2 yamt * are met:
16 1.4.4.2 yamt * 1. Redistributions of source code must retain the above copyright
17 1.4.4.2 yamt * notice, this list of conditions and the following disclaimer.
18 1.4.4.2 yamt * 2. Redistributions in binary form must reproduce the above copyright
19 1.4.4.2 yamt * notice, this list of conditions and the following disclaimer in the
20 1.4.4.2 yamt * documentation and/or other materials provided with the distribution.
21 1.4.4.2 yamt *
22 1.4.4.2 yamt * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 1.4.4.2 yamt * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 1.4.4.2 yamt * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 1.4.4.2 yamt * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 1.4.4.2 yamt * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 1.4.4.2 yamt * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 1.4.4.2 yamt * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 1.4.4.2 yamt * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 1.4.4.2 yamt * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 1.4.4.2 yamt * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 1.4.4.2 yamt * SUCH DAMAGE.
33 1.4.4.2 yamt */
34 1.4.4.2 yamt
35 1.4.4.2 yamt #include <dev/flash/flash.h>
36 1.4.4.2 yamt #include <sys/uio.h>
37 1.4.4.2 yamt #include "chfs.h"
38 1.4.4.2 yamt
#define DBG_WBUF 1 /* XXX unused, but should be */

/* Round a length up to the next multiple of 4 (node alignment). */
#define PAD(x) (((x)+3)&~3)

/*
 * Start address of the eraseblock containing offset x.
 * NB: this macro (and the two below) expand a local variable named
 * "chmp", which must be in scope at every use site.
 */
#define EB_ADDRESS(x)	( rounddown((x), chmp->chm_ebh->eb_size) )

/* Offset rounded down to / remainder within a writebuffer page. */
#define PAGE_DIV(x) ( rounddown((x), chmp->chm_wbuf_pagesize) )
#define PAGE_MOD(x) ( (x) % (chmp->chm_wbuf_pagesize) )

/* writebuffer options */
enum {
	WBUF_NOPAD,	/* flush the buffer as-is */
	WBUF_SETPAD	/* pad the remainder of the page with a padding node */
};
53 1.4.4.2 yamt
/*
 * chfs_flush_wbuf - write wbuf to the flash
 * Returns zero in case of success.
 *
 * Caller must hold chm_lock_mountfields, chm_lock_sizes and the wbuf
 * rwlock as writer (asserted below).  With WBUF_SETPAD the unused tail
 * of the page is filled with a padding node (recorded as an obsolete
 * node ref and accounted as wasted space) so a full page is written.
 */
static int
chfs_flush_wbuf(struct chfs_mount *chmp, int pad)
{
	int ret;
	size_t retlen;
	struct chfs_node_ref *nref;
	struct chfs_flash_padding_node* padnode;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_sizes));
	KASSERT(rw_write_held(&chmp->chm_lock_wbuf));
	KASSERT(pad == WBUF_SETPAD || pad == WBUF_NOPAD);

	/* check padding option */
	if (pad == WBUF_SETPAD) {
		/* align the used length, zero the tail of the page */
		chmp->chm_wbuf_len = PAD(chmp->chm_wbuf_len);
		memset(chmp->chm_wbuf + chmp->chm_wbuf_len, 0,
		    chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len);

		/* add a padding node covering the rest of the page */
		padnode = (void *)(chmp->chm_wbuf + chmp->chm_wbuf_len);
		padnode->magic = htole16(CHFS_FS_MAGIC_BITMASK);
		padnode->type = htole16(CHFS_NODETYPE_PADDING);
		padnode->length = htole32(chmp->chm_wbuf_pagesize
		    - chmp->chm_wbuf_len);
		/* CRC covers the header minus the trailing 4-byte crc field */
		padnode->hdr_crc = htole32(crc32(0, (uint8_t *)padnode,
		    sizeof(*padnode)-4));

		/* reference the padding node, immediately marked obsolete */
		nref = chfs_alloc_node_ref(chmp->chm_nextblock);
		nref->nref_offset = chmp->chm_wbuf_ofs + chmp->chm_wbuf_len;
		nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
		    CHFS_OBSOLETE_NODE_MASK;
		chmp->chm_wbuf_len = chmp->chm_wbuf_pagesize;

		/*
		 * change sizes after padding node
		 * NOTE(review): padnode->length was stored with htole32(),
		 * so on a big-endian host the byte-swapped value is fed to
		 * the size accounting below — looks endian-unsafe; confirm.
		 */
		chfs_change_size_free(chmp, chmp->chm_nextblock,
		    -padnode->length);
		chfs_change_size_wasted(chmp, chmp->chm_nextblock,
		    padnode->length);
	}

	/*
	 * write out the buffer
	 * NOTE(review): retlen is never compared against chm_wbuf_len,
	 * so a short write would go unnoticed here — confirm intent.
	 */
	ret = chfs_write_leb(chmp, chmp->chm_nextblock->lnr, chmp->chm_wbuf,
	    chmp->chm_wbuf_ofs, chmp->chm_wbuf_len, &retlen);
	if (ret) {
		return ret;
	}

	/* reset the buffer: refill with 0xff (erased-flash pattern) */
	memset(chmp->chm_wbuf, 0xff, chmp->chm_wbuf_pagesize);
	chmp->chm_wbuf_ofs += chmp->chm_wbuf_pagesize;
	chmp->chm_wbuf_len = 0;

	return 0;
}
113 1.4.4.2 yamt
114 1.4.4.2 yamt
115 1.4.4.3 yamt /*
116 1.4.4.3 yamt * chfs_fill_wbuf - write data to wbuf
117 1.4.4.2 yamt * Return the len of the buf what we didn't write to the wbuf.
118 1.4.4.2 yamt */
119 1.4.4.2 yamt static size_t
120 1.4.4.2 yamt chfs_fill_wbuf(struct chfs_mount *chmp, const u_char *buf, size_t len)
121 1.4.4.2 yamt {
122 1.4.4.3 yamt /* check available space */
123 1.4.4.2 yamt if (len && !chmp->chm_wbuf_len && (len >= chmp->chm_wbuf_pagesize)) {
124 1.4.4.2 yamt return 0;
125 1.4.4.2 yamt }
126 1.4.4.3 yamt /* check buffer's length */
127 1.4.4.2 yamt if (len > (chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len)) {
128 1.4.4.2 yamt len = chmp->chm_wbuf_pagesize - chmp->chm_wbuf_len;
129 1.4.4.2 yamt }
130 1.4.4.3 yamt /* write into the wbuf */
131 1.4.4.2 yamt memcpy(chmp->chm_wbuf + chmp->chm_wbuf_len, buf, len);
132 1.4.4.2 yamt
133 1.4.4.3 yamt /* update the actual length of writebuffer */
134 1.4.4.2 yamt chmp->chm_wbuf_len += (int) len;
135 1.4.4.2 yamt return len;
136 1.4.4.2 yamt }
137 1.4.4.2 yamt
138 1.4.4.3 yamt /*
139 1.4.4.2 yamt * chfs_write_wbuf - write to wbuf and then the flash
140 1.4.4.2 yamt * Returns zero in case of success.
141 1.4.4.2 yamt */
142 1.4.4.2 yamt int
143 1.4.4.2 yamt chfs_write_wbuf(struct chfs_mount* chmp, const struct iovec *invecs, long count,
144 1.4.4.2 yamt off_t to, size_t *retlen)
145 1.4.4.2 yamt {
146 1.4.4.2 yamt int invec, ret = 0;
147 1.4.4.2 yamt size_t wbuf_retlen, donelen = 0;
148 1.4.4.2 yamt int outvec_to = to;
149 1.4.4.2 yamt
150 1.4.4.2 yamt int lnr = chmp->chm_nextblock->lnr;
151 1.4.4.2 yamt
152 1.4.4.2 yamt KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
153 1.4.4.2 yamt KASSERT(mutex_owned(&chmp->chm_lock_sizes));
154 1.4.4.2 yamt KASSERT(!rw_write_held(&chmp->chm_lock_wbuf));
155 1.4.4.2 yamt
156 1.4.4.2 yamt rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);
157 1.4.4.2 yamt
158 1.4.4.2 yamt if (chmp->chm_wbuf_ofs == 0xffffffff) {
159 1.4.4.2 yamt chmp->chm_wbuf_ofs = PAGE_DIV(to);
160 1.4.4.2 yamt chmp->chm_wbuf_len = PAGE_MOD(to);
161 1.4.4.2 yamt memset(chmp->chm_wbuf, 0xff, chmp->chm_wbuf_pagesize);
162 1.4.4.2 yamt }
163 1.4.4.2 yamt
164 1.4.4.2 yamt if (EB_ADDRESS(to) != EB_ADDRESS(chmp->chm_wbuf_ofs)) {
165 1.4.4.2 yamt if (chmp->chm_wbuf_len) {
166 1.4.4.2 yamt ret = chfs_flush_wbuf(chmp, WBUF_SETPAD);
167 1.4.4.2 yamt if (ret)
168 1.4.4.2 yamt goto outerr;
169 1.4.4.2 yamt }
170 1.4.4.2 yamt chmp->chm_wbuf_ofs = PAGE_DIV(to);
171 1.4.4.2 yamt chmp->chm_wbuf_len = PAGE_MOD(to);
172 1.4.4.2 yamt }
173 1.4.4.2 yamt
174 1.4.4.2 yamt if (to != PAD(chmp->chm_wbuf_ofs + chmp->chm_wbuf_len)) {
175 1.4.4.2 yamt dbg("to: %llu != %zu\n", (unsigned long long)to,
176 1.4.4.2 yamt PAD(chmp->chm_wbuf_ofs + chmp->chm_wbuf_len));
177 1.4.4.2 yamt dbg("Non-contiguous write\n");
178 1.4.4.2 yamt panic("BUG\n");
179 1.4.4.2 yamt }
180 1.4.4.2 yamt
181 1.4.4.2 yamt /* adjust alignment offset */
182 1.4.4.2 yamt if (chmp->chm_wbuf_len != PAGE_MOD(to)) {
183 1.4.4.2 yamt chmp->chm_wbuf_len = PAGE_MOD(to);
184 1.4.4.3 yamt /* take care of alignement to next page */
185 1.4.4.2 yamt if (!chmp->chm_wbuf_len) {
186 1.4.4.2 yamt chmp->chm_wbuf_len += chmp->chm_wbuf_pagesize;
187 1.4.4.2 yamt ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
188 1.4.4.2 yamt if (ret)
189 1.4.4.2 yamt goto outerr;
190 1.4.4.2 yamt }
191 1.4.4.2 yamt }
192 1.4.4.2 yamt
193 1.4.4.2 yamt for (invec = 0; invec < count; invec++) {
194 1.4.4.2 yamt int vlen = invecs[invec].iov_len;
195 1.4.4.2 yamt u_char* v = invecs[invec].iov_base;
196 1.4.4.2 yamt
197 1.4.4.3 yamt /* fill the whole wbuf */
198 1.4.4.2 yamt wbuf_retlen = chfs_fill_wbuf(chmp, v, vlen);
199 1.4.4.2 yamt if (chmp->chm_wbuf_len == chmp->chm_wbuf_pagesize) {
200 1.4.4.2 yamt ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
201 1.4.4.2 yamt if (ret) {
202 1.4.4.2 yamt goto outerr;
203 1.4.4.2 yamt }
204 1.4.4.2 yamt }
205 1.4.4.3 yamt
206 1.4.4.2 yamt vlen -= wbuf_retlen;
207 1.4.4.2 yamt outvec_to += wbuf_retlen;
208 1.4.4.2 yamt v += wbuf_retlen;
209 1.4.4.2 yamt donelen += wbuf_retlen;
210 1.4.4.3 yamt
211 1.4.4.3 yamt /* if there is more residual data than the length of the wbuf
212 1.4.4.3 yamt * write it out directly until it's fit in the wbuf */
213 1.4.4.2 yamt if (vlen >= chmp->chm_wbuf_pagesize) {
214 1.4.4.2 yamt ret = chfs_write_leb(chmp, lnr, v, outvec_to, PAGE_DIV(vlen), &wbuf_retlen);
215 1.4.4.2 yamt vlen -= wbuf_retlen;
216 1.4.4.2 yamt outvec_to += wbuf_retlen;
217 1.4.4.2 yamt chmp->chm_wbuf_ofs = outvec_to;
218 1.4.4.2 yamt v += wbuf_retlen;
219 1.4.4.2 yamt donelen += wbuf_retlen;
220 1.4.4.2 yamt }
221 1.4.4.3 yamt
222 1.4.4.3 yamt /* write the residual data to the wbuf */
223 1.4.4.2 yamt wbuf_retlen = chfs_fill_wbuf(chmp, v, vlen);
224 1.4.4.2 yamt if (chmp->chm_wbuf_len == chmp->chm_wbuf_pagesize) {
225 1.4.4.2 yamt ret = chfs_flush_wbuf(chmp, WBUF_NOPAD);
226 1.4.4.2 yamt if (ret)
227 1.4.4.2 yamt goto outerr;
228 1.4.4.2 yamt }
229 1.4.4.2 yamt
230 1.4.4.2 yamt outvec_to += wbuf_retlen;
231 1.4.4.2 yamt donelen += wbuf_retlen;
232 1.4.4.2 yamt }
233 1.4.4.2 yamt *retlen = donelen;
234 1.4.4.2 yamt rw_exit(&chmp->chm_lock_wbuf);
235 1.4.4.2 yamt return ret;
236 1.4.4.2 yamt
237 1.4.4.2 yamt outerr:
238 1.4.4.2 yamt *retlen = 0;
239 1.4.4.2 yamt return ret;
240 1.4.4.2 yamt }
241 1.4.4.2 yamt
242 1.4.4.3 yamt /*
243 1.4.4.3 yamt * chfs_flush_peding_wbuf - write wbuf to the flash
244 1.4.4.3 yamt * Used when we must flush wbuf right now.
245 1.4.4.3 yamt * If wbuf has free space, pad it to the size of wbuf and write out.
246 1.4.4.3 yamt */
247 1.4.4.2 yamt int chfs_flush_pending_wbuf(struct chfs_mount *chmp)
248 1.4.4.2 yamt {
249 1.4.4.2 yamt int err;
250 1.4.4.2 yamt KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
251 1.4.4.2 yamt mutex_enter(&chmp->chm_lock_sizes);
252 1.4.4.2 yamt rw_enter(&chmp->chm_lock_wbuf, RW_WRITER);
253 1.4.4.2 yamt err = chfs_flush_wbuf(chmp, WBUF_SETPAD);
254 1.4.4.2 yamt rw_exit(&chmp->chm_lock_wbuf);
255 1.4.4.2 yamt mutex_exit(&chmp->chm_lock_sizes);
256 1.4.4.2 yamt return err;
257 1.4.4.2 yamt }
258