/*	$NetBSD: cfi_0002.c,v 1.6.2.2 2011/12/27 17:35:48 matt Exp $	*/
/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Cliff Neighbors.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_flash.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cfi_0002.c,v 1.6.2.2 2011/12/27 17:35:48 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cdefs.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/proc.h>		/* for yield() */
#include <sys/time.h>

#include <sys/bus.h>

#include <dev/nor/nor.h>
#include <dev/nor/cfi.h>
#include <dev/nor/cfi_0002.h>


static void cfi_0002_version_init(struct cfi * const);
static int  cfi_0002_read_page(device_t, flash_off_t, uint8_t *);
static int  cfi_0002_program_page(device_t, flash_off_t, const uint8_t *);
static int  cfi_0002_erase_block(device_t, flash_off_t);
static int  cfi_0002_erase_all(device_t);
static int  cfi_0002_busy(device_t, flash_off_t, u_long);
static int  cfi_0002_busy_wait(struct cfi * const, flash_off_t, u_long);
static int  cfi_0002_busy_poll(struct cfi * const, flash_off_t, u_long);
static int  cfi_0002_busy_yield(struct cfi * const, flash_off_t, u_long);
static int  cfi_0002_busy_dq7(struct cfi * const, flash_off_t);
#ifdef NOTYET
static bool cfi_0002_busy_reg(struct cfi * const, flash_off_t);
#endif


static const char *page_mode_str[] = {
	"(not supported)",
	"4 word page",
	"8 word page",
	"16 word page",
};

static const char *wp_mode_str[] = {
	"Flash device without WP Protect (No Boot)",
	"Eight 8 kB Sectors at TOP and Bottom with WP (Dual Boot)",
	"Bottom Boot Device with WP Protect (Bottom Boot)",
	"Top Boot Device with WP Protect (Top Boot)",
	"Uniform, Bottom WP Protect (Uniform Bottom Boot)",
	"Uniform, Top WP Protect (Uniform Top Boot)",
	"WP Protect for all sectors",
	"Uniform, Top or Bottom WP Protect",
};


static inline const char *
cfi_0002_page_mode_str(uint8_t mode)
{
	if (mode >= __arraycount(page_mode_str))
		panic("%s: mode %d out of range", __func__, mode);
	return page_mode_str[mode];
}

static inline const char *
cfi_0002_wp_mode_str(uint8_t mode)
{
	if (mode >= __arraycount(wp_mode_str))
		panic("%s: mode %d out of range", __func__, mode);
	return wp_mode_str[mode];
}

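/*
 * The CFI query timing fields used below are log2-encoded: the 'typ'
 * value is log2 of the typical time and the 'max' value is log2 of the
 * multiplier over typical, so the worst case is (1 << (typ + max))
 * units.  Buffer-write times are given in usec and erase times in msec,
 * hence the 1000UL scaling in the erase helpers.
 */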
/*
 * cfi_0002_time_write_nbyte - maximum usec delay waiting for write buffer
 */
static inline u_long
cfi_0002_time_write_nbyte(struct cfi *cfi)
{
	u_int shft = cfi->cfi_qry_data.write_nbyte_time_typ;
	shft += cfi->cfi_qry_data.write_nbyte_time_max;
	u_long usec = 1UL << shft;
	return usec;
}

/*
 * cfi_0002_time_erase_blk - maximum usec delay waiting for erase block
 */
static inline u_long
cfi_0002_time_erase_blk(struct cfi *cfi)
{
	u_int shft = cfi->cfi_qry_data.erase_blk_time_typ;
	shft += cfi->cfi_qry_data.erase_blk_time_max;
	u_long usec = 1000UL << shft;
	return usec;
}

/*
 * cfi_0002_time_erase_all - maximum usec delay waiting for erase chip
 */
static inline u_long
cfi_0002_time_erase_all(struct cfi *cfi)
{
	u_int shft = cfi->cfi_qry_data.erase_chip_time_typ;
	shft += cfi->cfi_qry_data.erase_chip_time_max;
	u_long usec = 1000UL << shft;
	return usec;
}

/*
 * cfi_0002_time_dflt - maximum usec delay to use waiting for ready
 *
 * Use the maximum delay for the chip erase function;
 * that should be the worst non-pathological case.
 */
static inline u_long
cfi_0002_time_dflt(struct cfi *cfi)
{
	return cfi_0002_time_erase_all(cfi);
}

void
cfi_0002_init(struct nor_softc * const sc, struct cfi * const cfi,
    struct nor_chip * const chip)
{
	CFI_0002_STATS_INIT(sc->sc_dev, cfi);

	cfi_0002_version_init(cfi);

	cfi->cfi_ops.cfi_reset = cfi_reset_std;
	cfi->cfi_yield_time = 500;		/* 500 usec */

	/*
	 * page size for buffered write; write_nbyte_size_max is
	 * log2 of the write buffer size in bytes
	 */
	chip->nc_page_size =
	    1 << cfi->cfi_qry_data.write_nbyte_size_max;

	/* these are unused */
	chip->nc_spare_size = 0;
	chip->nc_badmarker_offs = 0;

	/* establish command-set-specific interface ops */
	sc->sc_nor_if->read_page = cfi_0002_read_page;
	sc->sc_nor_if->program_page = cfi_0002_program_page;
	sc->sc_nor_if->erase_block = cfi_0002_erase_block;
	sc->sc_nor_if->erase_all = cfi_0002_erase_all;
	sc->sc_nor_if->busy = cfi_0002_busy;
}

/*
 * cfi_0002_version_init - command set version-specific initialization
 *
 * see "Programmer's Guide for the Spansion 65 nm GL-S MirrorBit Eclipse(TM)
 * Flash Non-Volatile Memory Family Architecture" section 5.
 */
static void
cfi_0002_version_init(struct cfi * const cfi)
{
	const uint8_t major = cfi->cfi_qry_data.pri.cmd_0002.version_maj;
	const uint8_t minor = cfi->cfi_qry_data.pri.cmd_0002.version_min;

	if ((minor == '3') && (major == '1')) {
		/* cmdset version 1.3 */
		cfi->cfi_ops.cfi_busy = cfi_0002_busy_dq7;
		/*
		 * With NOTYET undefined, only the DQ7 busy op above and
		 * the panic fallback below are compiled in; the sector
		 * erase/program word ops and the 1.5 branch are stubbed
		 * out until they are implemented.
		 */
#ifdef NOTYET
		cfi->cfi_ops.cfi_erase_sector = cfi_0002_erase_sector_q;
		cfi->cfi_ops.cfi_program_word = cfi_0002_program_word_ub;
	} else if ((minor >= '5') && (major == '1')) {
		/* cmdset version 1.5 or later */
		cfi->cfi_ops.cfi_busy = cfi_0002_busy_reg;
		cfi->cfi_ops.cfi_erase_sector = cfi_0002_erase_sector_1;
		cfi->cfi_ops.cfi_program_word = cfi_0002_program_word_no_ub;
#endif
	} else {
		/* XXX this is excessive */
		panic("%s: unknown cmdset version %c.%c\n",
		    __func__, major, minor);
	}
}

void
cfi_0002_print(device_t self, struct cfi * const cfi)
{
#ifdef NOR_VERBOSE
	struct cmdset_0002_query_data *pri = &cfi->cfi_qry_data.pri.cmd_0002;

	aprint_normal_dev(self, "AMD/Fujitsu cmdset (0x0002) version=%c.%c\n",
	    pri->version_maj, pri->version_min);
	aprint_normal_dev(self, "page mode type: %s\n",
	    cfi_0002_page_mode_str(pri->page_mode_type));
	aprint_normal_dev(self, "wp protection: %s\n",
	    cfi_0002_wp_mode_str(pri->wp_prot));
	aprint_normal_dev(self, "program suspend %ssupported\n",
	    (pri->prog_susp == 0) ? "not " : "");
	aprint_normal_dev(self, "unlock bypass %ssupported\n",
	    (pri->unlock_bypass == 0) ? "not " : "");
	aprint_normal_dev(self, "secure silicon sector size %#x\n",
	    1 << pri->sss_size);
	aprint_normal_dev(self, "SW features %#x\n", pri->soft_feat);
	aprint_normal_dev(self, "page size %d\n", 1 << pri->page_size);
#endif
}

static int
cfi_0002_read_page(device_t self, flash_off_t offset, uint8_t *datap)
{
	struct nor_softc * const sc = device_private(self);
	KASSERT(sc != NULL);
	KASSERT(sc->sc_nor_if != NULL);
	struct cfi *cfi = (struct cfi * const)sc->sc_nor_if->private;
	KASSERT(cfi != NULL);
	struct nor_chip * const chip = &sc->sc_chip;
	KASSERT(chip != NULL);
	KASSERT(chip->nc_page_mask != 0);
	KASSERT((offset & ~chip->nc_page_mask) == 0);
	KASSERT(chip->nc_page_size != 0);
	KASSERT((chip->nc_page_size & ((1 << cfi->cfi_portwidth) - 1)) == 0);

	CFI_0002_STATS_INC(cfi, read_page);

	bus_size_t count = chip->nc_page_size >> cfi->cfi_portwidth;
						/* #words/page */

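	/*
	 * Wait out any embedded operation first; while an erase or
	 * program is in progress, reads return status rather than
	 * array data.
	 */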
	int error = cfi_0002_busy_wait(cfi, offset, cfi_0002_time_dflt(cfi));
	if (error != 0)
		return error;

	switch(cfi->cfi_portwidth) {
	case 0:
		bus_space_read_region_1(cfi->cfi_bst, cfi->cfi_bsh, offset,
		    (uint8_t *)datap, count);
		break;
	case 1:
		bus_space_read_region_2(cfi->cfi_bst, cfi->cfi_bsh, offset,
		    (uint16_t *)datap, count);
		break;
	case 2:
		bus_space_read_region_4(cfi->cfi_bst, cfi->cfi_bsh, offset,
		    (uint32_t *)datap, count);
		break;
	default:
		panic("%s: bad port width %d\n", __func__, cfi->cfi_portwidth);
	}

	return 0;
}

static int
cfi_0002_program_page(device_t self, flash_off_t offset, const uint8_t *datap)
{
	struct nor_softc * const sc = device_private(self);
	KASSERT(sc != NULL);
	KASSERT(sc->sc_nor_if != NULL);
	struct cfi *cfi = (struct cfi * const)sc->sc_nor_if->private;
	KASSERT(cfi != NULL);
	struct nor_chip * const chip = &sc->sc_chip;
	KASSERT(chip != NULL);
	KASSERT(chip->nc_page_mask != 0);
	KASSERT((offset & ~chip->nc_page_mask) == 0);
	KASSERT(chip->nc_page_size != 0);
	KASSERT((chip->nc_page_size & ((1 << cfi->cfi_portwidth) - 1)) == 0);

	CFI_0002_STATS_INC(cfi, program_page);

	bus_size_t count = chip->nc_page_size >> cfi->cfi_portwidth;
						/* #words/page */
	bus_size_t sa = offset >> cfi->cfi_portwidth;	/* sector addr */
	uint32_t wc = count - 1;			/* #words - 1 */

	int error = cfi_0002_busy_wait(cfi, offset, cfi_0002_time_dflt(cfi));
	if (error != 0)
		return ETIMEDOUT;

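	/*
	 * Write Buffer Programming sequence: two unlock cycles, Write To
	 * Buffer (0x25) at the sector address, the word count minus one,
	 * the data words themselves, then the Program Buffer To Flash
	 * confirm (0x29) issued below.
	 */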
	cfi_cmd(cfi, 0x555, 0xaa);	/* unlock 1 */
	cfi_cmd(cfi, 0x2aa, 0x55);	/* unlock 2 */
	cfi_cmd(cfi, sa, 0x25);		/* Write To Buffer */
	cfi_cmd(cfi, sa, wc);

	switch(cfi->cfi_portwidth) {
	case 0:
		bus_space_write_region_1(cfi->cfi_bst, cfi->cfi_bsh, offset,
		    (const uint8_t *)datap, count);
		break;
	case 1:
		bus_space_write_region_2(cfi->cfi_bst, cfi->cfi_bsh, offset,
		    (const uint16_t *)datap, count);
		break;
	case 2:
		bus_space_write_region_4(cfi->cfi_bst, cfi->cfi_bsh, offset,
		    (const uint32_t *)datap, count);
		break;
	default:
		panic("%s: bad port width %d\n", __func__, cfi->cfi_portwidth);
	}

	cfi_cmd(cfi, sa, 0x29);		/* Write Buffer Program Confirm */

	error = cfi_0002_busy_wait(cfi, offset, cfi_0002_time_write_nbyte(cfi));

	return error;
}

static int
cfi_0002_erase_all(device_t self)
{
	struct nor_softc * const sc = device_private(self);
	KASSERT(sc != NULL);
	KASSERT(sc->sc_nor_if != NULL);
	struct cfi *cfi = (struct cfi * const)sc->sc_nor_if->private;
	KASSERT(cfi != NULL);
	struct nor_chip * const chip = &sc->sc_chip;
	KASSERT(chip != NULL);

	CFI_0002_STATS_INC(cfi, erase_all);

	int error = cfi_0002_busy_wait(cfi, 0, cfi_0002_time_dflt(cfi));
	if (error != 0)
		return ETIMEDOUT;

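	/*
	 * Chip Erase is the standard six-cycle 0002 sequence: two unlock
	 * cycles, erase setup (0x80), two more unlock cycles, then the
	 * Chip Erase command (0x10).
	 */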
	cfi_cmd(cfi, 0x555, 0xaa);	/* unlock 1 */
	cfi_cmd(cfi, 0x2aa, 0x55);	/* unlock 2 */
	cfi_cmd(cfi, 0x555, 0x80);	/* erase start */
	cfi_cmd(cfi, 0x555, 0xaa);	/* unlock 1 */
	cfi_cmd(cfi, 0x2aa, 0x55);	/* unlock 2 */
	cfi_cmd(cfi, 0x555, 0x10);	/* erase chip */

	error = cfi_0002_busy_wait(cfi, 0, cfi_0002_time_erase_all(cfi));

	return error;
}

static int
cfi_0002_erase_block(device_t self, flash_off_t offset)
{
	struct nor_softc * const sc = device_private(self);
	KASSERT(sc != NULL);
	KASSERT(sc->sc_nor_if != NULL);
	struct cfi *cfi = (struct cfi * const)sc->sc_nor_if->private;
	KASSERT(cfi != NULL);
	struct nor_chip * const chip = &sc->sc_chip;
	KASSERT(chip != NULL);
	KASSERT(chip->nc_block_mask != 0);
	KASSERT((offset & ~chip->nc_block_mask) == 0);
	KASSERT(chip->nc_block_size != 0);
	KASSERT((chip->nc_block_size & ((1 << cfi->cfi_portwidth) - 1)) == 0);

	CFI_0002_STATS_INC(cfi, erase_block);

	/* scale sector addr by portwidth or chipwidth ? */
	bus_size_t sa = offset >> cfi->cfi_portwidth;

	int error = cfi_0002_busy_wait(cfi, offset, cfi_0002_time_dflt(cfi));
	if (error != 0)
		return ETIMEDOUT;

	cfi_cmd(cfi, 0x555, 0xaa);	/* unlock 1 */
	cfi_cmd(cfi, 0x2aa, 0x55);	/* unlock 2 */
	cfi_cmd(cfi, 0x555, 0x80);	/* erase start */
	cfi_cmd(cfi, 0x555, 0xaa);	/* unlock 1 */
	cfi_cmd(cfi, 0x2aa, 0x55);	/* unlock 2 */
	cfi_cmd(cfi, sa, 0x30);		/* erase sector */

	error = cfi_0002_busy_wait(cfi, offset, cfi_0002_time_erase_blk(cfi));

	return error;
}

/*
 * cfi_0002_busy - nor_interface busy op
 */
static int
cfi_0002_busy(device_t self, flash_off_t offset, u_long usec)
{
	struct nor_softc *sc = device_private(self);
	KASSERT(sc != NULL);
	KASSERT(sc->sc_nor_if != NULL);
	struct cfi * const cfi = (struct cfi * const)sc->sc_nor_if->private;

	CFI_0002_STATS_INC(cfi, busy);

	return cfi_0002_busy_wait(cfi, offset, usec);
}

/*
 * cfi_0002_busy_wait - wait until device is not busy
 */
static int
cfi_0002_busy_wait(struct cfi * const cfi, flash_off_t offset, u_long usec)
{
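	/*
	 * Waits expected to exceed cfi_yield_time are serviced by
	 * yielding the CPU between status checks; shorter waits are
	 * busy-polled.
	 */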
	int error;

#ifdef CFI_0002_STATS
	struct timeval start;
	struct timeval now;
	struct timeval delta;

	if (usec > cfi->cfi_0002_stats.busy_usec_max)
		cfi->cfi_0002_stats.busy_usec_max = usec;
	if (usec < cfi->cfi_0002_stats.busy_usec_min)
		cfi->cfi_0002_stats.busy_usec_min = usec;
	microtime(&start);
#endif
	if (usec > cfi->cfi_yield_time) {
		error = cfi_0002_busy_yield(cfi, offset, usec);
#ifdef CFI_0002_STATS
		microtime(&now);
		cfi->cfi_0002_stats.busy_yield++;
		timersub(&now, &start, &delta);
		timeradd(&delta,
		    &cfi->cfi_0002_stats.busy_yield_tv,
		    &cfi->cfi_0002_stats.busy_yield_tv);
#endif
	} else {
		error = cfi_0002_busy_poll(cfi, offset, usec);
#ifdef CFI_0002_STATS
		microtime(&now);
		cfi->cfi_0002_stats.busy_poll++;
		timersub(&now, &start, &delta);
		timeradd(&delta,
		    &cfi->cfi_0002_stats.busy_poll_tv,
		    &cfi->cfi_0002_stats.busy_poll_tv);
#endif
	}
	return error;
}

/*
 * cfi_0002_busy_poll - poll until device is not busy
 */
static int
cfi_0002_busy_poll(struct cfi * const cfi, flash_off_t offset, u_long usec)
{
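	/* check roughly every 8 usec; usec >> 3 iterations covers the budget */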
	u_long count = usec >> 3;
	if (count == 0)
		count = 1;		/* enforce minimum */
	do {
		if (! cfi->cfi_ops.cfi_busy(cfi, offset))
			return 0;	/* not busy */
		DELAY(8);
	} while (count-- != 0);

	return ETIMEDOUT;		/* busy */
}

/*
 * cfi_0002_busy_yield - yield until device is not busy
 */
static int
cfi_0002_busy_yield(struct cfi * const cfi, flash_off_t offset, u_long usec)
{
	struct timeval start;
	struct timeval delta;
	struct timeval limit;
	struct timeval now;

	microtime(&start);

	/* try optimism */
	if (! cfi->cfi_ops.cfi_busy(cfi, offset)) {
		CFI_0002_STATS_INC(cfi, busy_yield_hit);
		return 0;		/* not busy */
	}
	CFI_0002_STATS_INC(cfi, busy_yield_miss);

	delta.tv_sec = usec / 1000000;
	delta.tv_usec = usec % 1000000;
	timeradd(&start, &delta, &limit);
	do {
		yield();
		microtime(&now);
		if (! cfi->cfi_ops.cfi_busy(cfi, offset))
			return 0;	/* not busy */
	} while (timercmp(&now, &limit, <));

	CFI_0002_STATS_INC(cfi, busy_yield_timo);

	return ETIMEDOUT;		/* busy */
}

/*
 * cfi_0002_busy_dq7 - DQ7 "toggle" method to check busy
 *
 * Check busy during/after erase, program, protect operation.
 *
 * NOTE:
 * Chip manufacturers (Spansion) plan to deprecate this method.
 */
static int
cfi_0002_busy_dq7(struct cfi * const cfi, flash_off_t offset)
{
	bus_space_tag_t bst = cfi->cfi_bst;
	bus_space_handle_t bsh = cfi->cfi_bsh;
	bool busy;

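	/*
	 * While an embedded operation is in progress, successive status
	 * reads return toggling bits, so two reads that differ mean the
	 * device is busy; once the operation completes, reads return
	 * stable array data.
	 */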
	switch(cfi->cfi_portwidth) {
	case 0: {
		uint8_t r0 = bus_space_read_1(bst, bsh, 0) & __BIT(7);
		uint8_t r1 = bus_space_read_1(bst, bsh, 0) & __BIT(7);
		busy = (r0 != r1);
		break;
	}
	case 1: {
		uint16_t r0 = bus_space_read_2(bst, bsh, 0);
		uint16_t r1 = bus_space_read_2(bst, bsh, 0);
		busy = (r0 != r1);
		break;
	}
	case 2: {
		uint32_t r0 = bus_space_read_4(bst, bsh, 0);
		uint32_t r1 = bus_space_read_4(bst, bsh, 0);
		busy = (r0 != r1);
		break;
	}
	default:
		busy = true;		/* appease gcc */
		panic("%s: bad port width %d\n",
		    __func__, cfi->cfi_portwidth);
	}
	return busy;
}

#ifdef NOTYET
/*
 * cfi_0002_busy_reg - read and evaluate the Read Status Register
 *
 * NOTE:
 * The Read Status Register is not present on all chips;
 * use the "toggle" method when it is not available.
 */
static bool
cfi_0002_busy_reg(struct cfi * const cfi, flash_off_t offset)
{
	bus_space_tag_t bst = cfi->cfi_bst;
	bus_space_handle_t bsh = cfi->cfi_bsh;
	uint32_t r;

	cfi_cmd(cfi, 0x555, 0x70);	/* Status Register Read */

	switch(cfi->cfi_portwidth) {
	case 0:
		r = bus_space_read_1(bst, bsh, 0);
		break;
	case 1:
		r = bus_space_read_2(bst, bsh, 0);
		break;
	case 2:
		r = bus_space_read_4(bst, bsh, 0);
		break;
	default:
		panic("%s: bad port width %d\n",
		    __func__, cfi->cfi_portwidth);
	}

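	/* bit 7 is the device-ready bit: busy while it reads 0, ready when 1 */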
	return ((r & __BIT(7)) == 0);
}
#endif	/* NOTYET */

#ifdef CFI_0002_STATS
void
cfi_0002_stats_reset(struct cfi *cfi)
{
	memset(&cfi->cfi_0002_stats, 0, sizeof(struct cfi_0002_stats));
	cfi->cfi_0002_stats.busy_usec_min = ~0;
}

void
cfi_0002_stats_print(struct cfi *cfi)
{
	printf("read_page %lu\n", cfi->cfi_0002_stats.read_page);
	printf("program_page %lu\n", cfi->cfi_0002_stats.program_page);
	printf("erase_all %lu\n", cfi->cfi_0002_stats.erase_all);
	printf("erase_block %lu\n", cfi->cfi_0002_stats.erase_block);
	printf("busy %lu\n", cfi->cfi_0002_stats.busy);

	printf("write_nbyte_time_typ %d\n",
	    cfi->cfi_qry_data.write_nbyte_time_typ);
	printf("write_nbyte_time_max %d\n",
	    cfi->cfi_qry_data.write_nbyte_time_max);

	printf("erase_blk_time_typ %d\n",
	    cfi->cfi_qry_data.erase_blk_time_typ);
	printf("erase_blk_time_max %d\n",
	    cfi->cfi_qry_data.erase_blk_time_max);

	printf("erase_chip_time_typ %d\n",
	    cfi->cfi_qry_data.erase_chip_time_typ);
	printf("erase_chip_time_max %d\n",
	    cfi->cfi_qry_data.erase_chip_time_max);

	printf("time_write_nbyte %lu\n", cfi_0002_time_write_nbyte(cfi));
	printf("time_erase_blk %lu\n", cfi_0002_time_erase_blk(cfi));
	printf("time_erase_all %lu\n", cfi_0002_time_erase_all(cfi));

	printf("busy_usec_min %lu\n", cfi->cfi_0002_stats.busy_usec_min);
	printf("busy_usec_max %lu\n", cfi->cfi_0002_stats.busy_usec_max);

	printf("busy_poll_tv %lld.%06ld\n",
	    (long long)cfi->cfi_0002_stats.busy_poll_tv.tv_sec,
	    (long)cfi->cfi_0002_stats.busy_poll_tv.tv_usec);
	printf("busy_yield_tv %lld.%06ld\n",
	    (long long)cfi->cfi_0002_stats.busy_yield_tv.tv_sec,
	    (long)cfi->cfi_0002_stats.busy_yield_tv.tv_usec);
	printf("busy_poll %lu\n", cfi->cfi_0002_stats.busy_poll);
	printf("busy_yield %lu\n", cfi->cfi_0002_stats.busy_yield);
	printf("busy_yield_hit %lu\n", cfi->cfi_0002_stats.busy_yield_hit);
	printf("busy_yield_miss %lu\n", cfi->cfi_0002_stats.busy_yield_miss);
	printf("busy_yield_timo %lu\n", cfi->cfi_0002_stats.busy_yield_timo);
}
#endif	/* CFI_0002_STATS */