/*	$NetBSD: cfi_0002.c,v 1.8 2015/06/09 21:42:21 matt Exp $	*/

/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Cliff Neighbors.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_flash.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cfi_0002.c,v 1.8 2015/06/09 21:42:21 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cdefs.h>
#include <sys/device.h>
#include <sys/endian.h>
#include <sys/sched.h>
#include <sys/time.h>

#include <sys/bus.h>

#include <dev/nor/nor.h>
#include <dev/nor/cfi.h>
#include <dev/nor/cfi_0002.h>


static void cfi_0002_version_init(struct cfi * const);
static int cfi_0002_read_page(device_t, flash_off_t, uint8_t *);
static int cfi_0002_program_page(device_t, flash_off_t, const uint8_t *);
static int cfi_0002_erase_block(device_t, flash_off_t);
static int cfi_0002_erase_all(device_t);
static int cfi_0002_busy(device_t, flash_off_t, u_long);
static int cfi_0002_busy_wait(struct cfi * const, flash_off_t, u_long);
static int cfi_0002_busy_poll(struct cfi * const, flash_off_t, u_long);
static int cfi_0002_busy_yield(struct cfi * const, flash_off_t, u_long);
static int cfi_0002_busy_dq7(struct cfi * const, flash_off_t);
#ifdef NOTYET
static int cfi_0002_busy_reg(struct cfi * const, flash_off_t);
#endif

#ifdef NOR_VERBOSE
static const char *page_mode_str[] = {
	"(not supported)",
	"4 word page",
	"8 word page",
	"16 word page",
};

static const char *wp_mode_str[] = {
	"Flash device without WP Protect (No Boot)",
	"Eight 8 kB Sectors at TOP and Bottom with WP (Dual Boot)",
	"Bottom Boot Device with WP Protect (Bottom Boot)",
	"Top Boot Device with WP Protect (Top Boot)",
	"Uniform, Bottom WP Protect (Uniform Bottom Boot)",
	"Uniform, Top WP Protect (Uniform Top Boot)",
	"WP Protect for all sectors",
	"Uniform, Top or Bottom WP Protect",
};

static inline const char *
cfi_0002_page_mode_str(uint8_t mode)
{
	if (mode >= __arraycount(page_mode_str))
		panic("%s: mode %d out of range", __func__, mode);
	return page_mode_str[mode];
}

static inline const char *
cfi_0002_wp_mode_str(uint8_t mode)
{
	if (mode >= __arraycount(wp_mode_str))
		panic("%s: mode %d out of range", __func__, mode);
	return wp_mode_str[mode];
}
#endif

/*
 * cfi_0002_time_write_nbyte - maximum usec delay waiting for write buffer
 */
static inline u_long
cfi_0002_time_write_nbyte(struct cfi *cfi)
{
	u_int shft = cfi->cfi_qry_data.write_nbyte_time_typ;
	shft += cfi->cfi_qry_data.write_nbyte_time_max;
	u_long usec = 1UL << shft;
	return usec;
}
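
/*
 * The CFI query encodes these limits as exponents: *_time_typ is log2 of
 * the typical time and *_time_max is log2 of the worst-case multiplier,
 * so the worst case is 2^(typ + max).  Buffer-write times are in usec;
 * the erase times used below are in msec, hence the 1000UL scaling.
 * Illustrative example (hypothetical values, not from a specific chip):
 * write_nbyte_time_typ == 9 (typical 512 usec) and write_nbyte_time_max
 * == 2 (at most 4x typical) give a timeout of 1UL << (9 + 2) == 2048 usec.
 */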

/*
 * cfi_0002_time_erase_blk - maximum usec delay waiting for erase block
 */
static inline u_long
cfi_0002_time_erase_blk(struct cfi *cfi)
{
	u_int shft = cfi->cfi_qry_data.erase_blk_time_typ;
	shft += cfi->cfi_qry_data.erase_blk_time_max;
	u_long usec = 1000UL << shft;
	return usec;
}

/*
 * cfi_0002_time_erase_all - maximum usec delay waiting for erase chip
 */
static inline u_long
cfi_0002_time_erase_all(struct cfi *cfi)
{
	u_int shft = cfi->cfi_qry_data.erase_chip_time_typ;
	shft += cfi->cfi_qry_data.erase_chip_time_max;
	u_long usec = 1000UL << shft;
	return usec;
}

/*
 * cfi_0002_time_dflt - maximum usec delay to use waiting for ready
 *
 * use the maximum delay for the chip erase function;
 * that should be the worst case for a healthy chip
 */
static inline u_long
cfi_0002_time_dflt(struct cfi *cfi)
{
	return cfi_0002_time_erase_all(cfi);
}

void
cfi_0002_init(struct nor_softc * const sc, struct cfi * const cfi,
    struct nor_chip * const chip)
{
	CFI_0002_STATS_INIT(sc->sc_dev, cfi);

	cfi_0002_version_init(cfi);

	cfi->cfi_ops.cfi_reset = cfi_reset_std;
	cfi->cfi_yield_time = 500;		/* 500 usec */

	/* page size for buffered write */
	chip->nc_page_size =
	    1 << cfi->cfi_qry_data.write_nbyte_size_max;

	/* these are unused */
	chip->nc_spare_size = 0;
	chip->nc_badmarker_offs = 0;

	/* establish command-set-specific interface ops */
	sc->sc_nor_if->read_page = cfi_0002_read_page;
	sc->sc_nor_if->program_page = cfi_0002_program_page;
	sc->sc_nor_if->erase_block = cfi_0002_erase_block;
	sc->sc_nor_if->erase_all = cfi_0002_erase_all;
	sc->sc_nor_if->busy = cfi_0002_busy;

}
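
/*
 * Note: nc_page_size above is derived from the CFI "maximum number of
 * bytes in a multi-byte write" field, which is likewise stored as a power
 * of two.  For example (hypothetical value), write_nbyte_size_max == 6
 * would mean a 64-byte write buffer, so cfi_0002_program_page() moves
 * 64 bytes per buffered program operation.
 */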

/*
 * cfi_0002_version_init - command set version-specific initialization
 *
 * see "Programmer's Guide for the Spansion 65 nm GL-S MirrorBit EclipseTM
 * Flash Non-Volatile Memory Family Architecture" section 5.
 */
static void
cfi_0002_version_init(struct cfi * const cfi)
{
	const uint8_t major = cfi->cfi_qry_data.pri.cmd_0002.version_maj;
	const uint8_t minor = cfi->cfi_qry_data.pri.cmd_0002.version_min;

	/* the primary extended query stores the version as ASCII characters */
	if ((minor == '3') && (major == '1')) {
		/* cmdset version 1.3 */
		cfi->cfi_ops.cfi_busy = cfi_0002_busy_dq7;
#ifdef NOTYET
		cfi->cfi_ops.cfi_erase_sector = cfi_0002_erase_sector_q;
		cfi->cfi_ops.cfi_program_word = cfi_0002_program_word_ub;
	} else if ((minor >= '5') && (major == '1')) {
		/* cmdset version 1.5 or later */
		cfi->cfi_ops.cfi_busy = cfi_0002_busy_reg;
		cfi->cfi_ops.cfi_erase_sector = cfi_0002_erase_sector_1;
		cfi->cfi_ops.cfi_program_word = cfi_0002_program_word_no_ub;
#endif
	} else {
		/* XXX this is excessive */
		panic("%s: unknown cmdset version %c.%c\n",
		    __func__, major, minor);
	}

}

void
cfi_0002_print(device_t self, struct cfi * const cfi)
{
#ifdef NOR_VERBOSE
	struct cmdset_0002_query_data *pri = &cfi->cfi_qry_data.pri.cmd_0002;

	aprint_normal_dev(self, "AMD/Fujitsu cmdset (0x0002) version=%c.%c\n",
	    pri->version_maj, pri->version_min);
	aprint_normal_dev(self, "page mode type: %s\n",
	    cfi_0002_page_mode_str(pri->page_mode_type));
	aprint_normal_dev(self, "wp protection: %s\n",
	    cfi_0002_wp_mode_str(pri->wp_prot));
	aprint_normal_dev(self, "program suspend %ssupported\n",
	    (pri->prog_susp == 0) ? "not " : "");
	aprint_normal_dev(self, "unlock bypass %ssupported\n",
	    (pri->unlock_bypass == 0) ? "not " : "");
	aprint_normal_dev(self, "secure silicon sector size %#x\n",
	    1 << pri->sss_size);
	aprint_normal_dev(self, "SW features %#x\n", pri->soft_feat);
	aprint_normal_dev(self, "page size %d\n", 1 << pri->page_size);
#endif
}

static int
cfi_0002_read_page(device_t self, flash_off_t offset, uint8_t *datap)
{
	struct nor_softc * const sc = device_private(self);
	KASSERT(sc != NULL);
	KASSERT(sc->sc_nor_if != NULL);
	struct cfi *cfi = (struct cfi * const)sc->sc_nor_if->private;
	KASSERT(cfi != NULL);
	struct nor_chip * const chip = &sc->sc_chip;
	KASSERT(chip != NULL);
	KASSERT(chip->nc_page_mask != 0);
	KASSERT((offset & ~chip->nc_page_mask) == 0);
	KASSERT(chip->nc_page_size != 0);
	KASSERT((chip->nc_page_size & ((1 << cfi->cfi_portwidth) - 1)) == 0);

	CFI_0002_STATS_INC(cfi, read_page);

	bus_size_t count = chip->nc_page_size >> cfi->cfi_portwidth;
						/* #words/page */

	int error = cfi_0002_busy_wait(cfi, offset, cfi_0002_time_dflt(cfi));
	if (error != 0)
		return error;

	switch(cfi->cfi_portwidth) {
	case 0:
		bus_space_read_region_1(cfi->cfi_bst, cfi->cfi_bsh, offset,
		    (uint8_t *)datap, count);
		break;
	case 1:
		bus_space_read_region_2(cfi->cfi_bst, cfi->cfi_bsh, offset,
		    (uint16_t *)datap, count);
		break;
	case 2:
		bus_space_read_region_4(cfi->cfi_bst, cfi->cfi_bsh, offset,
		    (uint32_t *)datap, count);
		break;
	default:
		panic("%s: bad port width %d\n", __func__, cfi->cfi_portwidth);
	};

	return 0;
}

static int
cfi_0002_program_page(device_t self, flash_off_t offset, const uint8_t *datap)
{
	struct nor_softc * const sc = device_private(self);
	KASSERT(sc != NULL);
	KASSERT(sc->sc_nor_if != NULL);
	struct cfi *cfi = (struct cfi * const)sc->sc_nor_if->private;
	KASSERT(cfi != NULL);
	struct nor_chip * const chip = &sc->sc_chip;
	KASSERT(chip != NULL);
	KASSERT(chip->nc_page_mask != 0);
	KASSERT((offset & ~chip->nc_page_mask) == 0);
	KASSERT(chip->nc_page_size != 0);
	KASSERT((chip->nc_page_size & ((1 << cfi->cfi_portwidth) - 1)) == 0);

	CFI_0002_STATS_INC(cfi, program_page);

	bus_size_t count = chip->nc_page_size >> cfi->cfi_portwidth;
						/* #words/page */
	bus_size_t sa = offset << (3 - cfi->cfi_portwidth);
						/* sector addr */
	uint32_t wc = count - 1;		/* #words - 1 */

	int error = cfi_0002_busy_wait(cfi, offset, cfi_0002_time_dflt(cfi));
	if (error != 0)
		return ETIMEDOUT;

	/*
	 * Write-buffer programming: two unlock cycles, then 0x25 and the
	 * word count (minus one) written to the sector address, followed
	 * by the page data itself and a final 0x29 confirm cycle.
	 */
	cfi_cmd(cfi, cfi->cfi_unlock_addr1, 0xaa);
	cfi_cmd(cfi, cfi->cfi_unlock_addr2, 0x55);
	cfi_cmd(cfi, sa, 0x25);			/* Write To Buffer */
	cfi_cmd(cfi, sa, wc);

	switch(cfi->cfi_portwidth) {
	case 0:
		bus_space_write_region_1(cfi->cfi_bst, cfi->cfi_bsh, offset,
		    (const uint8_t *)datap, count);
		break;
	case 1:
		bus_space_write_region_2(cfi->cfi_bst, cfi->cfi_bsh, offset,
		    (const uint16_t *)datap, count);
		break;
	case 2:
		bus_space_write_region_4(cfi->cfi_bst, cfi->cfi_bsh, offset,
		    (const uint32_t *)datap, count);
		break;
	default:
		panic("%s: bad port width %d\n", __func__, cfi->cfi_portwidth);
	};

	cfi_cmd(cfi, sa, 0x29);		/* Write Buffer Program Confirm */

	error = cfi_0002_busy_wait(cfi, offset, cfi_0002_time_write_nbyte(cfi));

	return error;
}

static int
cfi_0002_erase_all(device_t self)
{
	struct nor_softc * const sc = device_private(self);
	KASSERT(sc != NULL);
	KASSERT(sc->sc_nor_if != NULL);
	struct cfi *cfi = (struct cfi * const)sc->sc_nor_if->private;
	KASSERT(cfi != NULL);

	CFI_0002_STATS_INC(cfi, erase_all);

	int error = cfi_0002_busy_wait(cfi, 0, cfi_0002_time_dflt(cfi));
	if (error != 0)
		return ETIMEDOUT;

	cfi_cmd(cfi, cfi->cfi_unlock_addr1, 0xaa);
	cfi_cmd(cfi, cfi->cfi_unlock_addr2, 0x55);
	cfi_cmd(cfi, cfi->cfi_unlock_addr1, 0x80);	/* erase start */
	cfi_cmd(cfi, cfi->cfi_unlock_addr1, 0xaa);
	cfi_cmd(cfi, cfi->cfi_unlock_addr2, 0x55);
	cfi_cmd(cfi, cfi->cfi_unlock_addr1, 0x10);	/* erase chip */

	error = cfi_0002_busy_wait(cfi, 0, cfi_0002_time_erase_all(cfi));

	return error;
}

static int
cfi_0002_erase_block(device_t self, flash_off_t offset)
{
	struct nor_softc * const sc = device_private(self);
	KASSERT(sc != NULL);
	KASSERT(sc->sc_nor_if != NULL);
	struct cfi *cfi = (struct cfi * const)sc->sc_nor_if->private;
	KASSERT(cfi != NULL);

	CFI_0002_STATS_INC(cfi, erase_block);

	bus_size_t sa = offset << (3 - cfi->cfi_portwidth);

	int error = cfi_0002_busy_wait(cfi, offset, cfi_0002_time_dflt(cfi));
	if (error != 0)
		return ETIMEDOUT;

	cfi_cmd(cfi, cfi->cfi_unlock_addr1, 0xaa);
	cfi_cmd(cfi, cfi->cfi_unlock_addr2, 0x55);
	cfi_cmd(cfi, cfi->cfi_unlock_addr1, 0x80);	/* erase start */
	cfi_cmd(cfi, cfi->cfi_unlock_addr1, 0xaa);
	cfi_cmd(cfi, cfi->cfi_unlock_addr2, 0x55);
	cfi_cmd(cfi, sa, 0x30);				/* erase sector */

	error = cfi_0002_busy_wait(cfi, offset, cfi_0002_time_erase_blk(cfi));

	return error;
}
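
/*
 * Both erase paths above issue the same six-cycle command stream: two
 * unlock cycles, 0x80 (erase setup), two more unlock cycles, and a final
 * cycle that selects the scope - 0x10 at the unlock address for a chip
 * erase, or 0x30 at the sector address for a single block.  Completion
 * is then awaited with the corresponding CFI worst-case timeout.
 */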

/*
 * cfi_0002_busy - nor_interface busy op
 */
static int
cfi_0002_busy(device_t self, flash_off_t offset, u_long usec)
{
	struct nor_softc *sc = device_private(self);
	KASSERT(sc != NULL);
	KASSERT(sc->sc_nor_if != NULL);
	struct cfi * const cfi = (struct cfi * const)sc->sc_nor_if->private;

	CFI_0002_STATS_INC(cfi, busy);

	return cfi_0002_busy_wait(cfi, offset, usec);
}

/*
 * cfi_0002_busy_wait - wait until device is not busy
 */
static int
cfi_0002_busy_wait(struct cfi * const cfi, flash_off_t offset, u_long usec)
{
	int error;

#ifdef CFI_0002_STATS
	struct timeval start;
	struct timeval now;
	struct timeval delta;

	if (usec > cfi->cfi_0002_stats.busy_usec_max)
		cfi->cfi_0002_stats.busy_usec_max = usec;
	if (usec < cfi->cfi_0002_stats.busy_usec_min)
		cfi->cfi_0002_stats.busy_usec_min = usec;
	microtime(&start);
#endif
	if (usec > cfi->cfi_yield_time) {
		error = cfi_0002_busy_yield(cfi, offset, usec);
#ifdef CFI_0002_STATS
		microtime(&now);
		cfi->cfi_0002_stats.busy_yield++;
		timersub(&now, &start, &delta);
		timeradd(&delta,
		    &cfi->cfi_0002_stats.busy_yield_tv,
		    &cfi->cfi_0002_stats.busy_yield_tv);
#endif
	} else {
		error = cfi_0002_busy_poll(cfi, offset, usec);
#ifdef CFI_0002_STATS
		microtime(&now);
		cfi->cfi_0002_stats.busy_poll++;
		timersub(&now, &start, &delta);
		timeradd(&delta,
		    &cfi->cfi_0002_stats.busy_poll_tv,
		    &cfi->cfi_0002_stats.busy_poll_tv);
#endif
	}
	return error;
}
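
/*
 * Illustrative numbers (hypothetical, not measured): with cfi_yield_time
 * set to 500 usec in cfi_0002_init(), a short expected wait of, say,
 * 100 usec is busy-polled below in 8 usec DELAY() steps, while a
 * worst-case erase timeout of many milliseconds takes the yield path so
 * other threads can run while the flash is busy.
 */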

/*
 * cfi_0002_busy_poll - poll until device is not busy
 */
static int
cfi_0002_busy_poll(struct cfi * const cfi, flash_off_t offset, u_long usec)
{
	u_long count = usec >> 3;
	if (count == 0)
		count = 1;		/* enforce minimum */
	do {
		if (! cfi->cfi_ops.cfi_busy(cfi, offset))
			return 0;	/* not busy */
		DELAY(8);
	} while (count-- != 0);

	return ETIMEDOUT;		/* busy */
}

/*
 * cfi_0002_busy_yield - yield until device is not busy
 */
static int
cfi_0002_busy_yield(struct cfi * const cfi, flash_off_t offset, u_long usec)
{
	struct timeval start;
	struct timeval delta;
	struct timeval limit;
	struct timeval now;

	microtime(&start);

	/* try optimism */
	if (! cfi->cfi_ops.cfi_busy(cfi, offset)) {
		CFI_0002_STATS_INC(cfi, busy_yield_hit);
		return 0;		/* not busy */
	}
	CFI_0002_STATS_INC(cfi, busy_yield_miss);

	delta.tv_sec = usec / 1000000;
	delta.tv_usec = usec % 1000000;
	timeradd(&start, &delta, &limit);
	do {
		yield();
		microtime(&now);
		if (! cfi->cfi_ops.cfi_busy(cfi, offset))
			return 0;	/* not busy */
	} while (timercmp(&now, &limit, <));

	CFI_0002_STATS_INC(cfi, busy_yield_timo);

	return ETIMEDOUT;		/* busy */
}

/*
 * cfi_0002_busy_dq7 - DQ7 "toggle" method to check busy
 *
 * Check busy during/after an erase, program, or protect operation.
 *
 * NOTE:
 * Chip manufacturers (Spansion) plan to deprecate this method.
 */
static int
cfi_0002_busy_dq7(struct cfi * const cfi, flash_off_t offset)
{
	bus_space_tag_t bst = cfi->cfi_bst;
	bus_space_handle_t bsh = cfi->cfi_bsh;
	bool busy;

	switch(cfi->cfi_portwidth) {
	case 0: {
		uint8_t r0 = bus_space_read_1(bst, bsh, 0) & __BIT(7);
		uint8_t r1 = bus_space_read_1(bst, bsh, 0) & __BIT(7);
		busy = (r0 != r1);
		break;
	}
	case 1: {
		uint16_t r0 = bus_space_read_2(bst, bsh, 0);
		uint16_t r1 = bus_space_read_2(bst, bsh, 0);
		busy = (r0 != r1);
		break;
	}
	case 2: {
		uint32_t r0 = bus_space_read_4(bst, bsh, 0);
		uint32_t r1 = bus_space_read_4(bst, bsh, 0);
		busy = (r0 != r1);
		break;
	}
	default:
		busy = true;	/* appease gcc */
		panic("%s: bad port width %d\n",
		    __func__, cfi->cfi_portwidth);
	}
	return busy;
}
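
/*
 * The comparison above exploits the AMD-style status behaviour: while an
 * embedded program/erase operation is in progress, back-to-back reads of
 * the device return changing status bits, so two consecutive reads
 * differ; once the operation completes the device returns stable array
 * data and the reads match.  Any difference is therefore reported as
 * "busy".
 */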

#ifdef NOTYET
/*
 * cfi_0002_busy_reg - read and evaluate Read Status Register
 *
 * NOTE:
 * The Read Status Register is not present on all chips;
 * use the "toggle" method when it is not available.
 */
static int
cfi_0002_busy_reg(struct cfi * const cfi, flash_off_t offset)
{
	bus_space_tag_t bst = cfi->cfi_bst;
	bus_space_handle_t bsh = cfi->cfi_bsh;
	uint32_t r;

	cfi_cmd(cfi, cfi->cfi_unlock_addr1, 0x70);	/* Status Register Read */

	switch(cfi->cfi_portwidth) {
	case 0:
		r = bus_space_read_1(bst, bsh, 0);
		break;
	case 1:
		r = bus_space_read_2(bst, bsh, 0);
		break;
	case 2:
		r = bus_space_read_4(bst, bsh, 0);
		break;
	default:
		panic("%s: bad port width %d\n",
		    __func__, cfi->cfi_portwidth);
	}

	return ((r & __BIT(7)) == 0);
}
#endif	/* NOTYET */

#ifdef CFI_0002_STATS
void
cfi_0002_stats_reset(struct cfi *cfi)
{
	memset(&cfi->cfi_0002_stats, 0, sizeof(struct cfi_0002_stats));
	cfi->cfi_0002_stats.busy_usec_min = ~0;
}

void
cfi_0002_stats_print(struct cfi *cfi)
{
	printf("read_page %lu\n", cfi->cfi_0002_stats.read_page);
	printf("program_page %lu\n", cfi->cfi_0002_stats.program_page);
	printf("erase_all %lu\n", cfi->cfi_0002_stats.erase_all);
	printf("erase_block %lu\n", cfi->cfi_0002_stats.erase_block);
	printf("busy %lu\n", cfi->cfi_0002_stats.busy);

	printf("write_nbyte_time_typ %d\n",
	    cfi->cfi_qry_data.write_nbyte_time_typ);
	printf("write_nbyte_time_max %d\n",
	    cfi->cfi_qry_data.write_nbyte_time_max);

	printf("erase_blk_time_typ %d\n",
	    cfi->cfi_qry_data.erase_blk_time_typ);
	printf("erase_blk_time_max %d\n",
	    cfi->cfi_qry_data.erase_blk_time_max);

	printf("erase_chip_time_typ %d\n",
	    cfi->cfi_qry_data.erase_chip_time_typ);
	printf("erase_chip_time_max %d\n",
	    cfi->cfi_qry_data.erase_chip_time_max);

	printf("time_write_nbyte %lu\n", cfi_0002_time_write_nbyte(cfi));
	printf("time_erase_blk %lu\n", cfi_0002_time_erase_blk(cfi));
	printf("time_erase_all %lu\n", cfi_0002_time_erase_all(cfi));

	printf("busy_usec_min %lu\n", cfi->cfi_0002_stats.busy_usec_min);
	printf("busy_usec_max %lu\n", cfi->cfi_0002_stats.busy_usec_max);

	printf("busy_poll_tv %lld.%d\n",
	    cfi->cfi_0002_stats.busy_poll_tv.tv_sec,
	    cfi->cfi_0002_stats.busy_poll_tv.tv_usec);
	printf("busy_yield_tv %lld.%d\n",
	    cfi->cfi_0002_stats.busy_yield_tv.tv_sec,
	    cfi->cfi_0002_stats.busy_yield_tv.tv_usec);
	printf("busy_poll %lu\n", cfi->cfi_0002_stats.busy_poll);
	printf("busy_yield %lu\n", cfi->cfi_0002_stats.busy_yield);
	printf("busy_yield_hit %lu\n", cfi->cfi_0002_stats.busy_yield_hit);
	printf("busy_yield_miss %lu\n", cfi->cfi_0002_stats.busy_yield_miss);
	printf("busy_yield_timo %lu\n", cfi->cfi_0002_stats.busy_yield_timo);
}
#endif	/* CFI_0002_STATS */
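
/*
 * Note: the statistics above are compiled in only when CFI_0002_STATS is
 * defined.  A hypothetical usage sketch (not part of this driver): call
 * cfi_0002_stats_reset(cfi) before exercising the device and
 * cfi_0002_stats_print(cfi) afterwards to see how often busy waits took
 * the polling path versus the yielding path, and how much time each
 * accumulated.
 */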