/* if_iwm.c revision 1.57 (viewer header removed; not part of the source) */
      1 /*	$NetBSD: if_iwm.c,v 1.57 2017/01/10 04:27:04 nonaka Exp $	*/
      2 /*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
      3 #define IEEE80211_NO_HT
      4 /*
      5  * Copyright (c) 2014, 2016 genua gmbh <info (at) genua.de>
      6  *   Author: Stefan Sperling <stsp (at) openbsd.org>
      7  * Copyright (c) 2014 Fixup Software Ltd.
      8  *
      9  * Permission to use, copy, modify, and distribute this software for any
     10  * purpose with or without fee is hereby granted, provided that the above
     11  * copyright notice and this permission notice appear in all copies.
     12  *
     13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     20  */
     21 
     22 /*-
     23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
     24  * which were used as the reference documentation for this implementation.
     25  *
     26  ***********************************************************************
     27  *
     28  * This file is provided under a dual BSD/GPLv2 license.  When using or
     29  * redistributing this file, you may do so under either license.
     30  *
     31  * GPL LICENSE SUMMARY
     32  *
     33  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
     34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
     35  * Copyright(c) 2016 Intel Deutschland GmbH
     36  *
     37  * This program is free software; you can redistribute it and/or modify
     38  * it under the terms of version 2 of the GNU General Public License as
     39  * published by the Free Software Foundation.
     40  *
     41  * This program is distributed in the hope that it will be useful, but
     42  * WITHOUT ANY WARRANTY; without even the implied warranty of
     43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     44  * General Public License for more details.
     45  *
     46  * You should have received a copy of the GNU General Public License
     47  * along with this program; if not, write to the Free Software
     48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
     49  * USA
     50  *
     51  * The full GNU General Public License is included in this distribution
     52  * in the file called COPYING.
     53  *
     54  * Contact Information:
     55  *  Intel Linux Wireless <ilw (at) linux.intel.com>
     56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
     57  *
     58  *
     59  * BSD LICENSE
     60  *
     61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
     62  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
     63  * Copyright(c) 2016 Intel Deutschland GmbH
     64  * All rights reserved.
     65  *
     66  * Redistribution and use in source and binary forms, with or without
     67  * modification, are permitted provided that the following conditions
     68  * are met:
     69  *
     70  *  * Redistributions of source code must retain the above copyright
     71  *    notice, this list of conditions and the following disclaimer.
     72  *  * Redistributions in binary form must reproduce the above copyright
     73  *    notice, this list of conditions and the following disclaimer in
     74  *    the documentation and/or other materials provided with the
     75  *    distribution.
     76  *  * Neither the name Intel Corporation nor the names of its
     77  *    contributors may be used to endorse or promote products derived
     78  *    from this software without specific prior written permission.
     79  *
     80  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     81  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     82  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     83  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     84  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     86  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     87  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     88  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     89  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     90  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     91  */
     92 
     93 /*-
     94  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
     95  *
     96  * Permission to use, copy, modify, and distribute this software for any
     97  * purpose with or without fee is hereby granted, provided that the above
     98  * copyright notice and this permission notice appear in all copies.
     99  *
    100  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    101  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    102  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    103  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    104  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    105  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    106  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    107  */
    108 
    109 #include <sys/cdefs.h>
    110 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.57 2017/01/10 04:27:04 nonaka Exp $");
    111 
    112 #include <sys/param.h>
    113 #include <sys/conf.h>
    114 #include <sys/kernel.h>
    115 #include <sys/kmem.h>
    116 #include <sys/mbuf.h>
    117 #include <sys/mutex.h>
    118 #include <sys/proc.h>
    119 #include <sys/socket.h>
    120 #include <sys/sockio.h>
    121 #include <sys/sysctl.h>
    122 #include <sys/systm.h>
    123 
    124 #include <sys/cpu.h>
    125 #include <sys/bus.h>
    126 #include <sys/workqueue.h>
    127 #include <machine/endian.h>
    128 #include <machine/intr.h>
    129 
    130 #include <dev/pci/pcireg.h>
    131 #include <dev/pci/pcivar.h>
    132 #include <dev/pci/pcidevs.h>
    133 #include <dev/firmload.h>
    134 
    135 #include <net/bpf.h>
    136 #include <net/if.h>
    137 #include <net/if_dl.h>
    138 #include <net/if_media.h>
    139 #include <net/if_ether.h>
    140 
    141 #include <netinet/in.h>
    142 #include <netinet/ip.h>
    143 
    144 #include <net80211/ieee80211_var.h>
    145 #include <net80211/ieee80211_amrr.h>
    146 #include <net80211/ieee80211_radiotap.h>
    147 
    148 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
    149 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
    150 
    151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
    152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
    153 
    154 #ifdef IWM_DEBUG
    155 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
    156 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
    157 int iwm_debug = 0;
    158 #else
    159 #define DPRINTF(x)	do { ; } while (0)
    160 #define DPRINTFN(n, x)	do { ; } while (0)
    161 #endif
    162 
    163 #include <dev/pci/if_iwmreg.h>
    164 #include <dev/pci/if_iwmvar.h>
    165 
/*
 * Channel numbers that can appear in the NVM (EEPROM) channel map of
 * 7000-series devices.  The position in this table is the NVM channel
 * index; entries before IWM_NUM_2GHZ_CHANNELS are 2.4 GHz channels,
 * the remainder are 5 GHz channels.
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

/*
 * Same as above for 8000-series devices, whose NVM covers a larger
 * set of 5 GHz channels.
 */
static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

/* Number of 2.4 GHz entries at the start of the channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS	14
    185 
/*
 * Rate table mapping a driver rate index to the legacy PLCP signal
 * value and the corresponding HT PLCP value used by the firmware.
 * The "rate" field is the IEEE rate in 500 kb/s units (e.g. 2 pairs
 * with IWM_RATE_1M_PLCP, i.e. 1 Mb/s).  Legacy-only rates carry
 * IWM_RATE_HT_SISO_MCS_INV_PLCP in the HT column.
 */
static const struct iwm_rate {
	uint8_t rate;		/* IEEE rate, in 500 kb/s units */
	uint8_t plcp;		/* legacy PLCP value for this rate */
	uint8_t ht_plcp;	/* HT SISO MCS PLCP value, or INV if none */
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
/* First CCK entry (1 Mb/s) in iwm_rates[]. */
#define IWM_RIDX_CCK	0
/* First OFDM entry (6 Mb/s) in iwm_rates[]. */
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
    211 
#ifndef IEEE80211_NO_HT
/*
 * Convert an MCS index into an iwm_rates[] index.
 * Covers SISO MCS 0-7 only; entry i gives the iwm_rates[] slot whose
 * ht_plcp is IWM_RATE_HT_SISO_MCS_<i>_PLCP.
 */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif
    225 
/* One NVM section read from the device: a length-tagged byte buffer. */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in "data" */
	uint8_t *data;		/* section contents (driver-allocated) */
};
    230 
/*
 * Deferred 802.11 state-transition request, queued on a workqueue so
 * the transition can run in thread context.
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	enum ieee80211_state ns_nstate;	/* target net80211 state */
	int ns_arg;			/* argument passed to newstate */
	int ns_generation;		/* detects stale/cancelled requests */
};
    237 
    238 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
    239 static int	iwm_firmware_store_section(struct iwm_softc *,
    240 		    enum iwm_ucode_type, uint8_t *, size_t);
    241 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
    242 static int	iwm_read_firmware(struct iwm_softc *);
    243 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
    244 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
    245 #ifdef IWM_DEBUG
    246 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
    247 #endif
    248 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
    249 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
    250 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
    251 static int	iwm_nic_lock(struct iwm_softc *);
    252 static void	iwm_nic_unlock(struct iwm_softc *);
    253 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
    254 		    uint32_t);
    255 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    256 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    257 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
    258 		    bus_size_t, bus_size_t);
    259 static void	iwm_dma_contig_free(struct iwm_dma_info *);
    260 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    261 static void	iwm_disable_rx_dma(struct iwm_softc *);
    262 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    263 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    264 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
    265 		    int);
    266 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    267 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    268 static void	iwm_enable_rfkill_int(struct iwm_softc *);
    269 static int	iwm_check_rfkill(struct iwm_softc *);
    270 static void	iwm_enable_interrupts(struct iwm_softc *);
    271 static void	iwm_restore_interrupts(struct iwm_softc *);
    272 static void	iwm_disable_interrupts(struct iwm_softc *);
    273 static void	iwm_ict_reset(struct iwm_softc *);
    274 static int	iwm_set_hw_ready(struct iwm_softc *);
    275 static int	iwm_prepare_card_hw(struct iwm_softc *);
    276 static void	iwm_apm_config(struct iwm_softc *);
    277 static int	iwm_apm_init(struct iwm_softc *);
    278 static void	iwm_apm_stop(struct iwm_softc *);
    279 static int	iwm_allow_mcast(struct iwm_softc *);
    280 static int	iwm_start_hw(struct iwm_softc *);
    281 static void	iwm_stop_device(struct iwm_softc *);
    282 static void	iwm_nic_config(struct iwm_softc *);
    283 static int	iwm_nic_rx_init(struct iwm_softc *);
    284 static int	iwm_nic_tx_init(struct iwm_softc *);
    285 static int	iwm_nic_init(struct iwm_softc *);
    286 static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
    287 static int	iwm_post_alive(struct iwm_softc *);
    288 static struct iwm_phy_db_entry *
    289 		iwm_phy_db_get_section(struct iwm_softc *,
    290 		    enum iwm_phy_db_section_type, uint16_t);
    291 static int	iwm_phy_db_set_section(struct iwm_softc *,
    292 		    struct iwm_calib_res_notif_phy_db *, uint16_t);
    293 static int	iwm_is_valid_channel(uint16_t);
    294 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
    295 static uint16_t iwm_channel_id_to_papd(uint16_t);
    296 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
    297 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
    298 		    uint8_t **, uint16_t *, uint16_t);
    299 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
    300 		    void *);
    301 static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
    302 		    enum iwm_phy_db_section_type, uint8_t);
    303 static int	iwm_send_phy_db_data(struct iwm_softc *);
    304 static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
    305 		    struct iwm_time_event_cmd_v1 *);
    306 static int	iwm_send_time_event_cmd(struct iwm_softc *,
    307 		    const struct iwm_time_event_cmd_v2 *);
    308 static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
    309 		    uint32_t, uint32_t);
    310 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
    311 		    uint16_t, uint8_t *, uint16_t *);
    312 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
    313 		    uint16_t *, size_t);
    314 static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
    315 		    const uint8_t *, size_t);
    316 #ifndef IEEE80211_NO_HT
    317 static void	iwm_setup_ht_rates(struct iwm_softc *);
    318 static void	iwm_htprot_task(void *);
    319 static void	iwm_update_htprot(struct ieee80211com *,
    320 		    struct ieee80211_node *);
    321 static int	iwm_ampdu_rx_start(struct ieee80211com *,
    322 		    struct ieee80211_node *, uint8_t);
    323 static void	iwm_ampdu_rx_stop(struct ieee80211com *,
    324 		    struct ieee80211_node *, uint8_t);
    325 static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
    326 		    uint8_t, uint16_t, int);
    327 #ifdef notyet
    328 static int	iwm_ampdu_tx_start(struct ieee80211com *,
    329 		    struct ieee80211_node *, uint8_t);
    330 static void	iwm_ampdu_tx_stop(struct ieee80211com *,
    331 		    struct ieee80211_node *, uint8_t);
    332 #endif
    333 static void	iwm_ba_task(void *);
    334 #endif
    335 
    336 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
    337 		    const uint16_t *, const uint16_t *, const uint16_t *,
    338 		    const uint16_t *, const uint16_t *);
    339 static void	iwm_set_hw_address_8000(struct iwm_softc *,
    340 		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
    341 static int	iwm_parse_nvm_sections(struct iwm_softc *,
    342 		    struct iwm_nvm_section *);
    343 static int	iwm_nvm_init(struct iwm_softc *);
    344 static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
    345 		    const uint8_t *, uint32_t);
    346 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
    347 		    const uint8_t *, uint32_t);
    348 static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
    349 static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
    350 		    struct iwm_fw_sects *, int , int *);
    351 static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
    352 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
    353 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
    354 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
    355 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
    356 static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
    357 		    enum iwm_ucode_type);
    358 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
    359 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
    360 static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
    361 static int	iwm_get_signal_strength(struct iwm_softc *,
    362 		    struct iwm_rx_phy_info *);
    363 static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
    364 		    struct iwm_rx_packet *, struct iwm_rx_data *);
    365 static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
    366 static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
    367 		    struct iwm_rx_data *);
    368 static void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,		    struct iwm_node *);
    369 static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
    370 		    struct iwm_rx_data *);
    371 static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
    372 		    uint32_t);
    373 #if 0
    374 static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
    375 static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
    376 #endif
    377 static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
    378 		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
    379 static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
    380 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
    381 		    uint8_t, uint8_t);
    382 static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
    383 		    uint8_t, uint8_t, uint32_t, uint32_t);
    384 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
    385 static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
    386 		    uint16_t, const void *);
    387 static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
    388 		    uint32_t *);
    389 static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
    390 		    const void *, uint32_t *);
    391 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
    392 static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
    393 #if 0
    394 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
    395 		    uint16_t);
    396 #endif
    397 static const struct iwm_rate *
    398 		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
    399 		    struct ieee80211_frame *, struct iwm_tx_cmd *);
    400 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
    401 		    struct ieee80211_node *, int);
    402 static void	iwm_led_enable(struct iwm_softc *);
    403 static void	iwm_led_disable(struct iwm_softc *);
    404 static int	iwm_led_is_enabled(struct iwm_softc *);
    405 static void	iwm_led_blink_timeout(void *);
    406 static void	iwm_led_blink_start(struct iwm_softc *);
    407 static void	iwm_led_blink_stop(struct iwm_softc *);
    408 static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
    409 		    struct iwm_beacon_filter_cmd *);
    410 static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
    411 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
    412 static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
    413 		    int);
    414 static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
    415 		    struct iwm_mac_power_cmd *);
    416 static int	iwm_power_mac_update_mode(struct iwm_softc *,
    417 		    struct iwm_node *);
    418 static int	iwm_power_update_device(struct iwm_softc *);
    419 #ifdef notyet
    420 static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
    421 #endif
    422 static int	iwm_disable_beacon_filter(struct iwm_softc *);
    423 static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
    424 static int	iwm_add_aux_sta(struct iwm_softc *);
    425 static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
    426 static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
    427 #ifdef notyet
    428 static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
    429 static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
    430 #endif
    431 static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
    432 		    struct iwm_scan_channel_cfg_lmac *, int);
    433 static int	iwm_fill_probe_req(struct iwm_softc *,
    434 		    struct iwm_scan_probe_req *);
    435 static int	iwm_lmac_scan(struct iwm_softc *);
    436 static int	iwm_config_umac_scan(struct iwm_softc *);
    437 static int	iwm_umac_scan(struct iwm_softc *);
    438 static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
    439 static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
    440 		    int *);
    441 static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
    442 		    struct iwm_mac_ctx_cmd *, uint32_t, int);
    443 static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
    444 		    struct iwm_mac_data_sta *, int);
    445 static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
    446 		    uint32_t, int);
    447 static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
    448 static int	iwm_auth(struct iwm_softc *);
    449 static int	iwm_assoc(struct iwm_softc *);
    450 static void	iwm_calib_timeout(void *);
    451 #ifndef IEEE80211_NO_HT
    452 static void	iwm_setrates_task(void *);
    453 static int	iwm_setrates(struct iwm_node *);
    454 #endif
    455 static int	iwm_media_change(struct ifnet *);
    456 static void	iwm_newstate_cb(struct work *, void *);
    457 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
    458 static void	iwm_endscan(struct iwm_softc *);
    459 static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
    460 		    struct ieee80211_node *);
    461 static int	iwm_sf_config(struct iwm_softc *, int);
    462 static int	iwm_send_bt_init_conf(struct iwm_softc *);
    463 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
    464 static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
    465 static int	iwm_init_hw(struct iwm_softc *);
    466 static int	iwm_init(struct ifnet *);
    467 static void	iwm_start(struct ifnet *);
    468 static void	iwm_stop(struct ifnet *, int);
    469 static void	iwm_watchdog(struct ifnet *);
    470 static int	iwm_ioctl(struct ifnet *, u_long, void *);
    471 #ifdef IWM_DEBUG
    472 static const char *iwm_desc_lookup(uint32_t);
    473 static void	iwm_nic_error(struct iwm_softc *);
    474 static void	iwm_nic_umac_error(struct iwm_softc *);
    475 #endif
    476 static void	iwm_notif_intr(struct iwm_softc *);
    477 static void	iwm_softintr(void *);
    478 static int	iwm_intr(void *);
    479 static int	iwm_preinit(struct iwm_softc *);
    480 static void	iwm_attach_hook(device_t);
    481 static void	iwm_attach(device_t, device_t, void *);
    482 #if 0
    483 static void	iwm_init_task(void *);
    484 static int	iwm_activate(device_t, enum devact);
    485 static void	iwm_wakeup(struct iwm_softc *);
    486 #endif
    487 static void	iwm_radiotap_attach(struct iwm_softc *);
    488 static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);
    489 
    490 static int iwm_sysctl_root_num;
    491 
    492 static int
    493 iwm_firmload(struct iwm_softc *sc)
    494 {
    495 	struct iwm_fw_info *fw = &sc->sc_fw;
    496 	firmware_handle_t fwh;
    497 	int err;
    498 
    499 	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
    500 		return 0;
    501 
    502 	/* Open firmware image. */
    503 	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
    504 	if (err) {
    505 		aprint_error_dev(sc->sc_dev,
    506 		    "could not get firmware handle %s\n", sc->sc_fwname);
    507 		return err;
    508 	}
    509 
    510 	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
    511 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
    512 		fw->fw_rawdata = NULL;
    513 	}
    514 
    515 	fw->fw_rawsize = firmware_get_size(fwh);
    516 	/*
    517 	 * Well, this is how the Linux driver checks it ....
    518 	 */
    519 	if (fw->fw_rawsize < sizeof(uint32_t)) {
    520 		aprint_error_dev(sc->sc_dev,
    521 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
    522 		err = EINVAL;
    523 		goto out;
    524 	}
    525 
    526 	/* some sanity */
    527 	if (fw->fw_rawsize > IWM_FWMAXSIZE) {
    528 		aprint_error_dev(sc->sc_dev,
    529 		    "firmware size is ridiculous: %zd bytes\n", fw->fw_rawsize);
    530 		err = EINVAL;
    531 		goto out;
    532 	}
    533 
    534 	/* Read the firmware. */
    535 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
    536 	if (fw->fw_rawdata == NULL) {
    537 		aprint_error_dev(sc->sc_dev,
    538 		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
    539 		err = ENOMEM;
    540 		goto out;
    541 	}
    542 	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
    543 	if (err) {
    544 		aprint_error_dev(sc->sc_dev,
    545 		    "could not read firmware %s\n", sc->sc_fwname);
    546 		goto out;
    547 	}
    548 
    549 	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
    550  out:
    551 	/* caller will release memory, if necessary */
    552 
    553 	firmware_close(fwh);
    554 	return err;
    555 }
    556 
    557 /*
    558  * just maintaining status quo.
    559  */
    560 static void
    561 iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
    562 {
    563 	struct ieee80211com *ic = &sc->sc_ic;
    564 	struct ieee80211_frame *wh;
    565 	uint8_t subtype;
    566 
    567 	wh = mtod(m, struct ieee80211_frame *);
    568 
    569 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
    570 		return;
    571 
    572 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
    573 
    574 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
    575 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
    576 		return;
    577 
    578 	int chan = le32toh(sc->sc_last_phy_info.channel);
    579 	if (chan < __arraycount(ic->ic_channels))
    580 		ic->ic_curchan = &ic->ic_channels[chan];
    581 }
    582 
    583 static int
    584 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
    585 {
    586 	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;
    587 
    588 	if (dlen < sizeof(*l) ||
    589 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
    590 		return EINVAL;
    591 
    592 	/* we don't actually store anything for now, always use s/w crypto */
    593 
    594 	return 0;
    595 }
    596 
    597 static int
    598 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    599     uint8_t *data, size_t dlen)
    600 {
    601 	struct iwm_fw_sects *fws;
    602 	struct iwm_fw_onesect *fwone;
    603 
    604 	if (type >= IWM_UCODE_TYPE_MAX)
    605 		return EINVAL;
    606 	if (dlen < sizeof(uint32_t))
    607 		return EINVAL;
    608 
    609 	fws = &sc->sc_fw.fw_sects[type];
    610 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
    611 		return EINVAL;
    612 
    613 	fwone = &fws->fw_sect[fws->fw_count];
    614 
    615 	/* first 32bit are device load offset */
    616 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
    617 
    618 	/* rest is data */
    619 	fwone->fws_data = data + sizeof(uint32_t);
    620 	fwone->fws_len = dlen - sizeof(uint32_t);
    621 
    622 	/* for freeing the buffer during driver unload */
    623 	fwone->fws_alloc = data;
    624 	fwone->fws_allocsize = dlen;
    625 
    626 	fws->fw_count++;
    627 	fws->fw_totlen += fwone->fws_len;
    628 
    629 	return 0;
    630 }
    631 
/* On-wire layout of the IWM_UCODE_TLV_DEF_CALIB TLV payload. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* little-endian iwm_ucode_type */
	struct iwm_tlv_calib_ctrl calib;	/* default calibration triggers */
} __packed;
    636 
    637 static int
    638 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
    639 {
    640 	const struct iwm_tlv_calib_data *def_calib = data;
    641 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
    642 
    643 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
    644 		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
    645 		    DEVNAME(sc), ucode_type));
    646 		return EINVAL;
    647 	}
    648 
    649 	sc->sc_default_calib[ucode_type].flow_trigger =
    650 	    def_calib->calib.flow_trigger;
    651 	sc->sc_default_calib[ucode_type].event_trigger =
    652 	    def_calib->calib.event_trigger;
    653 
    654 	return 0;
    655 }
    656 
    657 static int
    658 iwm_read_firmware(struct iwm_softc *sc)
    659 {
    660 	struct iwm_fw_info *fw = &sc->sc_fw;
    661 	struct iwm_tlv_ucode_header *uhdr;
    662 	struct iwm_ucode_tlv tlv;
    663 	enum iwm_ucode_tlv_type tlv_type;
    664 	uint8_t *data;
    665 	int err, status;
    666 	size_t len;
    667 
    668 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
    669 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
    670 	} else {
    671 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
    672 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
    673 	}
    674 	status = fw->fw_status;
    675 
    676 	if (status == IWM_FW_STATUS_DONE)
    677 		return 0;
    678 
    679 	err = iwm_firmload(sc);
    680 	if (err) {
    681 		aprint_error_dev(sc->sc_dev,
    682 		    "could not read firmware %s (error %d)\n",
    683 		    sc->sc_fwname, err);
    684 		goto out;
    685 	}
    686 
    687 	sc->sc_capaflags = 0;
    688 	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
    689 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
    690 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
    691 
    692 	uhdr = (void *)fw->fw_rawdata;
    693 	if (*(uint32_t *)fw->fw_rawdata != 0
    694 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
    695 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
    696 		    sc->sc_fwname);
    697 		err = EINVAL;
    698 		goto out;
    699 	}
    700 
    701 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
    702 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
    703 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
    704 	    IWM_UCODE_API(le32toh(uhdr->ver)));
    705 	data = uhdr->data;
    706 	len = fw->fw_rawsize - sizeof(*uhdr);
    707 
    708 	while (len >= sizeof(tlv)) {
    709 		size_t tlv_len;
    710 		void *tlv_data;
    711 
    712 		memcpy(&tlv, data, sizeof(tlv));
    713 		tlv_len = le32toh(tlv.length);
    714 		tlv_type = le32toh(tlv.type);
    715 
    716 		len -= sizeof(tlv);
    717 		data += sizeof(tlv);
    718 		tlv_data = data;
    719 
    720 		if (len < tlv_len) {
    721 			aprint_error_dev(sc->sc_dev,
    722 			    "firmware too short: %zu bytes\n", len);
    723 			err = EINVAL;
    724 			goto parse_out;
    725 		}
    726 
    727 		switch (tlv_type) {
    728 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
    729 			if (tlv_len < sizeof(uint32_t)) {
    730 				err = EINVAL;
    731 				goto parse_out;
    732 			}
    733 			sc->sc_capa_max_probe_len
    734 			    = le32toh(*(uint32_t *)tlv_data);
    735 			/* limit it to something sensible */
    736 			if (sc->sc_capa_max_probe_len >
    737 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
    738 				err = EINVAL;
    739 				goto parse_out;
    740 			}
    741 			break;
    742 		case IWM_UCODE_TLV_PAN:
    743 			if (tlv_len) {
    744 				err = EINVAL;
    745 				goto parse_out;
    746 			}
    747 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
    748 			break;
    749 		case IWM_UCODE_TLV_FLAGS:
    750 			if (tlv_len < sizeof(uint32_t)) {
    751 				err = EINVAL;
    752 				goto parse_out;
    753 			}
    754 			/*
    755 			 * Apparently there can be many flags, but Linux driver
    756 			 * parses only the first one, and so do we.
    757 			 *
    758 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
    759 			 * Intentional or a bug?  Observations from
    760 			 * current firmware file:
    761 			 *  1) TLV_PAN is parsed first
    762 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
    763 			 * ==> this resets TLV_PAN to itself... hnnnk
    764 			 */
    765 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
    766 			break;
    767 		case IWM_UCODE_TLV_CSCHEME:
    768 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
    769 			if (err)
    770 				goto parse_out;
    771 			break;
    772 		case IWM_UCODE_TLV_NUM_OF_CPU: {
    773 			uint32_t num_cpu;
    774 			if (tlv_len != sizeof(uint32_t)) {
    775 				err = EINVAL;
    776 				goto parse_out;
    777 			}
    778 			num_cpu = le32toh(*(uint32_t *)tlv_data);
    779 			if (num_cpu < 1 || num_cpu > 2) {
    780 				err = EINVAL;
    781 				goto parse_out;
    782 			}
    783 			break;
    784 		}
    785 		case IWM_UCODE_TLV_SEC_RT:
    786 			err = iwm_firmware_store_section(sc,
    787 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
    788 			if (err)
    789 				goto parse_out;
    790 			break;
    791 		case IWM_UCODE_TLV_SEC_INIT:
    792 			err = iwm_firmware_store_section(sc,
    793 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
    794 			if (err)
    795 				goto parse_out;
    796 			break;
    797 		case IWM_UCODE_TLV_SEC_WOWLAN:
    798 			err = iwm_firmware_store_section(sc,
    799 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
    800 			if (err)
    801 				goto parse_out;
    802 			break;
    803 		case IWM_UCODE_TLV_DEF_CALIB:
    804 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
    805 				err = EINVAL;
    806 				goto parse_out;
    807 			}
    808 			err = iwm_set_default_calib(sc, tlv_data);
    809 			if (err)
    810 				goto parse_out;
    811 			break;
    812 		case IWM_UCODE_TLV_PHY_SKU:
    813 			if (tlv_len != sizeof(uint32_t)) {
    814 				err = EINVAL;
    815 				goto parse_out;
    816 			}
    817 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
    818 			break;
    819 
    820 		case IWM_UCODE_TLV_API_CHANGES_SET: {
    821 			struct iwm_ucode_api *api;
    822 			if (tlv_len != sizeof(*api)) {
    823 				err = EINVAL;
    824 				goto parse_out;
    825 			}
    826 			api = (struct iwm_ucode_api *)tlv_data;
    827 			/* Flags may exceed 32 bits in future firmware. */
    828 			if (le32toh(api->api_index) > 0) {
    829 				goto parse_out;
    830 			}
    831 			sc->sc_ucode_api = le32toh(api->api_flags);
    832 			break;
    833 		}
    834 
    835 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
    836 			struct iwm_ucode_capa *capa;
    837 			int idx, i;
    838 			if (tlv_len != sizeof(*capa)) {
    839 				err = EINVAL;
    840 				goto parse_out;
    841 			}
    842 			capa = (struct iwm_ucode_capa *)tlv_data;
    843 			idx = le32toh(capa->api_index);
    844 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
    845 				goto parse_out;
    846 			}
    847 			for (i = 0; i < 32; i++) {
    848 				if (!ISSET(le32toh(capa->api_capa), __BIT(i)))
    849 					continue;
    850 				setbit(sc->sc_enabled_capa, i + (32 * idx));
    851 			}
    852 			break;
    853 		}
    854 
    855 		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
    856 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
    857 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
    858 			/* ignore, not used by current driver */
    859 			break;
    860 
    861 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
    862 			err = iwm_firmware_store_section(sc,
    863 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
    864 			    tlv_len);
    865 			if (err)
    866 				goto parse_out;
    867 			break;
    868 
    869 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
    870 			if (tlv_len != sizeof(uint32_t)) {
    871 				err = EINVAL;
    872 				goto parse_out;
    873 			}
    874 			sc->sc_capa_n_scan_channels =
    875 			  le32toh(*(uint32_t *)tlv_data);
    876 			break;
    877 
    878 		case IWM_UCODE_TLV_FW_VERSION:
    879 			if (tlv_len != sizeof(uint32_t) * 3) {
    880 				err = EINVAL;
    881 				goto parse_out;
    882 			}
    883 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
    884 			    "%d.%d.%d",
    885 			    le32toh(((uint32_t *)tlv_data)[0]),
    886 			    le32toh(((uint32_t *)tlv_data)[1]),
    887 			    le32toh(((uint32_t *)tlv_data)[2]));
    888 			break;
    889 
    890 		default:
    891 			DPRINTF(("%s: unknown firmware section %d, abort\n",
    892 			    DEVNAME(sc), tlv_type));
    893 			err = EINVAL;
    894 			goto parse_out;
    895 		}
    896 
    897 		len -= roundup(tlv_len, 4);
    898 		data += roundup(tlv_len, 4);
    899 	}
    900 
    901 	KASSERT(err == 0);
    902 
    903  parse_out:
    904 	if (err) {
    905 		aprint_error_dev(sc->sc_dev,
    906 		    "firmware parse error, section type %d\n", tlv_type);
    907 	}
    908 
    909 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
    910 		aprint_error_dev(sc->sc_dev,
    911 		    "device uses unsupported power ops\n");
    912 		err = ENOTSUP;
    913 	}
    914 
    915  out:
    916 	if (err)
    917 		fw->fw_status = IWM_FW_STATUS_NONE;
    918 	else
    919 		fw->fw_status = IWM_FW_STATUS_DONE;
    920 	wakeup(&sc->sc_fw);
    921 
    922 	if (err && fw->fw_rawdata != NULL) {
    923 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
    924 		fw->fw_rawdata = NULL;
    925 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
    926 		/* don't touch fw->fw_status */
    927 		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
    928 	}
    929 	return err;
    930 }
    931 
    932 static uint32_t
    933 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
    934 {
    935 	IWM_WRITE(sc,
    936 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
    937 	IWM_BARRIER_READ_WRITE(sc);
    938 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
    939 }
    940 
    941 static void
    942 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    943 {
    944 	IWM_WRITE(sc,
    945 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
    946 	IWM_BARRIER_WRITE(sc);
    947 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
    948 }
    949 
    950 #ifdef IWM_DEBUG
    951 static int
    952 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
    953 {
    954 	int offs;
    955 	uint32_t *vals = buf;
    956 
    957 	if (iwm_nic_lock(sc)) {
    958 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
    959 		for (offs = 0; offs < dwords; offs++)
    960 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
    961 		iwm_nic_unlock(sc);
    962 		return 0;
    963 	}
    964 	return EBUSY;
    965 }
    966 #endif
    967 
    968 static int
    969 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
    970 {
    971 	int offs;
    972 	const uint32_t *vals = buf;
    973 
    974 	if (iwm_nic_lock(sc)) {
    975 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
    976 		/* WADDR auto-increments */
    977 		for (offs = 0; offs < dwords; offs++) {
    978 			uint32_t val = vals ? vals[offs] : 0;
    979 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
    980 		}
    981 		iwm_nic_unlock(sc);
    982 		return 0;
    983 	}
    984 	return EBUSY;
    985 }
    986 
    987 static int
    988 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    989 {
    990 	return iwm_write_mem(sc, addr, &val, 1);
    991 }
    992 
    993 static int
    994 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    995     int timo)
    996 {
    997 	for (;;) {
    998 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
    999 			return 1;
   1000 		}
   1001 		if (timo < 10) {
   1002 			return 0;
   1003 		}
   1004 		timo -= 10;
   1005 		DELAY(10);
   1006 	}
   1007 }
   1008 
/*
 * Request MAC access (wakes the device so its internal registers can
 * be reached) and wait for the grant.  Returns 1 on success, 0 on
 * timeout.  Must be paired with iwm_nic_unlock().
 */
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* 8000-series parts need a short settling delay before polling. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	/* Wait (~15ms max) for clock-ready and not going-to-sleep. */
	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		/* Kick the device with an NMI; the caller sees failure. */
		aprint_error_dev(sc->sc_dev, "device timeout\n");
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}
   1032 
/* Drop the MAC access request taken by iwm_nic_lock(). */
static void
iwm_nic_unlock(struct iwm_softc *sc)
{
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
   1039 
   1040 static void
   1041 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
   1042     uint32_t mask)
   1043 {
   1044 	uint32_t val;
   1045 
   1046 	/* XXX: no error path? */
   1047 	if (iwm_nic_lock(sc)) {
   1048 		val = iwm_read_prph(sc, reg) & mask;
   1049 		val |= bits;
   1050 		iwm_write_prph(sc, reg, val);
   1051 		iwm_nic_unlock(sc);
   1052 	}
   1053 }
   1054 
/* Set 'bits' in a PRPH register, leaving all other bits untouched. */
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
   1060 
/* Clear 'bits' in a PRPH register, leaving all other bits untouched. */
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
   1066 
/*
 * Allocate, map and load a physically contiguous DMA area of 'size'
 * bytes with the given alignment.  The memory is zeroed and synced.
 * On failure every partially acquired resource is released via
 * iwm_dma_contig_free().  Returns 0 or a bus_dma(9) error code.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	/* One map with a single segment covering the whole area. */
	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	/* Present zeroed memory to the device. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}
   1106 
/*
 * Tear down a DMA area set up by iwm_dma_contig_alloc().  Safe to call
 * on a partially initialized iwm_dma_info (used as its error path).
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			/* Unload before unmapping/freeing the memory. */
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
   1123 
/*
 * Allocate the RX ring: the descriptor array, the status area shared
 * with the device, and one DMA map plus receive buffer per slot.
 * On failure everything is released via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create a DMA map and attach an mbuf to every ring slot. */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}
   1174 
   1175 static void
   1176 iwm_disable_rx_dma(struct iwm_softc *sc)
   1177 {
   1178 	int ntries;
   1179 
   1180 	if (iwm_nic_lock(sc)) {
   1181 		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
   1182 		for (ntries = 0; ntries < 1000; ntries++) {
   1183 			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
   1184 			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
   1185 				break;
   1186 			DELAY(10);
   1187 		}
   1188 		iwm_nic_unlock(sc);
   1189 	}
   1190 }
   1191 
/*
 * Rewind the RX ring pointer and clear the status area shared with
 * the device.  The attached RX buffers are kept as-is.
 */
void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
   1200 
/*
 * Release all RX ring resources: descriptor/status DMA areas, and
 * each slot's mbuf and DMA map.  Tolerates partially built rings.
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Unload the map before freeing its mbuf. */
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}
   1225 
/*
 * Allocate TX ring 'qid': the TFD descriptor array for every ring,
 * plus per-slot device-command buffers and DMA maps for rings up to
 * and including the command queue.  On failure everything is released
 * via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Precompute each slot's command/scratch physical addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* Sanity: we walked exactly over the whole command area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}
   1293 
/*
 * Drop all queued frames from a TX ring, clear its descriptors and
 * rewind the ring pointers.  The ring's DMA resources are kept.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	/* This queue is no longer full. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
   1318 
/*
 * Release all TX ring resources: descriptor/command DMA areas, and
 * each slot's mbuf and DMA map.  Tolerates partially built rings.
 */
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			/* Unload the map before freeing its mbuf. */
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}
   1343 
/* Mask all interrupts except the RF-kill switch toggle. */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1350 
   1351 static int
   1352 iwm_check_rfkill(struct iwm_softc *sc)
   1353 {
   1354 	uint32_t v;
   1355 	int s;
   1356 	int rv;
   1357 
   1358 	s = splnet();
   1359 
   1360 	/*
   1361 	 * "documentation" is not really helpful here:
   1362 	 *  27:	HW_RF_KILL_SW
   1363 	 *	Indicates state of (platform's) hardware RF-Kill switch
   1364 	 *
   1365 	 * But apparently when it's off, it's on ...
   1366 	 */
   1367 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
   1368 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
   1369 	if (rv) {
   1370 		sc->sc_flags |= IWM_FLAG_RFKILL;
   1371 	} else {
   1372 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
   1373 	}
   1374 
   1375 	splx(s);
   1376 	return rv;
   1377 }
   1378 
/*
 * Enable the default interrupt set and remember the mask so that
 * iwm_restore_interrupts() can re-apply it.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1385 
/* Re-apply the last interrupt mask saved in sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1391 
/* Mask all device interrupts and acknowledge any that are pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
   1405 
/*
 * Clear the ICT (interrupt cause table) shared with the device,
 * reprogram its base address and switch interrupt handling to ICT
 * mode, then re-enable interrupts.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Zero the table before handing it back to the device. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Ack anything pending before re-enabling. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
   1429 
   1430 #define IWM_HW_READY_TIMEOUT 50
/*
 * Assert NIC_READY and poll for the device to acknowledge it.
 * Returns nonzero when the hardware is ready; on success it also
 * tells the device that the OS is alive.
 */
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
   1449 #undef IWM_HW_READY_TIMEOUT
   1450 
/*
 * Bring the card to the HW-ready state, asking it to "prepare" and
 * retrying for up to ~150ms.  Returns 0 when ready, ETIMEDOUT
 * otherwise.
 */
static int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int t = 0;

	/* Fast path: already ready. */
	if (iwm_set_hw_ready(sc))
		return 0;

	DELAY(100);

	/* If HW is not ready, prepare the conditions to check again */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		if (iwm_set_hw_ready(sc))
			return 0;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	return ETIMEDOUT;
}
   1474 
/*
 * Match the device's L0S setting to the platform's PCIe ASPM L1
 * configuration read from config space.  NOTE(review): the Linux
 * driver logs "Disabling L0S" when it sets
 * IWM_CSR_GIO_REG_VAL_L0S_ENABLED and "Enabling" when it clears it,
 * so the register bit's name appears inverted — keeping the code as
 * the reference driver has it.
 */
static void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}
   1492 
   1493 /*
   1494  * Start up NIC's basic functionality after it has been reset
   1495  * e.g. after platform boot or shutdown.
   1496  * NOTE:  This does not load uCode nor start the embedded processor
   1497  */
static int
iwm_apm_init(struct iwm_softc *sc)
{
	int err = 0;

	/* Disable L0S exit timer (platform NMI workaround) */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwm_apm_config(sc);

#if 0 /* not for 7k/8k */
	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
		    trans->cfg->base_params->pll_cfg_val);
#endif

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwm_write_prph()
	 * and accesses to uCode SRAM.
	 */
	/* ~25ms maximum wait. */
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		aprint_error_dev(sc->sc_dev,
		    "timeout waiting for clock stabilization\n");
		err = ETIMEDOUT;
		goto out;
	}

	if (sc->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		/* Let the DMA clock stabilize (see comment above). */
		DELAY(20);

		/* Disable L1-Active */
		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
		    IWM_APMG_RTC_INT_STT_RFKILL);
	}
 out:
	if (err)
		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
	return err;
}
   1601 
/*
 * Halt the device's busmaster DMA and wait (briefly) for the master
 * to report disabled.  A timeout is logged but not fatal.
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
   1614 
/*
 * Bring the hardware up from reset: prepare the card, soft-reset it,
 * run APM init, then arm the RF-kill interrupt and sample the switch.
 * Returns 0 or an errno from the failing step.
 */
static int
iwm_start_hw(struct iwm_softc *sc)
{
	int err;

	err = iwm_prepare_card_hw(sc);
	if (err)
		return err;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(10);

	err = iwm_apm_init(sc);
	if (err)
		return err;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
   1637 
/*
 * Full device shutdown: mask interrupts, stop the TX scheduler and
 * all DMA channels, drain the rings, power down the DMA clocks, stop
 * the APM and reset the on-board processor.  RF-kill monitoring is
 * kept alive afterwards.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait up to ~4ms for the channel to idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
   1700 
/*
 * Program the HW_IF_CONFIG register with the MAC step/dash (from the
 * hardware revision) and the radio type/step/dash (from the firmware
 * PHY_SKU TLV), plus a 7000-family early-power-off workaround.
 */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Unpack the radio configuration from sc_fw_phy_config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
   1739 
/*
 * Program the RX DMA engine: clear the status page, point the hardware
 * at the RX descriptor ring and status area, and enable the channel.
 * Returns 0 on success or EBUSY if the NIC could not be locked.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Clear the RX status page before the hardware starts writing it. */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Stop RX DMA while reconfiguring and reset the ring pointers. */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	/* Set interrupt coalescing timer to default value. */
	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
   1791 
/*
 * Program the TX DMA engine: point the hardware at the "keep warm"
 * page and at each TX ring's descriptor array, with the scheduler
 * deactivated during reconfiguration.
 * Returns 0 on success or EBUSY if the NIC could not be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}

	/* Let the scheduler auto-activate queues. */
	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
   1822 
/*
 * Bring the NIC out of reset: init the APM, select the power source
 * (7000 family), apply the interface configuration and set up the RX
 * and TX DMA engines.  Returns 0 on success or an errno value.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int err;

	iwm_apm_init(sc);
	/* 7000 family: run from VMAIN power. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);

	iwm_nic_config(sc);

	err = iwm_nic_rx_init(sc);
	if (err)
		return err;

	err = iwm_nic_tx_init(sc);
	if (err)
		return err;

	DPRINTF(("shadow registers enabled\n"));
	/* Enable shadow registers so some CSRs can be read while asleep. */
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
   1849 
/*
 * Per-access-category TX FIFO assignment.
 * NOTE(review): the listed order (VO, VI, BE, BK) presumably matches the
 * index convention used by callers — confirm against the EDCA AC numbering
 * used where this table is consumed.
 */
static const uint8_t iwm_ac_to_tx_fifo[] = {
	IWM_TX_FIFO_VO,
	IWM_TX_FIFO_VI,
	IWM_TX_FIFO_BE,
	IWM_TX_FIFO_BK,
};
   1856 
   1857 static int
   1858 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
   1859 {
   1860 	if (!iwm_nic_lock(sc)) {
   1861 		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
   1862 		return EBUSY;
   1863 	}
   1864 
   1865 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
   1866 
   1867 	if (qid == IWM_CMD_QUEUE) {
   1868 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
   1869 		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
   1870 		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
   1871 
   1872 		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
   1873 
   1874 		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
   1875 
   1876 		iwm_write_mem32(sc,
   1877 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
   1878 
   1879 		/* Set scheduler window size and frame limit. */
   1880 		iwm_write_mem32(sc,
   1881 		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
   1882 		    sizeof(uint32_t),
   1883 		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
   1884 		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
   1885 		    ((IWM_FRAME_LIMIT
   1886 		        << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
   1887 		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
   1888 
   1889 		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
   1890 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
   1891 		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
   1892 		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
   1893 		    IWM_SCD_QUEUE_STTS_REG_MSK);
   1894 	} else {
   1895 		struct iwm_scd_txq_cfg_cmd cmd;
   1896 		int err;
   1897 
   1898 		iwm_nic_unlock(sc);
   1899 
   1900 		memset(&cmd, 0, sizeof(cmd));
   1901 		cmd.scd_queue = qid;
   1902 		cmd.enable = 1;
   1903 		cmd.sta_id = sta_id;
   1904 		cmd.tx_fifo = fifo;
   1905 		cmd.aggregate = 0;
   1906 		cmd.window = IWM_FRAME_LIMIT;
   1907 
   1908 		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
   1909 		    &cmd);
   1910 		if (err)
   1911 			return err;
   1912 
   1913 		if (!iwm_nic_lock(sc))
   1914 			return EBUSY;
   1915 	}
   1916 
   1917 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
   1918 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
   1919 
   1920 	iwm_nic_unlock(sc);
   1921 
   1922 	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
   1923 
   1924 	return 0;
   1925 }
   1926 
/*
 * Finish bringing the device up after the firmware has reported ALIVE:
 * verify the scheduler SRAM base, reset the ICT table, clear scheduler
 * state, enable the command queue and the TX DMA channels.
 * Returns 0 on success or an errno value.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int err, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Firmware's view of the scheduler base must match ours. */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (sc->sched_base != base) {
		DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
		    DEVNAME(sc), sc->sched_base, base));
		err = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	err = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (err)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* iwm_enable_txq() takes the NIC lock itself; release it here. */
	iwm_nic_unlock(sc);

	/* enable command channel */
	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return err;
}
   1994 
   1995 static struct iwm_phy_db_entry *
   1996 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
   1997     uint16_t chg_id)
   1998 {
   1999 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   2000 
   2001 	if (type >= IWM_PHY_DB_MAX)
   2002 		return NULL;
   2003 
   2004 	switch (type) {
   2005 	case IWM_PHY_DB_CFG:
   2006 		return &phy_db->cfg;
   2007 	case IWM_PHY_DB_CALIB_NCH:
   2008 		return &phy_db->calib_nch;
   2009 	case IWM_PHY_DB_CALIB_CHG_PAPD:
   2010 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
   2011 			return NULL;
   2012 		return &phy_db->calib_ch_group_papd[chg_id];
   2013 	case IWM_PHY_DB_CALIB_CHG_TXP:
   2014 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
   2015 			return NULL;
   2016 		return &phy_db->calib_ch_group_txp[chg_id];
   2017 	default:
   2018 		return NULL;
   2019 	}
   2020 	return NULL;
   2021 }
   2022 
/*
 * Store a PHY DB section received from the firmware (calibration
 * result notification) into the driver's PHY DB cache, replacing any
 * previously stored data for that section.
 * Returns 0 on success, EINVAL for an unknown section, or ENOMEM.
 */
static int
iwm_phy_db_set_section(struct iwm_softc *sc,
    struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
{
	struct iwm_phy_db_entry *entry;
	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
	uint16_t chg_id = 0;

	/*
	 * Channel-group sections carry their group id as the first
	 * 16-bit word of the payload.
	 */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Replace any previous contents of this section. */
	if (entry->data)
		kmem_intr_free(entry->data, entry->size);
	/* May run from interrupt context, hence KM_NOSLEEP. */
	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
	    __func__, __LINE__, type, size, entry->data));

	return 0;
}
   2054 
/*
 * Return 1 if ch_id is a channel number the PHY DB knows about,
 * 0 otherwise (2GHz channels plus the usual 5GHz channel sets).
 */
static int
iwm_is_valid_channel(uint16_t ch_id)
{
	/* 2GHz channel range (includes 0, matching original behaviour). */
	if (ch_id <= 14)
		return 1;
	/* 5GHz: 36-64 and 100-140, every 4th channel. */
	if (36 <= ch_id && ch_id <= 64 && (ch_id % 4) == 0)
		return 1;
	if (100 <= ch_id && ch_id <= 140 && (ch_id % 4) == 0)
		return 1;
	/* 5GHz high band: 145-165, channels congruent to 1 mod 4. */
	if (145 <= ch_id && ch_id <= 165 && (ch_id % 4) == 1)
		return 1;
	return 0;
}
   2065 
/*
 * Map a channel number to its index in the PHY DB channel table.
 * Returns 0xff for channels iwm_is_valid_channel() rejects.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	uint8_t idx;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		idx = ch_id - 1;		/* 1-14  -> 0-13  */
	else if (ch_id <= 64)
		idx = (ch_id + 20) / 4;		/* 36-64 -> 14-21 */
	else if (ch_id <= 140)
		idx = (ch_id - 12) / 4;		/* 100-140 -> 22-32 */
	else
		idx = (ch_id - 13) / 4;		/* 145-165 -> 33-38 */
	return idx;
}
   2080 
   2081 
/*
 * Map a channel number to its PAPD calibration channel-group index
 * (0-3), or 0xff if the channel is not valid.
 */
static uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	uint16_t group;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id >= 1 && ch_id <= 14)
		group = 0;	/* 2GHz */
	else if (ch_id >= 36 && ch_id <= 64)
		group = 1;	/* 5GHz low */
	else if (ch_id >= 100 && ch_id <= 140)
		group = 2;	/* 5GHz mid */
	else
		group = 3;	/* 5GHz high */
	return group;
}
   2096 
/*
 * Map a channel number to its TX power calibration channel group:
 * the first group whose maximum channel index covers the channel.
 * Returns 0xff if the channel is invalid or no group matches (or if
 * a group's calibration data has not been received yet).
 */
static uint16_t
iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
{
	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
	struct iwm_phy_db_chg_txp *txp_chg;
	int i;
	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);

	if (ch_index == 0xff)
		return 0xff;

	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
		if (!txp_chg)
			return 0xff;
		/*
		 * Looking for the first channel group the max channel
		 * of which is higher than the requested channel.
		 */
		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
			return i;
	}
	return 0xff;
}
   2121 
/*
 * Look up the cached data for a PHY DB section and return pointers to
 * it via *data/*size.  For channel-group sections, ch_id is translated
 * to the owning group first.  Returns 0 on success or EINVAL.
 * The returned pointer aliases the cache; the caller must not free it.
 */
static int
iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
    uint16_t *size, uint16_t ch_id)
{
	struct iwm_phy_db_entry *entry;
	uint16_t ch_group_id = 0;

	/* Translate the channel id into a channel-group id if needed. */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
		ch_group_id = iwm_channel_id_to_papd(ch_id);
	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);

	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
	if (!entry)
		return EINVAL;

	*data = entry->data;
	*size = entry->size;

	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
		       __func__, __LINE__, type, *size));

	return 0;
}
   2146 
   2147 static int
   2148 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
   2149     void *data)
   2150 {
   2151 	struct iwm_phy_db_cmd phy_db_cmd;
   2152 	struct iwm_host_cmd cmd = {
   2153 		.id = IWM_PHY_DB_CMD,
   2154 		.flags = IWM_CMD_ASYNC,
   2155 	};
   2156 
   2157 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
   2158 	    type, length));
   2159 
   2160 	phy_db_cmd.type = le16toh(type);
   2161 	phy_db_cmd.length = le16toh(length);
   2162 
   2163 	cmd.data[0] = &phy_db_cmd;
   2164 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
   2165 	cmd.data[1] = data;
   2166 	cmd.len[1] = length;
   2167 
   2168 	return iwm_send_cmd(sc, &cmd);
   2169 }
   2170 
/*
 * Send every cached channel group of the given section type to the
 * operational firmware, skipping groups with no data.  Returns 0 on
 * success or the first error encountered.
 */
static int
iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
    enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
{
	uint16_t i;
	int err;
	struct iwm_phy_db_entry *entry;

	/* Send all the channel-specific groups to operational fw */
	for (i = 0; i < max_ch_groups; i++) {
		entry = iwm_phy_db_get_section(sc, type, i);
		if (!entry)
			return EINVAL;

		/* Groups without calibration data are simply skipped. */
		if (!entry->size)
			continue;

		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
		if (err) {
			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
			    "err %d\n", DEVNAME(sc), type, i, err));
			return err;
		}

		DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
		    DEVNAME(sc), type, i));

		/* Brief pause between async commands to pace the firmware. */
		DELAY(1000);
	}

	return 0;
}
   2203 
   2204 static int
   2205 iwm_send_phy_db_data(struct iwm_softc *sc)
   2206 {
   2207 	uint8_t *data = NULL;
   2208 	uint16_t size = 0;
   2209 	int err;
   2210 
   2211 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
   2212 	if (err)
   2213 		return err;
   2214 
   2215 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
   2216 	if (err)
   2217 		return err;
   2218 
   2219 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
   2220 	    &data, &size, 0);
   2221 	if (err)
   2222 		return err;
   2223 
   2224 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
   2225 	if (err)
   2226 		return err;
   2227 
   2228 	err = iwm_phy_db_send_all_channel_groups(sc,
   2229 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
   2230 	if (err)
   2231 		return err;
   2232 
   2233 	err = iwm_phy_db_send_all_channel_groups(sc,
   2234 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
   2235 	if (err)
   2236 		return err;
   2237 
   2238 	return 0;
   2239 }
   2240 
/*
 * For the high priority TE use a time event type that has similar priority to
 * the FW's action scan priority.
 */
#define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
#define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC

/* used to convert from time event API v2 to v1 */
#define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
			     IWM_TE_V2_EVENT_SOCIOPATHIC)
/* Extract the notification bits from a v2 time event policy field. */
static inline uint16_t
iwm_te_v2_get_notify(uint16_t policy)
{
	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
}
   2256 
/*
 * Extract the dependency policy bits from a v2 time event policy field.
 * NOTE(review): the shift uses IWM_TE_V2_PLACEMENT_POS, which matches
 * the upstream iwlwifi code — presumably the dep-policy field starts at
 * the same bit position; confirm against the firmware API definitions.
 */
static inline uint16_t
iwm_te_v2_get_dep_policy(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
		IWM_TE_V2_PLACEMENT_POS;
}
   2263 
/* Extract the absence bit from a v2 time event policy field (0 or 1). */
static inline uint16_t
iwm_te_v2_get_absence(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
}
   2269 
/*
 * Convert a version-2 time event command into the older v1 layout,
 * for firmware without IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2.
 * Most fields copy straight across; the packed v2 policy field is
 * expanded into the separate v1 dep_policy/is_present/notify fields.
 */
static void
iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
    struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 and v2 use different sentinels for "repeat forever". */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	/* Unpack the v2 policy bits into the discrete v1 fields. */
	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
   2293 
   2294 static int
   2295 iwm_send_time_event_cmd(struct iwm_softc *sc,
   2296     const struct iwm_time_event_cmd_v2 *cmd)
   2297 {
   2298 	struct iwm_time_event_cmd_v1 cmd_v1;
   2299 
   2300 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
   2301 		return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
   2302 		    cmd);
   2303 
   2304 	iwm_te_v2_to_v1(cmd, &cmd_v1);
   2305 	return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
   2306 	    &cmd_v1);
   2307 }
   2308 
/*
 * Ask the firmware to reserve a window of airtime ('duration', starting
 * within 'max_delay') around association, by adding an aggressive-assoc
 * time event for the given node.  Errors are ignored (best effort).
 */
static void
iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time = htole32(0);

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
		IWM_T2_V2_START_IMMEDIATELY);

	/* Best effort; the return value is deliberately ignored. */
	iwm_send_time_event_cmd(sc, &time_cmd);
}
   2337 
   2338 /*
   2339  * NVM read access and content parsing.  We do not support
   2340  * external NVM or writing NVM.
   2341  */
   2342 
   2343 /* list of NVM sections we are allowed/need to read */
static const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	/* Presumably 8000-family specific (cf. iwm_set_hw_address_8000). */
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};
   2354 
   2355 /* Default NVM size to read */
   2356 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
   2357 #define IWM_MAX_NVM_SECTION_SIZE	8192
   2358 
   2359 #define IWM_NVM_WRITE_OPCODE 1
   2360 #define IWM_NVM_READ_OPCODE 0
   2361 
   2362 static int
   2363 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
   2364     uint16_t length, uint8_t *data, uint16_t *len)
   2365 {
   2366 	offset = 0;
   2367 	struct iwm_nvm_access_cmd nvm_access_cmd = {
   2368 		.offset = htole16(offset),
   2369 		.length = htole16(length),
   2370 		.type = htole16(section),
   2371 		.op_code = IWM_NVM_READ_OPCODE,
   2372 	};
   2373 	struct iwm_nvm_access_resp *nvm_resp;
   2374 	struct iwm_rx_packet *pkt;
   2375 	struct iwm_host_cmd cmd = {
   2376 		.id = IWM_NVM_ACCESS_CMD,
   2377 		.flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
   2378 		.data = { &nvm_access_cmd, },
   2379 	};
   2380 	int err, offset_read;
   2381 	size_t bytes_read;
   2382 	uint8_t *resp_data;
   2383 
   2384 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
   2385 
   2386 	err = iwm_send_cmd(sc, &cmd);
   2387 	if (err) {
   2388 		DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
   2389 		    DEVNAME(sc), err));
   2390 		return err;
   2391 	}
   2392 
   2393 	pkt = cmd.resp_pkt;
   2394 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   2395 		err = EIO;
   2396 		goto exit;
   2397 	}
   2398 
   2399 	/* Extract NVM response */
   2400 	nvm_resp = (void *)pkt->data;
   2401 
   2402 	err = le16toh(nvm_resp->status);
   2403 	bytes_read = le16toh(nvm_resp->length);
   2404 	offset_read = le16toh(nvm_resp->offset);
   2405 	resp_data = nvm_resp->data;
   2406 	if (err) {
   2407 		err = EINVAL;
   2408 		goto exit;
   2409 	}
   2410 
   2411 	if (offset_read != offset) {
   2412 		err = EINVAL;
   2413 		goto exit;
   2414 	}
   2415 	if (bytes_read > length) {
   2416 		err = EINVAL;
   2417 		goto exit;
   2418 	}
   2419 
   2420 	memcpy(data + offset, resp_data, bytes_read);
   2421 	*len = bytes_read;
   2422 
   2423  exit:
   2424 	iwm_free_resp(sc, &cmd);
   2425 	return err;
   2426 }
   2427 
   2428 /*
   2429  * Reads an NVM section completely.
   2430  * NICs prior to 7000 family doesn't have a real NVM, but just read
   2431  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
   2432  * by uCode, we need to manually check in this case that we don't
   2433  * overflow and try to read more than the EEPROM size.
   2434  */
static int
iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
    uint16_t *len, size_t max_len)
{
	uint16_t chunklen, seglen;
	int err;

	/*
	 * 'data' must hold at least max_len bytes; the loop below only
	 * starts a chunk while *len < max_len, so max_len is assumed to
	 * be a multiple of the chunk size (callers use 8K / 2K).
	 */
	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/* Read NVM chunks until exhausted (reading less than requested) */
	while (seglen == chunklen && *len < max_len) {
		err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
		    &seglen);
		if (err) {
			DPRINTF(("%s:Cannot read NVM from section %d "
			    "offset %d, length %d\n",
			    DEVNAME(sc), section, *len, chunklen));
			return err;
		}
		*len += seglen;
	}

	DPRINTFN(4, ("NVM section %d read completed\n", section));
	return 0;
}
   2461 
   2462 static uint8_t
   2463 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
   2464 {
   2465 	uint8_t tx_ant;
   2466 
   2467 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
   2468 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
   2469 
   2470 	if (sc->sc_nvm.valid_tx_ant)
   2471 		tx_ant &= sc->sc_nvm.valid_tx_ant;
   2472 
   2473 	return tx_ant;
   2474 }
   2475 
   2476 static uint8_t
   2477 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
   2478 {
   2479 	uint8_t rx_ant;
   2480 
   2481 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
   2482 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
   2483 
   2484 	if (sc->sc_nvm.valid_rx_ant)
   2485 		rx_ant &= sc->sc_nvm.valid_rx_ant;
   2486 
   2487 	return rx_ant;
   2488 }
   2489 
   2490 static void
   2491 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
   2492     const uint8_t *nvm_channels, size_t nchan)
   2493 {
   2494 	struct ieee80211com *ic = &sc->sc_ic;
   2495 	struct iwm_nvm_data *data = &sc->sc_nvm;
   2496 	int ch_idx;
   2497 	struct ieee80211_channel *channel;
   2498 	uint16_t ch_flags;
   2499 	int is_5ghz;
   2500 	int flags, hw_value;
   2501 
   2502 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
   2503 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
   2504 
   2505 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
   2506 		    !data->sku_cap_band_52GHz_enable)
   2507 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
   2508 
   2509 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
   2510 			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
   2511 			    iwm_nvm_channels[ch_idx],
   2512 			    ch_flags,
   2513 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
   2514 			    "5.2" : "2.4"));
   2515 			continue;
   2516 		}
   2517 
   2518 		hw_value = nvm_channels[ch_idx];
   2519 		channel = &ic->ic_channels[hw_value];
   2520 
   2521 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
   2522 		if (!is_5ghz) {
   2523 			flags = IEEE80211_CHAN_2GHZ;
   2524 			channel->ic_flags
   2525 			    = IEEE80211_CHAN_CCK
   2526 			    | IEEE80211_CHAN_OFDM
   2527 			    | IEEE80211_CHAN_DYN
   2528 			    | IEEE80211_CHAN_2GHZ;
   2529 		} else {
   2530 			flags = IEEE80211_CHAN_5GHZ;
   2531 			channel->ic_flags =
   2532 			    IEEE80211_CHAN_A;
   2533 		}
   2534 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
   2535 
   2536 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
   2537 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
   2538 
   2539 #ifndef IEEE80211_NO_HT
   2540 		if (data->sku_cap_11n_enable)
   2541 			channel->ic_flags |= IEEE80211_CHAN_HT;
   2542 #endif
   2543 	}
   2544 }
   2545 
   2546 #ifndef IEEE80211_NO_HT
/*
 * Advertise the HT MCS rates we support.  TX mirrors RX; MIMO rates
 * (MCS 8+) are withheld until multi-antenna support is enabled.
 */
static void
iwm_setup_ht_rates(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;

	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */

#ifdef notyet
	if (sc->sc_nvm.sku_cap_mimo_disable)
		return;

	if (iwm_fw_valid_rx_ant(sc) > 1)
		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
	if (iwm_fw_valid_rx_ant(sc) > 2)
		ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
#endif
}
   2567 
   2568 #define IWM_MAX_RX_BA_SESSIONS 16
   2569 
/*
 * Start (start != 0) or stop an RX block ack session for the given TID
 * by sending an ADD_STA modify command to the firmware, then tell
 * net80211 whether the ADDBA request was accepted or refused.
 * At most IWM_MAX_RX_BA_SESSIONS sessions are allowed concurrently.
 */
static void
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_add_sta_cmd_v7 cmd;
	struct iwm_node *in = (struct iwm_node *)ni;
	int err, s;
	uint32_t status;

	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		return;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.sta_id = IWM_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWM_STA_MODE_MODIFY;

	/* Add or remove the immediate BA agreement for this TID. */
	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		cmd.add_immediate_ba_ssn = ssn;
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
	    IWM_STA_MODIFY_REMOVE_BA_TID;

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);

	/* Update session accounting and notify net80211 at splnet. */
	s = splnet();
	if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
		if (start) {
			sc->sc_rx_ba_sessions++;
			ieee80211_addba_req_accept(ic, ni, tid);
		} else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	} else if (start)
		ieee80211_addba_req_refuse(ic, ni, tid);

	splx(s);
}
   2617 
/*
 * Deferred task: push updated HT protection settings for the current
 * BSS to the firmware via a MAC context MODIFY command.
 */
static void
iwm_htprot_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	int err;

	/* This call updates HT protection based on in->in_ni.ni_htop1. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
	if (err)
		aprint_error_dev(sc->sc_dev,
		    "could not change HT protection: error %d\n", err);
}
   2632 
   2633 /*
   2634  * This function is called by upper layer when HT protection settings in
   2635  * beacons have changed.
   2636  */
static void
iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct iwm_softc *sc = ic->ic_softc;

	/* assumes that ni == ic->ic_bss */
	/* Defer the firmware update to task context (it can sleep). */
	task_add(systq, &sc->htprot_task);
}
   2645 
/*
 * Deferred task: start or stop the RX block ack session described by
 * sc->ba_start/ba_tid/ba_ssn (set by the ampdu_rx_start/stop hooks).
 */
static void
iwm_ba_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;

	if (sc->ba_start)
		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
	else
		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
}
   2658 
   2659 /*
   2660  * This function is called by upper layer when an ADDBA request is received
   2661  * from another STA and before the ADDBA response is sent.
   2662  */
static int
iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
		return ENOSPC;

	/* Record the request and let iwm_ba_task() do the firmware work. */
	sc->ba_start = 1;
	sc->ba_tid = tid;
	sc->ba_ssn = htole16(ba->ba_winstart);
	task_add(systq, &sc->ba_task);

	/*
	 * EBUSY tells net80211 not to answer the ADDBA itself; the task
	 * accepts or refuses the request once the firmware has replied.
	 */
	return EBUSY;
}
   2680 
   2681 /*
   2682  * This function is called by upper layer on teardown of an HT-immediate
   2683  * Block Ack agreement (eg. upon receipt of a DELBA frame).
   2684  */
   2685 static void
   2686 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
   2687     uint8_t tid)
   2688 {
   2689 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
   2690 
   2691 	sc->ba_start = 0;
   2692 	sc->ba_tid = tid;
   2693 	task_add(systq, &sc->ba_task);
   2694 }
   2695 #endif
   2696 
/*
 * Determine the MAC address of a family 8000 device.
 * Preference order: the MAC address override (MAO) NVM section if it
 * holds a usable address, otherwise the address read from the WFMP
 * PRPH registers.  If neither source is available the address is
 * zeroed and an error is printed.
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	/* Reserved placeholder address that forces use of the OTP address. */
	static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
	};
	static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const uint8_t *hw_addr;

	if (mac_override) {
		hw_addr = (const uint8_t *)(mac_override +
		    IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid
		 * (broadcast, all-zero, or a multicast address).
		 * A valid override address makes us return here.
		 */
		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
		    (memcmp(etherbroadcastaddr, data->hw_addr,
		    sizeof(etherbroadcastaddr)) != 0) &&
		    (memcmp(etheranyaddr, data->hw_addr,
		    sizeof(etheranyaddr)) != 0) &&
		    !ETHER_IS_MULTICAST(data->hw_addr))
			return;
	}

	if (nvm_hw) {
		/* Read the mac address from WFMP registers. */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Bytes in the register words are stored in reverse order. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	aprint_error_dev(sc->sc_dev, "mac address not found\n");
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
   2755 
/*
 * Parse the raw NVM sections into sc->sc_nvm: NVM version, radio
 * configuration, SKU capability bits, number of hardware addresses,
 * MAC address and channel map.  The 7000 and 8000 device families
 * lay these fields out differently.  Always returns 0.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
    const uint16_t *nvm_sw, const uint16_t *nvm_calib,
    const uint16_t *mac_override, const uint16_t *phy_sku,
    const uint16_t *regulatory)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint32_t sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* 7000: 16-bit radio config word in the SW section. */
		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

		sku = le16_to_cpup(nvm_sw + IWM_SKU);
	} else {
		/* 8000: 32-bit radio config word in the PHY_SKU section. */
		uint32_t radio_cfg = le32_to_cpup(
		    (const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
		/* On 8000 the config word also carries the antenna masks. */
		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);

		sku = le32_to_cpup(
		    (const uint32_t *)(phy_sku + IWM_SKU_8000));
	}

	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/*
		 * The address is stored as 16-bit words; the bytes within
		 * each word are swapped relative to wire order.
		 */
		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else
		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
		    iwm_nvm_channels, __arraycount(iwm_nvm_channels));
	else
		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
		    iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));

	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
   2823 
   2824 static int
   2825 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
   2826 {
   2827 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
   2828 	const uint16_t *regulatory = NULL;
   2829 
   2830 	/* Checking for required sections */
   2831 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
   2832 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
   2833 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
   2834 			return ENOENT;
   2835 		}
   2836 
   2837 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
   2838 	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
   2839 		/* SW and REGULATORY sections are mandatory */
   2840 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
   2841 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
   2842 			return ENOENT;
   2843 		}
   2844 		/* MAC_OVERRIDE or at least HW section must exist */
   2845 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
   2846 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
   2847 			return ENOENT;
   2848 		}
   2849 
   2850 		/* PHY_SKU section is mandatory in B0 */
   2851 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
   2852 			return ENOENT;
   2853 		}
   2854 
   2855 		regulatory = (const uint16_t *)
   2856 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
   2857 		hw = (const uint16_t *)
   2858 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
   2859 		mac_override =
   2860 			(const uint16_t *)
   2861 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
   2862 		phy_sku = (const uint16_t *)
   2863 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
   2864 	} else {
   2865 		panic("unknown device family %d\n", sc->sc_device_family);
   2866 	}
   2867 
   2868 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
   2869 	calib = (const uint16_t *)
   2870 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
   2871 
   2872 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
   2873 	    phy_sku, regulatory);
   2874 }
   2875 
   2876 static int
   2877 iwm_nvm_init(struct iwm_softc *sc)
   2878 {
   2879 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
   2880 	int i, section, err;
   2881 	uint16_t len;
   2882 	uint8_t *buf;
   2883 	const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
   2884 
   2885 	/* Read From FW NVM */
   2886 	DPRINTF(("Read NVM\n"));
   2887 
   2888 	memset(nvm_sections, 0, sizeof(nvm_sections));
   2889 
   2890 	buf = kmem_alloc(bufsz, KM_SLEEP);
   2891 	if (buf == NULL)
   2892 		return ENOMEM;
   2893 
   2894 	for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
   2895 		section = iwm_nvm_to_read[i];
   2896 		KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);
   2897 
   2898 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
   2899 		if (err) {
   2900 			err = 0;
   2901 			continue;
   2902 		}
   2903 		nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
   2904 		if (nvm_sections[section].data == NULL) {
   2905 			err = ENOMEM;
   2906 			break;
   2907 		}
   2908 		memcpy(nvm_sections[section].data, buf, len);
   2909 		nvm_sections[section].length = len;
   2910 	}
   2911 	kmem_free(buf, bufsz);
   2912 	if (err == 0)
   2913 		err = iwm_parse_nvm_sections(sc, nvm_sections);
   2914 
   2915 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
   2916 		if (nvm_sections[i].data != NULL)
   2917 			kmem_free(nvm_sections[i].data, nvm_sections[i].length);
   2918 	}
   2919 
   2920 	return err;
   2921 }
   2922 
   2923 static int
   2924 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
   2925     const uint8_t *section, uint32_t byte_cnt)
   2926 {
   2927 	int err = EINVAL;
   2928 	uint32_t chunk_sz, offset;
   2929 
   2930 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
   2931 
   2932 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
   2933 		uint32_t addr, len;
   2934 		const uint8_t *data;
   2935 
   2936 		addr = dst_addr + offset;
   2937 		len = MIN(chunk_sz, byte_cnt - offset);
   2938 		data = section + offset;
   2939 
   2940 		err = iwm_firmware_load_chunk(sc, addr, data, len);
   2941 		if (err)
   2942 			break;
   2943 	}
   2944 
   2945 	return err;
   2946 }
   2947 
   2948 static int
   2949 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
   2950     const uint8_t *section, uint32_t byte_cnt)
   2951 {
   2952 	struct iwm_dma_info *dma = &sc->fw_dma;
   2953 	bool is_extended = false;
   2954 	int err;
   2955 
   2956 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
   2957 	memcpy(dma->vaddr, section, byte_cnt);
   2958 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
   2959 	    BUS_DMASYNC_PREWRITE);
   2960 
   2961 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
   2962 	    dst_addr <= IWM_FW_MEM_EXTENDED_END)
   2963 		is_extended = true;
   2964 
   2965 	if (is_extended) {
   2966 		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
   2967 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
   2968 	}
   2969 
   2970 	sc->sc_fw_chunk_done = 0;
   2971 
   2972 	if (!iwm_nic_lock(sc)) {
   2973 		if (is_extended)
   2974 			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
   2975 			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
   2976 		return EBUSY;
   2977 	}
   2978 
   2979 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
   2980 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
   2981 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
   2982 	    dst_addr);
   2983 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
   2984 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
   2985 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
   2986 	    (iwm_get_dma_hi_addr(dma->paddr)
   2987 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
   2988 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
   2989 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
   2990 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
   2991 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
   2992 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
   2993 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
   2994 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
   2995 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
   2996 
   2997 	iwm_nic_unlock(sc);
   2998 
   2999 	/* Wait for this segment to load. */
   3000 	err = 0;
   3001 	while (!sc->sc_fw_chunk_done) {
   3002 		err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
   3003 		if (err)
   3004 			break;
   3005 	}
   3006 	if (!sc->sc_fw_chunk_done) {
   3007 		aprint_error_dev(sc->sc_dev,
   3008 		    "fw chunk addr 0x%x len %d failed to load\n",
   3009 		    dst_addr, byte_cnt);
   3010 	}
   3011 
   3012 	if (is_extended) {
   3013 		int rv = iwm_nic_lock(sc);
   3014 		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
   3015 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
   3016 		if (rv == 0)
   3017 			iwm_nic_unlock(sc);
   3018 	}
   3019 
   3020 	return err;
   3021 }
   3022 
   3023 static int
   3024 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3025 {
   3026 	struct iwm_fw_sects *fws;
   3027 	int err, i;
   3028 	void *data;
   3029 	uint32_t dlen;
   3030 	uint32_t offset;
   3031 
   3032 	fws = &sc->sc_fw.fw_sects[ucode_type];
   3033 	for (i = 0; i < fws->fw_count; i++) {
   3034 		data = fws->fw_sect[i].fws_data;
   3035 		dlen = fws->fw_sect[i].fws_len;
   3036 		offset = fws->fw_sect[i].fws_devoff;
   3037 		if (dlen > sc->sc_fwdmasegsz) {
   3038 			err = EFBIG;
   3039 		} else
   3040 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
   3041 		if (err) {
   3042 			aprint_error_dev(sc->sc_dev,
   3043 			    "could not load firmware chunk %u of %u\n",
   3044 			    i, fws->fw_count);
   3045 			return err;
   3046 		}
   3047 	}
   3048 
   3049 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
   3050 
   3051 	return 0;
   3052 }
   3053 
/*
 * Load the firmware sections belonging to one CPU (1 or 2) of a
 * family 8000 device.  Scanning starts at *first_ucode_section and
 * stops at a CPU1/CPU2 or paging separator section; the index reached
 * is written back so the caller can continue with the next CPU.
 * Returns 0 on success or an errno from the section loader.
 */
static int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, err = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		/* CPU2 status bits are shifted by 16 in the load-status reg. */
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		if (dlen > sc->sc_fwdmasegsz) {
			/* Section does not fit into the DMA buffer. */
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load firmware chunk %d (error %d)\n",
			    i, err);
			return err;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Each loaded section sets one more status bit. */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Tell the ucode that this CPU's sections are complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
   3127 
   3128 static int
   3129 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3130 {
   3131 	struct iwm_fw_sects *fws;
   3132 	int err = 0;
   3133 	int first_ucode_section;
   3134 
   3135 	fws = &sc->sc_fw.fw_sects[ucode_type];
   3136 
   3137 	/* configure the ucode to be ready to get the secured image */
   3138 	/* release CPU reset */
   3139 	iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
   3140 
   3141 	/* load to FW the binary Secured sections of CPU1 */
   3142 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
   3143 	if (err)
   3144 		return err;
   3145 
   3146 	/* load to FW the binary sections of CPU2 */
   3147 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
   3148 }
   3149 
   3150 static int
   3151 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3152 {
   3153 	int err, w;
   3154 
   3155 	sc->sc_uc.uc_intr = 0;
   3156 
   3157 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   3158 		err = iwm_load_firmware_8000(sc, ucode_type);
   3159 	else
   3160 		err = iwm_load_firmware_7000(sc, ucode_type);
   3161 
   3162 	if (err)
   3163 		return err;
   3164 
   3165 	/* wait for the firmware to load */
   3166 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
   3167 		err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
   3168 	if (err || !sc->sc_uc.uc_ok)
   3169 		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
   3170 
   3171 	return err;
   3172 }
   3173 
/*
 * Initialize the NIC, clear the RF-kill handshake bits, enable host
 * interrupts and load the requested ucode image.
 * Returns 0 on success or an errno.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err;

	/* Ack/clear any pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	err = iwm_nic_init(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return err;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	return iwm_load_firmware(sc, ucode_type);
}
   3203 
   3204 static int
   3205 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
   3206 {
   3207 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
   3208 		.valid = htole32(valid_tx_ant),
   3209 	};
   3210 
   3211 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
   3212 	    sizeof(tx_ant_cmd), &tx_ant_cmd);
   3213 }
   3214 
   3215 static int
   3216 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
   3217 {
   3218 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
   3219 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
   3220 
   3221 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
   3222 	phy_cfg_cmd.calib_control.event_trigger =
   3223 	    sc->sc_default_calib[ucode_type].event_trigger;
   3224 	phy_cfg_cmd.calib_control.flow_trigger =
   3225 	    sc->sc_default_calib[ucode_type].flow_trigger;
   3226 
   3227 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
   3228 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
   3229 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
   3230 }
   3231 
   3232 static int
   3233 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3234 {
   3235 	enum iwm_ucode_type old_type = sc->sc_uc_current;
   3236 	int err;
   3237 
   3238 	err = iwm_read_firmware(sc);
   3239 	if (err)
   3240 		return err;
   3241 
   3242 	sc->sc_uc_current = ucode_type;
   3243 	err = iwm_start_fw(sc, ucode_type);
   3244 	if (err) {
   3245 		sc->sc_uc_current = old_type;
   3246 		return err;
   3247 	}
   3248 
   3249 	return iwm_post_alive(sc);
   3250 }
   3251 
/*
 * Boot the INIT ucode image.  With justnvm != 0 only the NVM is read
 * and the MAC address copied into the ic; otherwise the full init
 * calibration sequence is run and we wait for its completion
 * notification.  Returns 0 on success or an errno.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int err;

	/* RF-kill blocks everything except plain NVM reading. */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to load init firmware\n");
		return err;
	}

	if (justnvm) {
		err = iwm_nvm_init(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return err;
		}

		/* Publish the NVM-provided MAC address to net80211. */
		memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
		    ETHER_ADDR_LEN);
		return 0;
	}

	err = iwm_send_bt_init_conf(sc);
	if (err)
		return err;

	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err)
		return err;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	err = iwm_send_phy_cfg_cmd(sc);
	if (err)
		return err;

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
		/* 2 second timeout per wakeup attempt. */
		err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
		if (err)
			break;
	}

	return err;
}
   3314 
/*
 * Attach a freshly allocated RX buffer (mbuf with cluster or external
 * storage) to ring slot 'idx' and store its DMA address in the RX
 * descriptor.  Returns 0 on success, ENOBUFS when no mbuf or cluster
 * is available, or an errno from bus_dmamap_load_mbuf().
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Use a regular cluster when it fits, external storage otherwise. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (data->m != NULL) {
		/*
		 * We are replacing an already-loaded buffer; if loading
		 * the new one fails we have no valid mapping left for
		 * this slot, hence the panic below.
		 */
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor with the DMA address, shifted right by 8. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
   3363 
   3364 #define IWM_RSSI_OFFSET 50
   3365 static int
   3366 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   3367 {
   3368 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
   3369 	uint32_t agc_a, agc_b;
   3370 	uint32_t val;
   3371 
   3372 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
   3373 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
   3374 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
   3375 
   3376 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
   3377 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
   3378 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
   3379 
   3380 	/*
   3381 	 * dBm = rssi dB - agc dB - constant.
   3382 	 * Higher AGC (higher radio gain) means lower signal.
   3383 	 */
   3384 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
   3385 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
   3386 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
   3387 
   3388 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
   3389 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
   3390 
   3391 	return max_rssi_dbm;
   3392 }
   3393 
   3394 /*
   3395  * RSSI values are reported by the FW as positive values - need to negate
   3396  * to obtain their dBM.  Account for missing antennas by replacing 0
   3397  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
   3398  */
   3399 static int
   3400 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   3401 {
   3402 	int energy_a, energy_b, energy_c, max_energy;
   3403 	uint32_t val;
   3404 
   3405 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
   3406 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
   3407 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
   3408 	energy_a = energy_a ? -energy_a : -256;
   3409 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
   3410 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
   3411 	energy_b = energy_b ? -energy_b : -256;
   3412 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
   3413 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
   3414 	energy_c = energy_c ? -energy_c : -256;
   3415 	max_energy = MAX(energy_a, energy_b);
   3416 	max_energy = MAX(max_energy, energy_c);
   3417 
   3418 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
   3419 	    energy_a, energy_b, energy_c, max_energy));
   3420 
   3421 	return max_energy;
   3422 }
   3423 
/*
 * Handle an IWM_RX_PHY_CMD notification: stash the PHY info so the
 * MPDU that follows (see iwm_rx_rx_mpdu()) can use it.
 */
static void
iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	DPRINTFN(20, ("received PHY stats\n"));
	/* Sync only the PHY info payload that follows the packet header. */
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
   3436 
   3437 /*
   3438  * Retrieve the average noise (in dBm) among receivers.
   3439  */
   3440 static int
   3441 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
   3442 {
   3443 	int i, total, nbant, noise;
   3444 
   3445 	total = nbant = noise = 0;
   3446 	for (i = 0; i < 3; i++) {
   3447 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
   3448 		if (noise) {
   3449 			total += noise;
   3450 			nbant++;
   3451 		}
   3452 	}
   3453 
   3454 	/* There should be at least one antenna but check anyway. */
   3455 	return (nbant == 0) ? -127 : (total / nbant) - 107;
   3456 }
   3457 
/*
 * Handle an IWM_RX_MPDU notification: a received 802.11 frame.  The
 * PHY info for this frame was stashed by the preceding IWM_RX_PHY_CMD
 * (see iwm_rx_rx_phy_cmd()).  Validates status bits, computes RSSI,
 * replenishes the ring slot and hands the frame to net80211.
 * On any early return the mbuf stays attached to the ring slot and
 * is simply reused, i.e. the frame is dropped.
 */
static void
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;
	int s;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The RX status word is placed right after the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
	    sizeof(*rx_res) + len));

	/* Point the ring mbuf at the frame payload. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports per-antenna energy instead of RSSI/AGC. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_calc_rssi(sc, phy_info);
	}
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(sc, m);

	/*
	 * Replenish the ring slot before handing the mbuf up; if that
	 * fails, drop this frame and keep the old buffer.
	 */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m_set_rcvif(m, IC2IFP(ic));

	if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
		c = &ic->ic_channels[le32toh(phy_info->channel)];

	s = splnet();

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	/* Feed the radiotap sniffer tap, if one is attached. */
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		if (phy_info->phy_flags &
		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
			/* HT frame: report as MCS (0x80 marks an MCS rate). */
			uint8_t mcs = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK));
			tap->wr_rate = (0x80 | mcs);
		} else {
			/* Map the device rate code to 500kb/s units. */
			uint8_t rate = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_LEGACY_RATE_MSK));
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate =   0;
			}
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
	}
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);

	splx(s);
}
   3575 
   3576 static void
   3577 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
   3578     struct iwm_node *in)
   3579 {
   3580 	struct ieee80211com *ic = &sc->sc_ic;
   3581 	struct ifnet *ifp = IC2IFP(ic);
   3582 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
   3583 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
   3584 	int failack = tx_resp->failure_frame;
   3585 
   3586 	KASSERT(tx_resp->frame_count == 1);
   3587 
   3588 	/* Update rate control statistics. */
   3589 	in->in_amn.amn_txcnt++;
   3590 	if (failack > 0) {
   3591 		in->in_amn.amn_retrycnt++;
   3592 	}
   3593 
   3594 	if (status != IWM_TX_STATUS_SUCCESS &&
   3595 	    status != IWM_TX_STATUS_DIRECT_DONE)
   3596 		ifp->if_oerrors++;
   3597 	else
   3598 		ifp->if_opackets++;
   3599 }
   3600 
/*
 * Handle a TX completion notification from the firmware: update rate
 * control and interface counters, release the DMA map and mbuf of the
 * completed frame, and restart output if the ring drains below the
 * low watermark.
 */
static void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* Ignore duplicate completions for an already-released slot. */
	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* Frame completed; disarm the watchdog. */
	sc->sc_tx_timer = 0;

	iwm_rx_tx_cmd_single(sc, pkt, in);

	/* The frame is done: tear down its DMA mapping and free it. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	/* Drop the node reference held by the TX slot. */
	ieee80211_free_node(&in->in_ni);

	/* Unblock output once the ring drains below the low watermark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			if_start_lock(ifp);
		}
	}
}
   3649 
   3650 static int
   3651 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
   3652 {
   3653 	struct iwm_binding_cmd cmd;
   3654 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
   3655 	int i, err;
   3656 	uint32_t status;
   3657 
   3658 	memset(&cmd, 0, sizeof(cmd));
   3659 
   3660 	cmd.id_and_color
   3661 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3662 	cmd.action = htole32(action);
   3663 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3664 
   3665 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   3666 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
   3667 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
   3668 
   3669 	status = 0;
   3670 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
   3671 	    sizeof(cmd), &cmd, &status);
   3672 	if (err == 0 && status != 0)
   3673 		err = EIO;
   3674 
   3675 	return err;
   3676 }
   3677 
   3678 static void
   3679 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
   3680     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
   3681 {
   3682 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
   3683 
   3684 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
   3685 	    ctxt->color));
   3686 	cmd->action = htole32(action);
   3687 	cmd->apply_time = htole32(apply_time);
   3688 }
   3689 
   3690 static void
   3691 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
   3692     struct ieee80211_channel *chan, uint8_t chains_static,
   3693     uint8_t chains_dynamic)
   3694 {
   3695 	struct ieee80211com *ic = &sc->sc_ic;
   3696 	uint8_t active_cnt, idle_cnt;
   3697 
   3698 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
   3699 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
   3700 
   3701 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
   3702 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
   3703 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
   3704 
   3705 	/* Set rx the chains */
   3706 	idle_cnt = chains_static;
   3707 	active_cnt = chains_dynamic;
   3708 
   3709 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
   3710 	    IWM_PHY_RX_CHAIN_VALID_POS);
   3711 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
   3712 	cmd->rxchain_info |= htole32(active_cnt <<
   3713 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
   3714 
   3715 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
   3716 }
   3717 
   3718 static int
   3719 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
   3720     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
   3721     uint32_t apply_time)
   3722 {
   3723 	struct iwm_phy_context_cmd cmd;
   3724 
   3725 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
   3726 
   3727 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
   3728 	    chains_static, chains_dynamic);
   3729 
   3730 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
   3731 	    sizeof(struct iwm_phy_context_cmd), &cmd);
   3732 }
   3733 
/*
 * Send a command to the firmware via the command queue.
 *
 * Payloads which fit into the pre-allocated per-slot command buffer
 * are copied there; larger payloads (up to IWM_MAX_CMD_PAYLOAD_SIZE)
 * are placed in a freshly allocated mbuf which stays DMA-mapped until
 * iwm_cmd_done() releases it.  For synchronous commands the caller
 * sleeps until iwm_cmd_done() wakes it up; IWM_CMD_WANT_SKB callers
 * additionally receive the response packet in hcmd->resp_pkt and must
 * release it with iwm_free_resp().
 *
 * Returns 0 on success or an errno value.
 */
static int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int err = 0, i, paylen, off, s;
	int code;
	int async, wantresp;
	int group_id;
	size_t hdrlen, datasz;
	uint8_t *data;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload length across all fragments. */
	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async);
		while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
	}

	/*
	 * Is the hardware still available?  (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		err = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	txdata = &ring->data[ring->cur];

	/* Commands in a non-zero group use the wide header format. */
	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			aprint_error_dev(sc->sc_dev,
			    "firmware command too long (%zd bytes)\n", totlen);
			err = EINVAL;
			goto out;
		}
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			err = ENOMEM;
			goto out;
		}
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not get fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			goto out;
		}
		/* The mbuf is unmapped and freed in iwm_cmd_done(). */
		txdata->m = m;
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		/* Use the ring's pre-allocated command slot. */
		cmd = &ring->cmd[ring->cur];
		paddr = txdata->cmd_paddr;
	}

	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = ring->cur;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwm_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = ring->cur;
		data = cmd->data;
	}

	/* Gather the payload fragments into the command buffer. */
	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((hdrlen + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
	    code, hdrlen + paylen, async ? " (async)" : ""));

	/* Sync whichever buffer actually holds the command bytes. */
	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof(*desc), BUS_DMASYNC_PREWRITE);

	/* Request MAC access and wait until the device is awake. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
		aprint_error_dev(sc->sc_dev, "acquiring device failed\n");
		err = EBUSY;
		goto out;
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Synchronous commands sleep until iwm_cmd_done() wakes us. */
	if (!async) {
		int generation = sc->sc_generation;
		err = tsleep(desc, PCATCH, "iwmcmd", mstohz(1000));
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
 out:
	/* On failure, release the response slot reserved above. */
	if (wantresp && err) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return err;
}
   3913 
   3914 static int
   3915 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
   3916     uint16_t len, const void *data)
   3917 {
   3918 	struct iwm_host_cmd cmd = {
   3919 		.id = id,
   3920 		.len = { len, },
   3921 		.data = { data, },
   3922 		.flags = flags,
   3923 	};
   3924 
   3925 	return iwm_send_cmd(sc, &cmd);
   3926 }
   3927 
   3928 static int
   3929 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
   3930     uint32_t *status)
   3931 {
   3932 	struct iwm_rx_packet *pkt;
   3933 	struct iwm_cmd_response *resp;
   3934 	int err, resp_len;
   3935 
   3936 	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
   3937 	cmd->flags |= IWM_CMD_WANT_SKB;
   3938 
   3939 	err = iwm_send_cmd(sc, cmd);
   3940 	if (err)
   3941 		return err;
   3942 	pkt = cmd->resp_pkt;
   3943 
   3944 	/* Can happen if RFKILL is asserted */
   3945 	if (!pkt) {
   3946 		err = 0;
   3947 		goto out_free_resp;
   3948 	}
   3949 
   3950 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   3951 		err = EIO;
   3952 		goto out_free_resp;
   3953 	}
   3954 
   3955 	resp_len = iwm_rx_packet_payload_len(pkt);
   3956 	if (resp_len != sizeof(*resp)) {
   3957 		err = EIO;
   3958 		goto out_free_resp;
   3959 	}
   3960 
   3961 	resp = (void *)pkt->data;
   3962 	*status = le32toh(resp->status);
   3963  out_free_resp:
   3964 	iwm_free_resp(sc, cmd);
   3965 	return err;
   3966 }
   3967 
   3968 static int
   3969 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
   3970     const void *data, uint32_t *status)
   3971 {
   3972 	struct iwm_host_cmd cmd = {
   3973 		.id = id,
   3974 		.len = { len, },
   3975 		.data = { data, },
   3976 	};
   3977 
   3978 	return iwm_send_cmd_status(sc, &cmd, status);
   3979 }
   3980 
/*
 * Release the shared command-response buffer reserved in
 * iwm_send_cmd() and wake up any thread waiting to use it.
 */
static void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
	KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
	wakeup(&sc->sc_wantresp);
}
   3989 
   3990 static void
   3991 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
   3992 {
   3993 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
   3994 	struct iwm_tx_data *data;
   3995 
   3996 	if (qid != IWM_CMD_QUEUE) {
   3997 		return;	/* Not a command ack. */
   3998 	}
   3999 
   4000 	data = &ring->data[idx];
   4001 
   4002 	if (data->m != NULL) {
   4003 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   4004 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   4005 		bus_dmamap_unload(sc->sc_dmat, data->map);
   4006 		m_freem(data->m);
   4007 		data->m = NULL;
   4008 	}
   4009 	wakeup(&ring->desc[idx]);
   4010 }
   4011 
#if 0
/*
 * Update the TX scheduler's byte-count table for a queue slot;
 * necessary only for block ack mode.
 *
 * Fix: the original referenced an undeclared identifier 'w' when
 * computing the bus_dmamap_sync() offsets; use the address of the
 * table entry that was actually written instead.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
	    (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/*
	 * The first IWM_TFD_QUEUE_SIZE_BC_DUP entries are mirrored
	 * past IWM_TFD_QUEUE_SIZE_MAX; reason unclear (inherited from
	 * the reference driver) -- TODO confirm against firmware docs.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] =
		    w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)
		    &scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
   4047 
/*
 * Fill in various bits for management frames, and leave them
 * unfilled for data frames (firmware takes care of that).
 * Return the selected TX rate.
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
    struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags, i;
	int nrates = ni->ni_rates.rs_nrates;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA) {
		/* for non-data, use the lowest supported rate */
		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
#ifndef IEEE80211_NO_HT
	} else if (ic->ic_fixed_mcs != -1) {
		/* an MCS was fixed via ifconfig; use its rate index */
		ridx = sc->sc_fixed_ridx;
#endif
	} else if (ic->ic_fixed_rate != -1) {
		/* a legacy rate was fixed via ifconfig */
		ridx = sc->sc_fixed_ridx;
	} else {
		/* for data frames, use RS table */
		tx->initial_rate_index = 0;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		DPRINTFN(12, ("start with txrate %d\n",
		    tx->initial_rate_index));
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
			ridx = iwm_mcs2ridx[ni->ni_txmcs];
			return &iwm_rates[ridx];
		}
#endif
		/* Map the node's current TX rate to a rate table index;
		 * fall back to the band's lowest rate if not found. */
		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
		for (i = 0; i < nrates; i++) {
			if (iwm_rates[i].rate == (ni->ni_txrate &
			    IEEE80211_RATE_VAL)) {
				ridx = i;
				break;
			}
		}
		return &iwm_rates[ridx];
	}

	/* Build rate_n_flags for the chosen fixed/management rate. */
	rinfo = &iwm_rates[ridx];
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
#ifndef IEEE80211_NO_HT
	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
		rate_flags |= IWM_RATE_MCS_HT_MSK;
		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
	} else
#endif
		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
   4118 
/* Size of the first TB in a TX descriptor; covers the start of the
 * TX command (the remainder goes into the second TB). */
#define TB0_SIZE 16
/*
 * Queue an 802.11 frame for transmission on the TX ring matching
 * access category 'ac'.  Builds the TX command, DMA-maps the mbuf
 * (linearizing it if it has too many segments), and hands the
 * descriptor to the firmware.  On error the mbuf is freed; on
 * success it stays attached to the ring slot until the TX
 * completion is handled by iwm_rx_tx_cmd().
 * Returns 0 on success or an errno value.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, err, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tid = 0;

	/* Use the TX ring matching the access category. */
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Build the TX command header in the ring's command slot. */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Select the TX rate and fill rate-related command fields. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Radiotap capture, if a bpf listener is attached. */
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
#ifndef IEEE80211_NO_HT
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    rinfo->plcp == IWM_RATE_INVM_PLCP) {
			tap->wt_rate = (0x80 | rinfo->ht_plcp);
		} else
#endif
			tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames require an ACK. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request protection for large unicast data frames, or
	 * whenever the use-protection flag is set. */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
	     (ic->ic_flags & IEEE80211_F_USEPROT)))
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;

	/* Multicast and non-data frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = IWM_AUX_STA_ID;
	else
		tx->sta_id = IWM_STATION_ID;

	/* Power-save frame timeout depends on the frame type. */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		else
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	/* DMA-map the payload. */
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err) {
		if (err != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		/* Retry with the now-contiguous mbuf. */
		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB0 and TB1 cover the TX command plus the 802.11 header. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command, and descriptor out to memory. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
	    le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
   4346 
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush the TX path for the queues selected by
 * tfd_msk.  When 'sync' is non-zero the command is sent synchronously.
 */
static int
iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int err;

	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (err)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    err);
	return err;
}
#endif
   4366 
/* Turn the device LED on. */
static void
iwm_led_enable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}
   4372 
/* Turn the device LED off. */
static void
iwm_led_disable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
   4378 
/* Return non-zero if the LED register is set to the "on" value. */
static int
iwm_led_is_enabled(struct iwm_softc *sc)
{
	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
}
   4384 
   4385 static void
   4386 iwm_led_blink_timeout(void *arg)
   4387 {
   4388 	struct iwm_softc *sc = arg;
   4389 
   4390 	if (iwm_led_is_enabled(sc))
   4391 		iwm_led_disable(sc);
   4392 	else
   4393 		iwm_led_enable(sc);
   4394 
   4395 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
   4396 }
   4397 
/* Start blinking the LED by arming the blink callout (200ms period). */
static void
iwm_led_blink_start(struct iwm_softc *sc)
{
	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
}
   4403 
/* Stop the blink callout and leave the LED turned off. */
static void
iwm_led_blink_stop(struct iwm_softc *sc)
{
	callout_stop(&sc->sc_led_blink_to);
	iwm_led_disable(sc);
}
   4410 
   4411 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
   4412 
   4413 static int
   4414 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
   4415     struct iwm_beacon_filter_cmd *cmd)
   4416 {
   4417 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
   4418 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
   4419 }
   4420 
/*
 * Set connection-quality-monitor related fields in a beacon filter
 * command; currently only the beacon abort enable flag.
 */
static void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
   4427 
   4428 static int
   4429 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
   4430 {
   4431 	struct iwm_beacon_filter_cmd cmd = {
   4432 		IWM_BF_CMD_CONFIG_DEFAULTS,
   4433 		.bf_enable_beacon_filter = htole32(1),
   4434 		.ba_enable_beacon_abort = htole32(enable),
   4435 	};
   4436 
   4437 	if (!sc->sc_bf.bf_enabled)
   4438 		return 0;
   4439 
   4440 	sc->sc_bf.ba_enabled = enable;
   4441 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
   4442 	return iwm_beacon_filter_send_cmd(sc, &cmd);
   4443 }
   4444 
   4445 static void
   4446 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
   4447     struct iwm_mac_power_cmd *cmd)
   4448 {
   4449 	struct ieee80211_node *ni = &in->in_ni;
   4450 	int dtim_period, dtim_msec, keep_alive;
   4451 
   4452 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   4453 	    in->in_color));
   4454 	if (ni->ni_dtim_period)
   4455 		dtim_period = ni->ni_dtim_period;
   4456 	else
   4457 		dtim_period = 1;
   4458 
   4459 	/*
   4460 	 * Regardless of power management state the driver must set
   4461 	 * keep alive period. FW will use it for sending keep alive NDPs
   4462 	 * immediately after association. Check that keep alive period
   4463 	 * is at least 3 * DTIM.
   4464 	 */
   4465 	dtim_msec = dtim_period * ni->ni_intval;
   4466 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
   4467 	keep_alive = roundup(keep_alive, 1000) / 1000;
   4468 	cmd->keep_alive_seconds = htole16(keep_alive);
   4469 
   4470 #ifdef notyet
   4471 	cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
   4472 	cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
   4473 	cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
   4474 #endif
   4475 }
   4476 
   4477 static int
   4478 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
   4479 {
   4480 	int err;
   4481 	int ba_enable;
   4482 	struct iwm_mac_power_cmd cmd;
   4483 
   4484 	memset(&cmd, 0, sizeof(cmd));
   4485 
   4486 	iwm_power_build_cmd(sc, in, &cmd);
   4487 
   4488 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
   4489 	    sizeof(cmd), &cmd);
   4490 	if (err)
   4491 		return err;
   4492 
   4493 	ba_enable = !!(cmd.flags &
   4494 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
   4495 	return iwm_update_beacon_abort(sc, in, ba_enable);
   4496 }
   4497 
/*
 * Configure device-wide power management.  Power saving stays
 * disabled: the CAM flag is set (presumably "continuously awake
 * mode" -- TODO confirm against the firmware API headers).  A no-op
 * on firmware without the DEVICE_PS_CMD capability.
 */
static int
iwm_power_update_device(struct iwm_softc *sc)
{
	struct iwm_device_power_cmd cmd = {
#ifdef notyet
		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
#else
		.flags = 0,
#endif
	};

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
		return 0;

	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
	DPRINTF(("Sending device power command with flags = 0x%X\n",
	    cmd.flags));

	return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
}
   4518 
#ifdef notyet
/*
 * Enable beacon filtering in the firmware and record that state in
 * sc_bf on success.  Currently compiled out.
 */
static int
iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
	};
	int err;

	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	err = iwm_beacon_filter_send_cmd(sc, &cmd);

	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
#endif
   4538 
   4539 static int
   4540 iwm_disable_beacon_filter(struct iwm_softc *sc)
   4541 {
   4542 	struct iwm_beacon_filter_cmd cmd;
   4543 	int err;
   4544 
   4545 	memset(&cmd, 0, sizeof(cmd));
   4546 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
   4547 		return 0;
   4548 
   4549 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
   4550 	if (err == 0)
   4551 		sc->sc_bf.bf_enabled = 0;
   4552 
   4553 	return err;
   4554 }
   4555 
   4556 static int
   4557 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
   4558 {
   4559 	struct iwm_add_sta_cmd_v7 add_sta_cmd;
   4560 	int err;
   4561 	uint32_t status;
   4562 
   4563 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
   4564 
   4565 	add_sta_cmd.sta_id = IWM_STATION_ID;
   4566 	add_sta_cmd.mac_id_n_color
   4567 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   4568 	if (!update) {
   4569 		int ac;
   4570 		for (ac = 0; ac < WME_NUM_AC; ac++) {
   4571 			add_sta_cmd.tfd_queue_msk |=
   4572 			    htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
   4573 		}
   4574 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
   4575 	}
   4576 	add_sta_cmd.add_modify = update ? 1 : 0;
   4577 	add_sta_cmd.station_flags_msk
   4578 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
   4579 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
   4580 	if (update)
   4581 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
   4582 
   4583 #ifndef IEEE80211_NO_HT
   4584 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
   4585 		add_sta_cmd.station_flags_msk
   4586 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
   4587 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
   4588 
   4589 		add_sta_cmd.station_flags
   4590 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
   4591 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
   4592 		case IEEE80211_AMPDU_PARAM_SS_2:
   4593 			add_sta_cmd.station_flags
   4594 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
   4595 			break;
   4596 		case IEEE80211_AMPDU_PARAM_SS_4:
   4597 			add_sta_cmd.station_flags
   4598 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
   4599 			break;
   4600 		case IEEE80211_AMPDU_PARAM_SS_8:
   4601 			add_sta_cmd.station_flags
   4602 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
   4603 			break;
   4604 		case IEEE80211_AMPDU_PARAM_SS_16:
   4605 			add_sta_cmd.station_flags
   4606 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
   4607 			break;
   4608 		default:
   4609 			break;
   4610 		}
   4611 	}
   4612 #endif
   4613 
   4614 	status = IWM_ADD_STA_SUCCESS;
   4615 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
   4616 	    &add_sta_cmd, &status);
   4617 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
   4618 		err = EIO;
   4619 
   4620 	return err;
   4621 }
   4622 
   4623 static int
   4624 iwm_add_aux_sta(struct iwm_softc *sc)
   4625 {
   4626 	struct iwm_add_sta_cmd_v7 cmd;
   4627 	int err;
   4628 	uint32_t status;
   4629 
   4630 	err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
   4631 	if (err)
   4632 		return err;
   4633 
   4634 	memset(&cmd, 0, sizeof(cmd));
   4635 	cmd.sta_id = IWM_AUX_STA_ID;
   4636 	cmd.mac_id_n_color =
   4637 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
   4638 	cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
   4639 	cmd.tid_disable_tx = htole16(0xffff);
   4640 
   4641 	status = IWM_ADD_STA_SUCCESS;
   4642 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
   4643 	    &status);
   4644 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
   4645 		err = EIO;
   4646 
   4647 	return err;
   4648 }
   4649 
   4650 #define IWM_PLCP_QUIET_THRESH 1
   4651 #define IWM_ACTIVE_QUIET_TIME 10
   4652 #define LONG_OUT_TIME_PERIOD 600
   4653 #define SHORT_OUT_TIME_PERIOD 200
   4654 #define SUSPEND_TIME_PERIOD 100
   4655 
   4656 static uint16_t
   4657 iwm_scan_rx_chain(struct iwm_softc *sc)
   4658 {
   4659 	uint16_t rx_chain;
   4660 	uint8_t rx_ant;
   4661 
   4662 	rx_ant = iwm_fw_valid_rx_ant(sc);
   4663 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
   4664 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
   4665 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
   4666 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
   4667 	return htole16(rx_chain);
   4668 }
   4669 
   4670 static uint32_t
   4671 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
   4672 {
   4673 	uint32_t tx_ant;
   4674 	int i, ind;
   4675 
   4676 	for (i = 0, ind = sc->sc_scan_last_antenna;
   4677 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
   4678 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
   4679 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
   4680 			sc->sc_scan_last_antenna = ind;
   4681 			break;
   4682 		}
   4683 	}
   4684 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
   4685 
   4686 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
   4687 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
   4688 				   tx_ant);
   4689 	else
   4690 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
   4691 }
   4692 
#ifdef notyet
/*
 * If req->n_ssids > 0, it means we should do an active scan.
 * In case of active scan w/o directed scan, we receive a zero-length SSID
 * just to notify that this scan is active and not passive.
 * In order to notify the FW of the number of SSIDs we wish to scan (including
 * the zero-length one), we need to set the corresponding bits in chan->type,
 * one for each SSID, and set the active bit (first). If the first SSID is
 * already included in the probe template, so we need to set only
 * req->n_ssids - 1 bits in addition to the first bit.
 */
/* Active dwell time per channel; 2GHz needs more time per probed SSID. */
static uint16_t
iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
{
	if (flags & IEEE80211_CHAN_2GHZ)
		return 30  + 3 * (n_ssids + 1);
	return 20  + 2 * (n_ssids + 1);
}

/* Passive dwell time per channel for the given band. */
static uint16_t
iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
{
	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
}
#endif
   4718 
/*
 * Fill the LMAC scan command's channel configuration array with every
 * configured channel, up to the firmware's scan channel limit.
 * Returns the number of channel entries written.  n_ssids is
 * currently unused: the directed-scan flag below is compiled out.
 */
static uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		/* ic_flags == 0 means this channel is not configured. */
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
		chan->iter_count = htole16(1);
		chan->iter_interval = 0;
		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
#if 0 /* makes scanning while associated less useful */
		if (n_ssids != 0)
			chan->flags |= htole32(1 << 1); /* select SSID 0 */
#endif
		chan++;
		nchan++;
	}

	return nchan;
}
   4748 
   4749 static uint8_t
   4750 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
   4751     struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
   4752 {
   4753 	struct ieee80211com *ic = &sc->sc_ic;
   4754 	struct ieee80211_channel *c;
   4755 	uint8_t nchan;
   4756 
   4757 	for (nchan = 0, c = &ic->ic_channels[1];
   4758 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
   4759 	    nchan < sc->sc_capa_n_scan_channels;
   4760 	    c++) {
   4761 		if (c->ic_flags == 0)
   4762 			continue;
   4763 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
   4764 		chan->iter_count = 1;
   4765 		chan->iter_interval = htole16(0);
   4766 #if 0 /* makes scanning while associated less useful */
   4767 		if (n_ssids != 0)
   4768 			chan->flags = htole32(1 << 0); /* select SSID 0 */
   4769 #endif
   4770 		chan++;
   4771 		nchan++;
   4772 	}
   4773 
   4774 	return nchan;
   4775 }
   4776 
/*
 * Build the probe request template the firmware transmits during
 * active scans, directly into preq->buf, and record the offset/length
 * of the MAC header, the per-band IE sections and the common IE
 * section so the firmware can patch them per channel.
 *
 * Returns 0 on success or ENOBUFS when the template buffer is too
 * small for the required elements.
 */
static int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	/* Room for the 802.11 header plus the SSID element (2 + len). */
	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		/* Rates + extended rates elements: two 2-byte headers. */
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	/* Empty DS Parameter Set element; channel is filled by firmware. */
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

#ifndef IEEE80211_NO_HT
	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
#endif

	/*
	 * NOTE(review): when IEEE80211_NO_HT is defined, common_data.offset
	 * stays 0 (from the memset) while len is computed from the last
	 * band's 'pos' -- verify this matches firmware expectations.
	 */
	preq->common_data.len = htole16(frm - pos);

	return 0;
}
   4870 
/*
 * Start a scan using the older LMAC scan API
 * (IWM_SCAN_OFFLOAD_REQUEST_CMD).  Builds a single-iteration scan
 * plan covering all configured channels, active (directed) when a
 * desired ESSID is set and passive otherwise.
 *
 * Returns 0 on success or an errno (ENOMEM if the request would not
 * fit in a command payload).
 */
static int
iwm_lmac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Request = fixed header + per-channel configs + probe template. */
	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	/* Active scan only when we have an ESSID to probe for. */
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0);

	/* The probe request template follows the channel configs. */
	err = iwm_fill_probe_req(sc,
	    (struct iwm_scan_probe_req *)(req->data +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	     sc->sc_capa_n_scan_channels)));
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
   4976 
/*
 * Send the one-time UMAC scan configuration (IWM_SCAN_CFG_CMD):
 * antenna chains, legacy rates, dwell times, MAC address and the list
 * of all configured channel numbers.
 *
 * Returns 0 on success or an errno.
 */
static int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
		.flags = 0,
	};
	/* All legacy (non-HT) CCK and OFDM rates. */
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	/* One channel-number byte per supported scan channel. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	/* List every configured channel (ic_flags == 0: not configured). */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(scan_config, cmd_size);
	return err;
}
   5051 
/*
 * Start a scan using the newer UMAC scan API (IWM_SCAN_REQ_UMAC).
 * Builds a single-iteration scan over all configured channels, active
 * (directed) when a desired ESSID is set and passive otherwise.
 *
 * Returns 0 on success or an errno (ENOMEM if the request would not
 * fit in a command payload).
 */
static int
iwm_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_umac *req;
	struct iwm_scan_req_umac_tail *tail;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Request = fixed header + per-channel configs + tail (SSIDs etc). */
	req_len = sizeof(struct iwm_scan_req_umac) +
	    (sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels) +
	    sizeof(struct iwm_scan_req_umac_tail);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	req->n_channels = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)req->data,
	    ic->ic_des_esslen != 0);

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	/* The tail structure follows the variable-size channel array. */
	tail = (struct iwm_scan_req_umac_tail *)(req->data +
		sizeof(struct iwm_scan_channel_cfg_umac) *
			sc->sc_capa_n_scan_channels);

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		tail->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	err = iwm_fill_probe_req(sc, &tail->preq);
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
   5135 
   5136 static uint8_t
   5137 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
   5138 {
   5139 	int i;
   5140 	uint8_t rval;
   5141 
   5142 	for (i = 0; i < rs->rs_nrates; i++) {
   5143 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
   5144 		if (rval == iwm_rates[ridx].rate)
   5145 			return rs->rs_rates[i];
   5146 	}
   5147 	return 0;
   5148 }
   5149 
/*
 * Compute the CCK and OFDM ACK/control-response rate bitmaps for the
 * MAC context command, from the node's basic rates plus all mandatory
 * rates below them (see the standard quote below).  CCK rates are
 * only collected on 2GHz channels.  Results are returned through
 * *cck_rates and *ofdm_rates as host-endian bitmaps.
 */
static void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* Collect the basic CCK rates (2GHz only, or channel unknown). */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	/* Collect the basic OFDM rates. */
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
   5235 
/*
 * Fill the common part of a MAC context command for a BSS station
 * interface: id/color, addresses, ACK rates, preamble/slot flags,
 * per-AC EDCA parameters and protection flags.
 */
static void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(IWM_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);

	/* Rates the firmware may use for control-response frames. */
	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* EDCA parameters, indexed by TX FIFO rather than by AC. */
	for (i = 0; i < WME_NUM_AC; i++) {
		struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
		int txf = iwm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
		cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
		cmd->ac[txf].aifsn = wmep->wmep_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

#ifndef IEEE80211_NO_HT
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
			/*
			 * NOTE(review): no break here, so this also sets
			 * FAT protection via the 20MHZ case below --
			 * confirm the fallthrough is intentional.
			 */
		case IEEE80211_HTPROT_20MHZ:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
			    IWM_MAC_PROT_FLG_FAT_PROT);
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
#endif

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}
   5310 
   5311 static void
   5312 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
   5313     struct iwm_mac_data_sta *sta, int assoc)
   5314 {
   5315 	struct ieee80211_node *ni = &in->in_ni;
   5316 	uint32_t dtim_off;
   5317 	uint64_t tsf;
   5318 
   5319 	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
   5320 	tsf = le64toh(ni->ni_tstamp.tsf);
   5321 
   5322 	sta->is_assoc = htole32(assoc);
   5323 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
   5324 	sta->dtim_tsf = htole64(tsf + dtim_off);
   5325 	sta->bi = htole32(ni->ni_intval);
   5326 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
   5327 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
   5328 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
   5329 	sta->listen_interval = htole32(10);
   5330 	sta->assoc_id = htole32(ni->ni_associd);
   5331 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
   5332 }
   5333 
   5334 static int
   5335 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
   5336     int assoc)
   5337 {
   5338 	struct ieee80211_node *ni = &in->in_ni;
   5339 	struct iwm_mac_ctx_cmd cmd;
   5340 
   5341 	memset(&cmd, 0, sizeof(cmd));
   5342 
   5343 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
   5344 
   5345 	/* Allow beacons to pass through as long as we are not associated or we
   5346 	 * do not have dtim period information */
   5347 	if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
   5348 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
   5349 	else
   5350 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
   5351 
   5352 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
   5353 }
   5354 
   5355 #define IWM_MISSED_BEACONS_THRESHOLD 8
   5356 
   5357 static void
   5358 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
   5359 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   5360 {
   5361 	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
   5362 
   5363 	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
   5364 	    le32toh(mb->mac_id),
   5365 	    le32toh(mb->consec_missed_beacons),
   5366 	    le32toh(mb->consec_missed_beacons_since_last_rx),
   5367 	    le32toh(mb->num_recvd_beacons),
   5368 	    le32toh(mb->num_expected_beacons)));
   5369 
   5370 	/*
   5371 	 * TODO: the threshold should be adjusted based on latency conditions,
   5372 	 * and/or in case of a CS flow on one of the other AP vifs.
   5373 	 */
   5374 	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
   5375 	    IWM_MISSED_BEACONS_THRESHOLD)
   5376 		ieee80211_beacon_miss(&sc->sc_ic);
   5377 }
   5378 
   5379 static int
   5380 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
   5381 {
   5382 	struct iwm_time_quota_cmd cmd;
   5383 	int i, idx, num_active_macs, quota, quota_rem;
   5384 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
   5385 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
   5386 	uint16_t id;
   5387 
   5388 	memset(&cmd, 0, sizeof(cmd));
   5389 
   5390 	/* currently, PHY ID == binding ID */
   5391 	if (in) {
   5392 		id = in->in_phyctxt->id;
   5393 		KASSERT(id < IWM_MAX_BINDINGS);
   5394 		colors[id] = in->in_phyctxt->color;
   5395 
   5396 		if (1)
   5397 			n_ifs[id] = 1;
   5398 	}
   5399 
   5400 	/*
   5401 	 * The FW's scheduling session consists of
   5402 	 * IWM_MAX_QUOTA fragments. Divide these fragments
   5403 	 * equally between all the bindings that require quota
   5404 	 */
   5405 	num_active_macs = 0;
   5406 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
   5407 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
   5408 		num_active_macs += n_ifs[i];
   5409 	}
   5410 
   5411 	quota = 0;
   5412 	quota_rem = 0;
   5413 	if (num_active_macs) {
   5414 		quota = IWM_MAX_QUOTA / num_active_macs;
   5415 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
   5416 	}
   5417 
   5418 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
   5419 		if (colors[i] < 0)
   5420 			continue;
   5421 
   5422 		cmd.quotas[idx].id_and_color =
   5423 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
   5424 
   5425 		if (n_ifs[i] <= 0) {
   5426 			cmd.quotas[idx].quota = htole32(0);
   5427 			cmd.quotas[idx].max_duration = htole32(0);
   5428 		} else {
   5429 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
   5430 			cmd.quotas[idx].max_duration = htole32(0);
   5431 		}
   5432 		idx++;
   5433 	}
   5434 
   5435 	/* Give the remainder of the session to the first binding */
   5436 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
   5437 
   5438 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
   5439 }
   5440 
/*
 * Prepare the firmware for authentication with the current BSS:
 * switch the station-flusher to FULL_ON, program the multicast
 * filter, point PHY context 0 at the BSS channel, then add and bind
 * the MAC context and station entry.  Finally a time event
 * "protects" the session so the firmware stays on-channel while the
 * authentication exchange is in flight.  The command order below is
 * required by the firmware; do not reorder.
 *
 * Returns 0 on success or an error from the first failing command.
 */
static int
iwm_auth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	uint32_t duration;
	int err;

	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err)
		return err;

	err = iwm_allow_mcast(sc);
	if (err)
		return err;

	/* Re-point PHY context 0 at the channel of the BSS we will join. */
	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err)
		return err;
	in->in_phyctxt = &sc->sc_phyctxt[0];

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add MAC context (error %d)\n", err);
		return err;
	}

	/* Bind the new MAC context to the PHY context. */
	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err)
		return err;

	/* Add the AP as a (not yet associated) station. */
	err = iwm_add_sta_cmd(sc, in, 0);
	if (err)
		return err;

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
		return err;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 2;
	else
		duration = IEEE80211_DUR_TU;
	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
	/* Brief pause to let the firmware establish the time event. */
	DELAY(100);

	return 0;
}
   5498 
   5499 static int
   5500 iwm_assoc(struct iwm_softc *sc)
   5501 {
   5502 	struct ieee80211com *ic = &sc->sc_ic;
   5503 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
   5504 	int err;
   5505 
   5506 	err = iwm_add_sta_cmd(sc, in, 1);
   5507 	if (err)
   5508 		return err;
   5509 
   5510 	return 0;
   5511 }
   5512 
/*
 * net80211 node allocation hook.  Allocates our extended iwm_node;
 * M_ZERO ensures the driver-private fields (phyctxt pointer, AMRR
 * state, LQ command) start out zeroed.  May return NULL (M_NOWAIT).
 */
static struct ieee80211_node *
iwm_node_alloc(struct ieee80211_node_table *nt)
{
	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
}
   5518 
/*
 * Periodic (500ms) callout driving AMRR rate adaptation while we
 * operate in STA mode with automatic rate selection.  Runs at
 * splnet; reschedules itself unconditionally.
 */
static void
iwm_calib_timeout(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
#ifndef IEEE80211_NO_HT
	struct ieee80211_node *ni = &in->in_ni;
	int otxrate;	/* TX rate (or MCS) before AMRR ran */
#endif
	int s;

	s = splnet();
	/*
	 * Only adapt when the user has not pinned a fixed rate.
	 * NOTE(review): with HT enabled this ORs the two "not fixed"
	 * tests, so AMRR still runs if only one of rate/MCS is fixed
	 * -- confirm this is intended (an AND would seem stricter).
	 */
	if ((ic->ic_fixed_rate == -1
#ifndef IEEE80211_NO_HT
	    || ic->ic_fixed_mcs == -1
#endif
	    ) &&
	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
#ifndef IEEE80211_NO_HT
		/* Remember the pre-AMRR rate so we can detect a change. */
		if (ni->ni_flags & IEEE80211_NODE_HT)
			otxrate = ni->ni_txmcs;
		else
			otxrate = ni->ni_txrate;
#endif
		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);

#ifndef IEEE80211_NO_HT
		/*
		 * If AMRR has chosen a new TX rate we must update
		 * the firwmare's LQ rate table from process context.
		 */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    otxrate != ni->ni_txmcs)
			softint_schedule(sc->setrates_task);
		else if (otxrate != ni->ni_txrate)
			softint_schedule(sc->setrates_task);
#endif
	}
	splx(s);

	/* Re-arm for the next calibration pass in 500ms. */
	callout_schedule(&sc->sc_calib_to, mstohz(500));
}
   5562 
#ifndef IEEE80211_NO_HT
/*
 * Softint handler scheduled from iwm_calib_timeout() whenever AMRR
 * picks a new TX rate: pushes a fresh LQ rate table to the firmware.
 */
static void
iwm_setrates_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;

	/* Update rates table based on new TX rate determined by AMRR. */
	iwm_setrates(in);
}
   5574 
/*
 * Build and send the firmware's link-quality (LQ) command for the
 * given node.  The rs_table is filled with rate entries in
 * descending order starting at the node's current TX rate; the
 * firmware falls back through the table on TX failures.  Returns
 * the result of iwm_send_cmd().
 */
static int
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int i, j, ridx, ridx_min, tab = 0;
#ifndef IEEE80211_NO_HT
	int sgi_ok;	/* nonzero if short guard interval may be used */
#endif
	struct iwm_host_cmd cmd = {
		.id = IWM_LQ_CMD,
		.len = { sizeof(in->in_lq), },
	};

	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* Honor net80211's RTS/CTS protection decision. */
	if (ic->ic_flags & IEEE80211_F_USEPROT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

#ifndef IEEE80211_NO_HT
	/* SGI only if the peer advertised 20MHz short-GI capability. */
	sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
#endif


	/*
	 * Fill the LQ rate selection table with legacy and/or HT rates
	 * in descending order, i.e. with the node's current TX rate first.
	 * In cases where throughput of an HT rate corresponds to a legacy
	 * rate it makes no sense to add both. We rely on the fact that
	 * iwm_rates is laid out such that equivalent HT/legacy rates share
	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
	 */
	j = 0;
	/* CCK rates only exist on 2GHz; start at OFDM for 5GHz. */
	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
		if (j >= __arraycount(lq->rs_table))
			break;
		tab = 0;
#ifndef IEEE80211_NO_HT
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
			/* Find an MCS at or below the current TX MCS that
			 * the peer can receive and maps to this ridx. */
			for (i = ni->ni_txmcs; i >= 0; i--) {
				if (isclr(ni->ni_rxmcs, i))
					continue;
				if (ridx == iwm_mcs2ridx[i]) {
					tab = iwm_rates[ridx].ht_plcp;
					tab |= IWM_RATE_MCS_HT_MSK;
					if (sgi_ok)
						tab |= IWM_RATE_MCS_SGI_MSK;
					break;
				}
			}
		}
#endif
		/* No HT entry chosen: try a legacy rate for this ridx. */
		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
			for (i = ni->ni_txrate; i >= 0; i--) {
				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
				    IEEE80211_RATE_VAL)) {
					tab = iwm_rates[ridx].plcp;
					break;
				}
			}
		}

		if (tab == 0)
			continue;

		/* Transmit on antenna A only (single-stream). */
		tab |= 1 << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[j++] = htole32(tab);
	}

	/* Fill the rest with the lowest possible rate */
	i = j > 0 ? j - 1 : 0;
	while (j < __arraycount(lq->rs_table))
		lq->rs_table[j++] = lq->rs_table[i];

	lq->single_stream_ant_msk = IWM_ANT_A;
	lq->dual_stream_ant_msk = IWM_ANT_AB;

	lq->agg_time_limit = htole16(4000);	/* 4ms */
	lq->agg_disable_start_th = 3;
#ifdef notyet
	lq->agg_frame_cnt_limit = 0x3f;
#else
	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
#endif

	cmd.data[0] = &in->in_lq;
	return iwm_send_cmd(sc, &cmd);
}
#endif
   5676 
/*
 * ifmedia change callback: translate a user-selected fixed rate (or
 * MCS) into a hardware rate index and restart the interface so the
 * new setting takes effect.  Returns 0 or an error from iwm_init().
 */
static int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	err = ieee80211_media_change(ifp);
	/* ENETRESET means the change requires a re-init; anything else
	 * (including 0) is returned to the caller unchanged. */
	if (err != ENETRESET)
		return err;

#ifndef IEEE80211_NO_HT
	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else
#endif
	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index.
		 * NOTE(review): if no entry matches, ridx ends up at
		 * IWM_RIDX_MAX + 1 -- confirm callers of sc_fixed_ridx
		 * tolerate an out-of-range index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		sc->sc_fixed_ridx = ridx;
	}

	/* Apply the change by restarting a running interface. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		err = iwm_init(ifp);
	}
	return err;
}
   5711 
/*
 * Workqueue handler performing the actual 802.11 state transition
 * requested by iwm_newstate().  Runs in process context because most
 * transitions require sending (sleeping) firmware commands.  The
 * generation counter detects a device reset that happened between
 * enqueue and execution, in which case the transition is abandoned.
 */
static void
iwm_newstate_cb(struct work *wk, void *v)
{
	struct iwm_softc *sc = v;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	int err;

	/* The work item was allocated by iwm_newstate(); free it now. */
	kmem_free(iwmns, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ostate, nstate));
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		/* Stale request; only honor a transition back to INIT. */
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate]));

	if (ostate == IEEE80211_S_SCAN && nstate != ostate)
		iwm_led_blink_stop(sc);

	if (ostate == IEEE80211_S_RUN && nstate != ostate)
		iwm_disable_beacon_filter(sc);

	/* Reset the device if moving out of AUTH, ASSOC, or RUN. */
	/* XXX Is there a way to switch states without a full reset? */
	if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
		iwm_stop_device(sc);
		iwm_init_hw(sc);

		/*
		 * Upon receiving a deauth frame from AP the net80211 stack
		 * puts the driver into AUTH state. This will fail with this
		 * driver so bring the FSM from RUN to SCAN in this case.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			/* Always pass arg as -1 since we can't Tx right now. */
			sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		/* A scan is already in flight; nothing to do. */
		if (ostate == nstate &&
		    ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
			return;
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
			err = iwm_umac_scan(sc);
		else
			err = iwm_lmac_scan(sc);
		if (err) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			return;
		}
		SET(sc->sc_flags, IWM_FLAG_SCANNING);
		ic->ic_state = nstate;
		iwm_led_blink_start(sc);
		/* sc_newstate() is deliberately NOT called here; the
		 * scan-end path completes the transition instead. */
		return;

	case IEEE80211_S_AUTH:
		err = iwm_auth(sc);
		if (err) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), err));
			return;
		}
		break;

	case IEEE80211_S_ASSOC:
		err = iwm_assoc(sc);
		if (err) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    err));
			return;
		}
		break;

	case IEEE80211_S_RUN:
		in = (struct iwm_node *)ic->ic_bss;

		/* We have now been assigned an associd by the AP. */
		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
			return;
		}

		err = iwm_power_update_device(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could send power command (error %d)\n", err);
			return;
		}
#ifdef notyet
		/*
		 * Disabled for now. Default beacon filter settings
		 * prevent net80211 from getting ERP and HT protection
		 * updates from beacons.
		 */
		err = iwm_enable_beacon_filter(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not enable beacon filter\n");
			return;
		}
#endif
		err = iwm_power_mac_update_mode(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update MAC power (error %d)\n", err);
			return;
		}

		err = iwm_update_quotas(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update quotas (error %d)\n", err);
			return;
		}

		ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);

		/* Start at lowest available bit-rate, AMRR will raise. */
		in->in_ni.ni_txrate = 0;
#ifndef IEEE80211_NO_HT
		in->in_ni.ni_txmcs = 0;
		iwm_setrates(in);
#endif

		callout_schedule(&sc->sc_calib_to, mstohz(500));
		iwm_led_enable(sc);
		break;

	default:
		break;
	}

	/* Let net80211 complete the state transition. */
	sc->sc_newstate(ic, nstate, arg);
}
   5869 
   5870 static int
   5871 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
   5872 {
   5873 	struct iwm_newstate_state *iwmns;
   5874 	struct ifnet *ifp = IC2IFP(ic);
   5875 	struct iwm_softc *sc = ifp->if_softc;
   5876 
   5877 	callout_stop(&sc->sc_calib_to);
   5878 
   5879 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
   5880 	if (!iwmns) {
   5881 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
   5882 		return ENOMEM;
   5883 	}
   5884 
   5885 	iwmns->ns_nstate = nstate;
   5886 	iwmns->ns_arg = arg;
   5887 	iwmns->ns_generation = sc->sc_generation;
   5888 
   5889 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
   5890 
   5891 	return 0;
   5892 }
   5893 
/*
 * Common completion path for both LMAC and UMAC scans: clear the
 * in-progress flag before handing the results to net80211 (which may
 * immediately start another scan).
 */
static void
iwm_endscan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(("scan ended\n"));

	CLR(sc->sc_flags, IWM_FLAG_SCANNING);
	ieee80211_end_scan(ic);
}
   5904 
   5905 /*
   5906  * Aging and idle timeouts for the different possible scenarios
   5907  * in default configuration
   5908  */
   5909 static const uint32_t
   5910 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
   5911 	{
   5912 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
   5913 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
   5914 	},
   5915 	{
   5916 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
   5917 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
   5918 	},
   5919 	{
   5920 		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
   5921 		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
   5922 	},
   5923 	{
   5924 		htole32(IWM_SF_BA_AGING_TIMER_DEF),
   5925 		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
   5926 	},
   5927 	{
   5928 		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
   5929 		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
   5930 	},
   5931 };
   5932 
   5933 /*
   5934  * Aging and idle timeouts for the different possible scenarios
   5935  * in single BSS MAC configuration.
   5936  */
   5937 static const uint32_t
   5938 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
   5939 	{
   5940 		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
   5941 		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
   5942 	},
   5943 	{
   5944 		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
   5945 		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
   5946 	},
   5947 	{
   5948 		htole32(IWM_SF_MCAST_AGING_TIMER),
   5949 		htole32(IWM_SF_MCAST_IDLE_TIMER)
   5950 	},
   5951 	{
   5952 		htole32(IWM_SF_BA_AGING_TIMER),
   5953 		htole32(IWM_SF_BA_IDLE_TIMER)
   5954 	},
   5955 	{
   5956 		htole32(IWM_SF_TX_RE_AGING_TIMER),
   5957 		htole32(IWM_SF_TX_RE_IDLE_TIMER)
   5958 	},
   5959 };
   5960 
/*
 * Fill an IWM_REPLY_SF_CFG_CMD ("smart fifo") command.  Watermarks
 * and timeouts are selected based on whether a BSS node is known
 * (ni != NULL, association flow) or not (defaults).
 */
static void
iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			/* MIMO watermarks disabled pending rxmcs support. */
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else
#endif
			watermark = IWM_SF_W_MARK_LEGACY;
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* All long-delay timeouts use one common aging timer. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts: BSS table when associated, defaults otherwise. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
   6008 
   6009 static int
   6010 iwm_sf_config(struct iwm_softc *sc, int new_state)
   6011 {
   6012 	struct ieee80211com *ic = &sc->sc_ic;
   6013 	struct iwm_sf_cfg_cmd sf_cmd = {
   6014 		.state = htole32(IWM_SF_FULL_ON),
   6015 	};
   6016 
   6017 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   6018 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
   6019 
   6020 	switch (new_state) {
   6021 	case IWM_SF_UNINIT:
   6022 	case IWM_SF_INIT_OFF:
   6023 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
   6024 		break;
   6025 	case IWM_SF_FULL_ON:
   6026 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
   6027 		break;
   6028 	default:
   6029 		return EINVAL;
   6030 	}
   6031 
   6032 	return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
   6033 	    sizeof(sf_cmd), &sf_cmd);
   6034 }
   6035 
   6036 static int
   6037 iwm_send_bt_init_conf(struct iwm_softc *sc)
   6038 {
   6039 	struct iwm_bt_coex_cmd bt_cmd;
   6040 
   6041 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
   6042 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
   6043 
   6044 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
   6045 }
   6046 
/*
 * Send an MCC (mobile country code) update to the firmware for
 * location-aware regulatory (LAR) support.  alpha2 is a two-letter
 * country code ("ZZ" requests the world-wide default).  The response
 * buffer is requested (IWM_CMD_WANT_SKB) but currently discarded.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = IWM_CMD_WANT_SKB,
		.data = { &mcc_cmd },
	};
	/* Firmware with LAR v2 expects the larger command layout. */
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
	int err;

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Country code is packed big-endian-style into 16 bits. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	err = iwm_send_cmd(sc, &hcmd);
	if (err)
		return err;

	/* Response contents are not used; just release the buffer. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
   6081 
/*
 * Set the thermal-throttling TX backoff value.  Best-effort: the
 * command result is deliberately ignored since callers cannot do
 * anything useful on failure.
 */
static void
iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
{
	struct iwm_host_cmd cmd = {
		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
		.len = { sizeof(uint32_t), },
		.data = { &backoff, },
	};

	iwm_send_cmd(sc, &cmd);
}
   6093 
   6094 static int
   6095 iwm_init_hw(struct iwm_softc *sc)
   6096 {
   6097 	struct ieee80211com *ic = &sc->sc_ic;
   6098 	int err, i, ac;
   6099 
   6100 	err = iwm_preinit(sc);
   6101 	if (err)
   6102 		return err;
   6103 
   6104 	err = iwm_start_hw(sc);
   6105 	if (err) {
   6106 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6107 		return err;
   6108 	}
   6109 
   6110 	err = iwm_run_init_mvm_ucode(sc, 0);
   6111 	if (err)
   6112 		return err;
   6113 
   6114 	/* Should stop and start HW since INIT image just loaded. */
   6115 	iwm_stop_device(sc);
   6116 	err = iwm_start_hw(sc);
   6117 	if (err) {
   6118 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6119 		return err;
   6120 	}
   6121 
   6122 	/* Restart, this time with the regular firmware */
   6123 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
   6124 	if (err) {
   6125 		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
   6126 		goto err;
   6127 	}
   6128 
   6129 	err = iwm_send_bt_init_conf(sc);
   6130 	if (err) {
   6131 		aprint_error_dev(sc->sc_dev,
   6132 		    "could not init bt coex (error %d)\n", err);
   6133 		goto err;
   6134 	}
   6135 
   6136 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
   6137 	if (err) {
   6138 		aprint_error_dev(sc->sc_dev,
   6139 		    "could not init tx ant config (error %d)\n", err);
   6140 		goto err;
   6141 	}
   6142 
   6143 	/* Send phy db control command and then phy db calibration*/
   6144 	err = iwm_send_phy_db_data(sc);
   6145 	if (err) {
   6146 		aprint_error_dev(sc->sc_dev,
   6147 		    "could not init phy db (error %d)\n", err);
   6148 		goto err;
   6149 	}
   6150 
   6151 	err = iwm_send_phy_cfg_cmd(sc);
   6152 	if (err) {
   6153 		aprint_error_dev(sc->sc_dev,
   6154 		    "could not send phy config (error %d)\n", err);
   6155 		goto err;
   6156 	}
   6157 
   6158 	/* Add auxiliary station for scanning */
   6159 	err = iwm_add_aux_sta(sc);
   6160 	if (err) {
   6161 		aprint_error_dev(sc->sc_dev,
   6162 		    "could not add aux station (error %d)\n", err);
   6163 		goto err;
   6164 	}
   6165 
   6166 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
   6167 		/*
   6168 		 * The channel used here isn't relevant as it's
   6169 		 * going to be overwritten in the other flows.
   6170 		 * For now use the first channel we have.
   6171 		 */
   6172 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
   6173 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
   6174 		    IWM_FW_CTXT_ACTION_ADD, 0);
   6175 		if (err) {
   6176 			aprint_error_dev(sc->sc_dev,
   6177 			    "could not add phy context %d (error %d)\n",
   6178 			    i, err);
   6179 			goto err;
   6180 		}
   6181 	}
   6182 
   6183 	/* Initialize tx backoffs to the minimum. */
   6184 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
   6185 		iwm_tt_tx_backoff(sc, 0);
   6186 
   6187 	err = iwm_power_update_device(sc);
   6188 	if (err) {
   6189 		aprint_error_dev(sc->sc_dev,
   6190 		    "could send power command (error %d)\n", err);
   6191 		goto err;
   6192 	}
   6193 
   6194 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
   6195 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
   6196 		if (err) {
   6197 			aprint_error_dev(sc->sc_dev,
   6198 			    "could not init LAR (error %d)\n", err);
   6199 			goto err;
   6200 		}
   6201 	}
   6202 
   6203 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
   6204 		err = iwm_config_umac_scan(sc);
   6205 		if (err) {
   6206 			aprint_error_dev(sc->sc_dev,
   6207 			    "could not configure scan (error %d)\n", err);
   6208 			goto err;
   6209 		}
   6210 	}
   6211 
   6212 	for (ac = 0; ac < WME_NUM_AC; ac++) {
   6213 		err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
   6214 		    iwm_ac_to_tx_fifo[ac]);
   6215 		if (err) {
   6216 			aprint_error_dev(sc->sc_dev,
   6217 			    "could not enable Tx queue %d (error %d)\n",
   6218 			    i, err);
   6219 			goto err;
   6220 		}
   6221 	}
   6222 
   6223 	err = iwm_disable_beacon_filter(sc);
   6224 	if (err) {
   6225 		aprint_error_dev(sc->sc_dev,
   6226 		    "could not disable beacon filter (error %d)\n", err);
   6227 		goto err;
   6228 	}
   6229 
   6230 	return 0;
   6231 
   6232  err:
   6233 	iwm_stop_device(sc);
   6234 	return err;
   6235 }
   6236 
/* Allow multicast from our BSSID. */
/*
 * Program the firmware's multicast filter: pass all multicast
 * (pass_all=1, empty address list) but only frames from our own
 * BSSID (filter_own=1).  Returns 0 or ENOMEM/command error.
 */
static int
iwm_allow_mcast(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	struct iwm_mcast_filter_cmd *cmd;
	size_t size;
	int err;

	/* Command length must be a multiple of 4 bytes. */
	size = roundup(sizeof(*cmd), 4);
	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
	if (cmd == NULL)
		return ENOMEM;
	cmd->filter_own = 1;
	cmd->port_id = 0;
	cmd->count = 0;		/* no explicit address list */
	cmd->pass_all = 1;	/* accept all multicast frames */
	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);

	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
	kmem_intr_free(cmd, size);
	return err;
}
   6261 
/*
 * Interface init: bring up the hardware and firmware and kick off an
 * initial scan.  Idempotent while IWM_FLAG_HW_INITED is set.
 * Returns 0 on success; on failure the interface is stopped and the
 * error returned.
 */
static int
iwm_init(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
		return 0;

	/* Invalidate any state-change work queued before this init. */
	sc->sc_generation++;
	sc->sc_flags &= ~IWM_FLAG_STOPPED;

	err = iwm_init_hw(sc);
	if (err) {
		iwm_stop(ifp, 1);
		return err;
	}

	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_flags |= IFF_RUNNING;

	ieee80211_begin_scan(&sc->sc_ic, 0);
	SET(sc->sc_flags, IWM_FLAG_HW_INITED);

	return 0;
}
   6288 
/*
 * Interface start routine: drain the management queue and, when in
 * RUN state, the data send queue, encapsulating and handing each
 * frame to iwm_tx().  Sets IFF_OACTIVE while any Tx ring is full.
 */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* Destination node is stashed in the mbuf ctx. */
			ni = M_GETCTX(m, struct ieee80211_node *);
			M_CLEARCTX(m);
			ac = WME_AC_BE;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Need a contiguous Ethernet header for classification. */
		if (m->m_len < sizeof (*eh) &&
		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		/* Tap before 802.11 encapsulation (Ethernet framing). */
		bpf_mtap(ifp, m);

		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		/* Tap the raw 802.11 frame. */
		bpf_mtap3(ic->ic_rawbpf, m);

		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the watchdog for this transmission. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}
}
   6374 
/*
 * Interface stop routine: mark the device stopped, force net80211
 * back to INIT state, cancel timers and shut the hardware down.
 * Bumping sc_generation invalidates pending newstate work items.
 */
static void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* The PHY context becomes invalid once the device stops. */
	if (in)
		in->in_phyctxt = NULL;

	if (ic->ic_state != IEEE80211_S_INIT)
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	callout_stop(&sc->sc_calib_to);
	iwm_led_blink_stop(sc);
	ifp->if_timer = sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
   6398 
   6399 static void
   6400 iwm_watchdog(struct ifnet *ifp)
   6401 {
   6402 	struct iwm_softc *sc = ifp->if_softc;
   6403 
   6404 	ifp->if_timer = 0;
   6405 	if (sc->sc_tx_timer > 0) {
   6406 		if (--sc->sc_tx_timer == 0) {
   6407 			aprint_error_dev(sc->sc_dev, "device timeout\n");
   6408 #ifdef IWM_DEBUG
   6409 			iwm_nic_error(sc);
   6410 #endif
   6411 			ifp->if_flags &= ~IFF_UP;
   6412 			iwm_stop(ifp, 1);
   6413 			ifp->if_oerrors++;
   6414 			return;
   6415 		}
   6416 		ifp->if_timer = 1;
   6417 	}
   6418 
   6419 	ieee80211_watchdog(&sc->sc_ic);
   6420 }
   6421 
/*
 * Interface ioctl handler.  Runs at splnet() to serialize against the
 * network stack; handles address/flag changes, multicast membership,
 * and delegates everything else to the 802.11 layer.
 */
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	const struct sockaddr *sa;
	int s, err = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		err = ifioctl_common(ifp, cmd, data);
		if (err)
			break;
		/* Bring the device up or down to match IFF_UP. */
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				err = iwm_init(ifp);
				if (err)
					ifp->if_flags &= ~IFF_UP;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop(ifp, 1);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
			err = ENXIO;
			break;
		}
		/*
		 * NOTE(review): SIOCADDMULTI is passed literally even for
		 * SIOCDELMULTI — presumably because ifreq_getaddr() returns
		 * the same address field for both; confirm before changing.
		 */
		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
		err = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(sa, &sc->sc_ec) :
		    ether_delmulti(sa, &sc->sc_ec);
		/* Multicast filter is not programmed; nothing to reset. */
		if (err == ENETRESET)
			err = 0;
		break;

	default:
		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
			err = ether_ioctl(ifp, cmd, data);
			break;
		}
		err = ieee80211_ioctl(ic, cmd, data);
		break;
	}

	/* ENETRESET from the 802.11 layer means "reprogram the hardware". */
	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwm_stop(ifp, 0);
			err = iwm_init(ifp);
		}
	}

	splx(s);
	return err;
}
   6487 
   6488 /*
   6489  * Note: This structure is read from the device with IO accesses,
   6490  * and the reading already does the endian conversion. As it is
   6491  * read with uint32_t-sized accesses, any members with a different size
   6492  * need to be ordered correctly though!
   6493  */
   6494 struct iwm_error_event_table {
   6495 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
   6496 	uint32_t error_id;		/* type of error */
   6497 	uint32_t trm_hw_status0;	/* TRM HW status */
   6498 	uint32_t trm_hw_status1;	/* TRM HW status */
   6499 	uint32_t blink2;		/* branch link */
   6500 	uint32_t ilink1;		/* interrupt link */
   6501 	uint32_t ilink2;		/* interrupt link */
   6502 	uint32_t data1;		/* error-specific data */
   6503 	uint32_t data2;		/* error-specific data */
   6504 	uint32_t data3;		/* error-specific data */
   6505 	uint32_t bcon_time;		/* beacon timer */
   6506 	uint32_t tsf_low;		/* network timestamp function timer */
   6507 	uint32_t tsf_hi;		/* network timestamp function timer */
   6508 	uint32_t gp1;		/* GP1 timer register */
   6509 	uint32_t gp2;		/* GP2 timer register */
   6510 	uint32_t fw_rev_type;	/* firmware revision type */
   6511 	uint32_t major;		/* uCode version major */
   6512 	uint32_t minor;		/* uCode version minor */
   6513 	uint32_t hw_ver;		/* HW Silicon version */
   6514 	uint32_t brd_ver;		/* HW board version */
   6515 	uint32_t log_pc;		/* log program counter */
   6516 	uint32_t frame_ptr;		/* frame pointer */
   6517 	uint32_t stack_ptr;		/* stack pointer */
   6518 	uint32_t hcmd;		/* last host command header */
   6519 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
   6520 				 * rxtx_flag */
   6521 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
   6522 				 * host_flag */
   6523 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
   6524 				 * enc_flag */
   6525 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
   6526 				 * time_flag */
   6527 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
   6528 				 * wico interrupt */
   6529 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
   6530 	uint32_t wait_event;		/* wait event() caller address */
   6531 	uint32_t l2p_control;	/* L2pControlField */
   6532 	uint32_t l2p_duration;	/* L2pDurationField */
   6533 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
   6534 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
   6535 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
   6536 				 * (LMPM_PMG_SEL) */
   6537 	uint32_t u_timestamp;	/* indicate when the date and time of the
   6538 				 * compilation */
   6539 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
   6540 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
   6541 
   6542 /*
   6543  * UMAC error struct - relevant starting from family 8000 chip.
   6544  * Note: This structure is read from the device with IO accesses,
   6545  * and the reading already does the endian conversion. As it is
   6546  * read with u32-sized accesses, any members with a different size
   6547  * need to be ordered correctly though!
   6548  */
   6549 struct iwm_umac_error_event_table {
   6550 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
   6551 	uint32_t error_id;	/* type of error */
   6552 	uint32_t blink1;	/* branch link */
   6553 	uint32_t blink2;	/* branch link */
   6554 	uint32_t ilink1;	/* interrupt link */
   6555 	uint32_t ilink2;	/* interrupt link */
   6556 	uint32_t data1;		/* error-specific data */
   6557 	uint32_t data2;		/* error-specific data */
   6558 	uint32_t data3;		/* error-specific data */
   6559 	uint32_t umac_major;
   6560 	uint32_t umac_minor;
   6561 	uint32_t frame_pointer;	/* core register 27 */
   6562 	uint32_t stack_pointer;	/* core register 28 */
   6563 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
   6564 	uint32_t nic_isr_pref;	/* ISR status register */
   6565 } __packed;
   6566 
/*
 * Firmware error-log layout constants: the log body starts one uint32_t
 * past the 'valid' word, and each log element is seven uint32_t wide.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
   6569 
   6570 #ifdef IWM_DEBUG
/*
 * Map of firmware error IDs to symbolic names, consumed by
 * iwm_desc_lookup().  The final entry ("ADVANCED_SYSASSERT", 0) is the
 * catch-all for unknown IDs and must remain last.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
   6592 
   6593 static const char *
   6594 iwm_desc_lookup(uint32_t num)
   6595 {
   6596 	int i;
   6597 
   6598 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
   6599 		if (advanced_lookup[i].num == num)
   6600 			return advanced_lookup[i].name;
   6601 
   6602 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
   6603 	return advanced_lookup[i].name;
   6604 }
   6605 
   6606 /*
   6607  * Support for dumping the error log seemed like a good idea ...
   6608  * but it's mostly hex junk and the only sensible thing is the
   6609  * hw/ucode revision (which we know anyway).  Since it's here,
   6610  * I'll just leave it in, just in case e.g. the Intel guys want to
   6611  * help us decipher some "ADVANCED_SYSASSERT" later.
   6612  */
   6613 static void
   6614 iwm_nic_error(struct iwm_softc *sc)
   6615 {
   6616 	struct iwm_error_event_table t;
   6617 	uint32_t base;
   6618 
   6619 	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
   6620 	base = sc->sc_uc.uc_error_event_table;
   6621 	if (base < 0x800000) {
   6622 		aprint_error_dev(sc->sc_dev,
   6623 		    "Invalid error log pointer 0x%08x\n", base);
   6624 		return;
   6625 	}
   6626 
   6627 	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
   6628 		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
   6629 		return;
   6630 	}
   6631 
   6632 	if (!t.valid) {
   6633 		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
   6634 		return;
   6635 	}
   6636 
   6637 	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
   6638 		aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
   6639 		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
   6640 		    sc->sc_flags, t.valid);
   6641 	}
   6642 
   6643 	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
   6644 	    iwm_desc_lookup(t.error_id));
   6645 	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
   6646 	    t.trm_hw_status0);
   6647 	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
   6648 	    t.trm_hw_status1);
   6649 	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
   6650 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
   6651 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
   6652 	aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
   6653 	aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
   6654 	aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
   6655 	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
   6656 	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
   6657 	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
   6658 	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
   6659 	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
   6660 	aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
   6661 	    t.fw_rev_type);
   6662 	aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
   6663 	    t.major);
   6664 	aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
   6665 	    t.minor);
   6666 	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
   6667 	aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
   6668 	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
   6669 	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
   6670 	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
   6671 	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
   6672 	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
   6673 	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
   6674 	aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
   6675 	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
   6676 	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
   6677 	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
   6678 	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
   6679 	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
   6680 	    t.l2p_addr_match);
   6681 	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
   6682 	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
   6683 	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);
   6684 
   6685 	if (sc->sc_uc.uc_umac_error_event_table)
   6686 		iwm_nic_umac_error(sc);
   6687 }
   6688 
/*
 * Dump the UMAC error event table (family 8000+ devices).  Called from
 * iwm_nic_error() when the firmware reported a UMAC table address.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table t;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a count in 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
		iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    t.ilink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    t.ilink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
	aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
	    t.frame_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
	    t.stack_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
	aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
	    t.nic_isr_pref);
}
   6735 #endif
   6736 
/*
 * Sync the DMA-mapped payload that follows the RX packet header and
 * point _var_ at it.  The sync skips the header (already synced by the
 * caller) and covers exactly the response structure.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Like SYNC_RESP_STRUCT but for a variable-length payload of _len_
 * bytes.  BUG FIX: this previously synced sizeof(len) bytes — the size
 * of whatever variable named 'len' happened to be in scope at the
 * expansion site — instead of the _len_ argument, leaving most of the
 * payload unsynced before the CPU read it.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
   6752 
/*
 * Process all pending firmware notifications and command responses in
 * the RX ring, from the driver's read index up to the hardware's
 * closed_rb_num write index.  Runs in softint context via iwm_softintr().
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* Hardware write pointer; only the low 12 bits are the index. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int orig_qid, qid, idx, code;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 0x80 in qid marks firmware-originated notifications. */
		orig_qid = pkt->hdr.qid;
		qid = orig_qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_rx_missed_beacons_notif(sc, pkt, data);
			break;

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		/*
		 * ALIVE response: the payload layout differs per firmware
		 * version, so the payload length selects the right struct.
		 */
		case IWM_ALIVE: {
			struct iwm_alive_resp_v1 *resp1;
			struct iwm_alive_resp_v2 *resp2;
			struct iwm_alive_resp_v3 *resp3;

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake the thread sleeping in firmware load. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);
			uint16_t size = le16toh(phy_db_notif->length);
			/* Sync the variable-length PHY DB data as well. */
			bus_dmamap_sync(sc->sc_dmat, data->map,
			    sizeof(*pkt) + sizeof(*phy_db_notif),
			    size, BUS_DMASYNC_POSTREAD);
			iwm_phy_db_set_section(sc, phy_db_notif, size);
			break;
		}

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break;
		}

		/* Copy out responses a thread is synchronously waiting for. */
		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Store the firmware's country code as a string. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
			break;

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt) + sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);
			break;
		}

		default:
			aprint_error_dev(sc->sc_dev,
			    "unhandled firmware response 0x%x 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWM_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (!(orig_qid & (1 << 7))) {
			iwm_cmd_done(sc, qid, idx);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
   7021 
/*
 * Soft interrupt handler.  Consumes the interrupt cause bits that
 * iwm_intr() accumulated in sc_soft_flags and dispatches RX, firmware
 * load, rfkill and error handling.  Loops until no new causes have
 * accumulated, then re-enables hardware interrupts.
 */
static void
iwm_softintr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	uint32_t r1;
	int isperiodic = 0;

	/* Atomically take all pending cause bits. */
	r1 = atomic_swap_32(&sc->sc_soft_flags, 0);

 restart:
	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]));
#endif

		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
 fatal:
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		/* Don't restore interrupt mask */
		return;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		goto fatal;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	/* RF-kill switch toggled: take the interface down if it engaged. */
	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* Disable periodic RX interrupt unless real RX is pending. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
	    isperiodic) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
		    !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	/* New causes may have arrived while we were working; reprocess. */
	r1 = atomic_swap_32(&sc->sc_soft_flags, 0);
	if (r1 != 0)
		goto restart;

	iwm_restore_interrupts(sc);
}
   7108 
/*
 * Hardware interrupt handler.  Reads the interrupt causes (either from
 * the in-memory ICT table or from the CSR registers), masks interrupts,
 * and hands the cause bits to iwm_softintr() for processing.
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
static int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int r1, r2;

	/* Mask all interrupts until the softint has run. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
		    0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
		/*
		 * NOTE(review): htole32() is used where le32toh() reads more
		 * naturally; both perform the identical byte swap, so the
		 * result is the same on all hosts.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* Acknowledge. */
			bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
			    &ict[sc->ict_cur] - ict, sizeof(*ict),
			    BUS_DMASYNC_PREWRITE);
			sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/* Expand the packed ICT value back into CSR_INT bit layout. */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* All-ones / 0xa5a5a5ax patterns mean the device is gone. */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	atomic_or_32(&sc->sc_soft_flags, r1);
	softint_schedule(sc->sc_soft_ih);
	return 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	return 0;
}
   7170 
   7171 /*
   7172  * Autoconf glue-sniffing
   7173  */
   7174 
/*
 * PCI product IDs this driver attaches to.  The 3165/8260 entries are
 * disabled (#if 0) — presumably pending firmware/driver support; confirm
 * before enabling them.
 */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
#if 0
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
#endif
};
   7189 
   7190 static int
   7191 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
   7192 {
   7193 	struct pci_attach_args *pa = aux;
   7194 
   7195 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
   7196 		return 0;
   7197 
   7198 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
   7199 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
   7200 			return 1;
   7201 
   7202 	return 0;
   7203 }
   7204 
/*
 * Deferred second-stage attachment: start the hardware once, run the
 * init firmware to read the NVM (MAC address, band capabilities), then
 * attach the 802.11 layer.  Idempotent via IWM_FLAG_ATTACHED.
 * Returns 0 on success or an errno-style error.
 */
static int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
		return 0;

	err = iwm_start_hw(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return err;
	}

	/* Run the init ucode just to pull NVM data; then power down. */
	err = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (err)
		return err;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
	    ether_sprintf(sc->sc_nvm.hw_addr));

#ifndef IEEE80211_NO_HT
	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);
#endif

	/* not all hardware can do 5GHz band */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;

	ieee80211_ifattach(ic);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);

	return 0;
}
   7254 
   7255 static void
   7256 iwm_attach_hook(device_t dev)
   7257 {
   7258 	struct iwm_softc *sc = device_private(dev);
   7259 
   7260 	iwm_preinit(sc);
   7261 }
   7262 
   7263 static void
   7264 iwm_attach(device_t parent, device_t self, void *aux)
   7265 {
   7266 	struct iwm_softc *sc = device_private(self);
   7267 	struct pci_attach_args *pa = aux;
   7268 	struct ieee80211com *ic = &sc->sc_ic;
   7269 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   7270 	pcireg_t reg, memtype;
   7271 	char intrbuf[PCI_INTRSTR_LEN];
   7272 	const char *intrstr;
   7273 	int err;
   7274 	int txq_i;
   7275 	const struct sysctlnode *node;
   7276 
   7277 	sc->sc_dev = self;
   7278 	sc->sc_pct = pa->pa_pc;
   7279 	sc->sc_pcitag = pa->pa_tag;
   7280 	sc->sc_dmat = pa->pa_dmat;
   7281 	sc->sc_pciid = pa->pa_id;
   7282 
   7283 	pci_aprint_devinfo(pa, NULL);
   7284 
   7285 	if (workqueue_create(&sc->sc_nswq, "iwmns",
   7286 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
   7287 		panic("%s: could not create workqueue: newstate",
   7288 		    device_xname(self));
   7289 	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
   7290 	if (sc->sc_soft_ih == NULL)
   7291 		panic("%s: could not establish softint", device_xname(self));
   7292 
   7293 	/*
   7294 	 * Get the offset of the PCI Express Capability Structure in PCI
   7295 	 * Configuration Space.
   7296 	 */
   7297 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
   7298 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
   7299 	if (err == 0) {
   7300 		aprint_error_dev(self,
   7301 		    "PCIe capability structure not found!\n");
   7302 		return;
   7303 	}
   7304 
   7305 	/* Clear device-specific "PCI retry timeout" register (41h). */
   7306 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
   7307 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
   7308 
   7309 	/* Enable bus-mastering */
   7310 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   7311 	reg |= PCI_COMMAND_MASTER_ENABLE;
   7312 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
   7313 
   7314 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
   7315 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
   7316 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
   7317 	if (err) {
   7318 		aprint_error_dev(self, "can't map mem space\n");
   7319 		return;
   7320 	}
   7321 
   7322 	/* Install interrupt handler. */
   7323 	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
   7324 	if (err) {
   7325 		aprint_error_dev(self, "can't allocate interrupt\n");
   7326 		return;
   7327 	}
   7328 	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX) {
   7329 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
   7330 		    PCI_COMMAND_STATUS_REG);
   7331 		if (ISSET(reg, PCI_COMMAND_INTERRUPT_DISABLE)) {
   7332 			CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
   7333 			pci_conf_write(sc->sc_pct, sc->sc_pcitag,
   7334 			    PCI_COMMAND_STATUS_REG, reg);
   7335 		}
   7336 	}
   7337 	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
   7338 	    sizeof(intrbuf));
   7339 	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
   7340 	    IPL_NET, iwm_intr, sc, device_xname(self));
   7341 	if (sc->sc_ih == NULL) {
   7342 		aprint_error_dev(self, "can't establish interrupt");
   7343 		if (intrstr != NULL)
   7344 			aprint_error(" at %s", intrstr);
   7345 		aprint_error("\n");
   7346 		return;
   7347 	}
   7348 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
   7349 
   7350 	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
   7351 
   7352 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
   7353 	switch (PCI_PRODUCT(sc->sc_pciid)) {
   7354 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
   7355 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
   7356 		sc->sc_fwname = "iwlwifi-3160-16.ucode";
   7357 		sc->host_interrupt_operation_mode = 1;
   7358 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   7359 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   7360 		break;
   7361 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
   7362 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
   7363 		sc->sc_fwname = "iwlwifi-7265D-16.ucode";
   7364 		sc->host_interrupt_operation_mode = 0;
   7365 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   7366 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   7367 		break;
   7368 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
   7369 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
   7370 		sc->sc_fwname = "iwlwifi-7260-16.ucode";
   7371 		sc->host_interrupt_operation_mode = 1;
   7372 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   7373 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   7374 		break;
   7375 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
   7376 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
   7377 		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
   7378 		    IWM_CSR_HW_REV_TYPE_7265D ?
   7379 		    "iwlwifi-7265D-16.ucode": "iwlwifi-7265-16.ucode";
   7380 		sc->host_interrupt_operation_mode = 0;
   7381 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   7382 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   7383 		break;
   7384 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
   7385 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
   7386 		sc->sc_fwname = "iwlwifi-8000C-16.ucode";
   7387 		sc->host_interrupt_operation_mode = 0;
   7388 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
   7389 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
   7390 		break;
   7391 	default:
   7392 		aprint_error_dev(self, "unknown product %#x",
   7393 		    PCI_PRODUCT(sc->sc_pciid));
   7394 		return;
   7395 	}
   7396 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
   7397 
   7398 	/*
   7399 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
   7400 	 * changed, and now the revision step also includes bit 0-1 (no more
   7401 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
   7402 	 * in the old format.
   7403 	 */
   7404 
   7405 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   7406 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
   7407 		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
   7408 
   7409 	if (iwm_prepare_card_hw(sc) != 0) {
   7410 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   7411 		return;
   7412 	}
   7413 
   7414 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
   7415 		uint32_t hw_step;
   7416 
   7417 		/*
   7418 		 * In order to recognize C step the driver should read the
   7419 		 * chip version id located at the AUX bus MISC address.
   7420 		 */
   7421 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
   7422 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
   7423 		DELAY(2);
   7424 
   7425 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   7426 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   7427 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   7428 				   25000);
   7429 		if (!err) {
   7430 			aprint_error_dev(sc->sc_dev,
   7431 			    "failed to wake up the nic\n");
   7432 			return;
   7433 		}
   7434 
   7435 		if (iwm_nic_lock(sc)) {
   7436 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
   7437 			hw_step |= IWM_ENABLE_WFPM;
   7438 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
   7439 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
   7440 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
   7441 			if (hw_step == 0x3)
   7442 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
   7443 				    (IWM_SILICON_C_STEP << 2);
   7444 			iwm_nic_unlock(sc);
   7445 		} else {
   7446 			aprint_error_dev(sc->sc_dev,
   7447 			    "failed to lock the nic\n");
   7448 			return;
   7449 		}
   7450 	}
   7451 
   7452 	/*
   7453 	 * Allocate DMA memory for firmware transfers.
   7454 	 * Must be aligned on a 16-byte boundary.
   7455 	 */
   7456 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
   7457 	    16);
   7458 	if (err) {
   7459 		aprint_error_dev(sc->sc_dev,
   7460 		    "could not allocate memory for firmware\n");
   7461 		return;
   7462 	}
   7463 
   7464 	/* Allocate "Keep Warm" page, used internally by the card. */
   7465 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
   7466 	if (err) {
   7467 		aprint_error_dev(sc->sc_dev,
   7468 		    "could not allocate keep warm page\n");
   7469 		goto fail1;
   7470 	}
   7471 
   7472 	/* Allocate interrupt cause table (ICT).*/
   7473 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
   7474 	    1 << IWM_ICT_PADDR_SHIFT);
   7475 	if (err) {
   7476 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
   7477 		goto fail2;
   7478 	}
   7479 
   7480 	/* TX scheduler rings must be aligned on a 1KB boundary. */
   7481 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
   7482 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
   7483 	if (err) {
   7484 		aprint_error_dev(sc->sc_dev,
   7485 		    "could not allocate TX scheduler rings\n");
   7486 		goto fail3;
   7487 	}
   7488 
   7489 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
   7490 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
   7491 		if (err) {
   7492 			aprint_error_dev(sc->sc_dev,
   7493 			    "could not allocate TX ring %d\n", txq_i);
   7494 			goto fail4;
   7495 		}
   7496 	}
   7497 
   7498 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
   7499 	if (err) {
   7500 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
   7501 		goto fail4;
   7502 	}
   7503 
   7504 	/* Clear pending interrupts. */
   7505 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
   7506 
   7507 	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
   7508 	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
   7509 	    SYSCTL_DESCR("iwm per-controller controls"),
   7510 	    NULL, 0, NULL, 0,
   7511 	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
   7512 	    CTL_EOL)) != 0) {
   7513 		aprint_normal_dev(sc->sc_dev,
   7514 		    "couldn't create iwm per-controller sysctl node\n");
   7515 	}
   7516 	if (err == 0) {
   7517 		int iwm_nodenum = node->sysctl_num;
   7518 
   7519 		/* Reload firmware sysctl node */
   7520 		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
   7521 		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
   7522 		    SYSCTL_DESCR("Reload firmware"),
   7523 		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
   7524 		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
   7525 		    CTL_EOL)) != 0) {
   7526 			aprint_normal_dev(sc->sc_dev,
   7527 			    "couldn't create load_fw sysctl node\n");
   7528 		}
   7529 	}
   7530 
   7531 	/*
   7532 	 * Attach interface
   7533 	 */
   7534 	ic->ic_ifp = ifp;
   7535 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
   7536 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
   7537 	ic->ic_state = IEEE80211_S_INIT;
   7538 
   7539 	/* Set device capabilities. */
   7540 	ic->ic_caps =
   7541 	    IEEE80211_C_WEP |		/* WEP */
   7542 	    IEEE80211_C_WPA |		/* 802.11i */
   7543 #ifdef notyet
   7544 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
   7545 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
   7546 #endif
   7547 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
   7548 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
   7549 
   7550 #ifndef IEEE80211_NO_HT
   7551 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
   7552 	ic->ic_htxcaps = 0;
   7553 	ic->ic_txbfcaps = 0;
   7554 	ic->ic_aselcaps = 0;
   7555 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
   7556 #endif
   7557 
   7558 	/* all hardware can do 2.4GHz band */
   7559 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
   7560 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
   7561 
   7562 	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
   7563 		sc->sc_phyctxt[i].id = i;
   7564 	}
   7565 
   7566 	sc->sc_amrr.amrr_min_success_threshold =  1;
   7567 	sc->sc_amrr.amrr_max_success_threshold = 15;
   7568 
   7569 	/* IBSS channel undefined for now. */
   7570 	ic->ic_ibss_chan = &ic->ic_channels[1];
   7571 
   7572 #if 0
   7573 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
   7574 #endif
   7575 
   7576 	ifp->if_softc = sc;
   7577 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   7578 	ifp->if_init = iwm_init;
   7579 	ifp->if_stop = iwm_stop;
   7580 	ifp->if_ioctl = iwm_ioctl;
   7581 	ifp->if_start = iwm_start;
   7582 	ifp->if_watchdog = iwm_watchdog;
   7583 	IFQ_SET_READY(&ifp->if_snd);
   7584 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
   7585 
   7586 	if_initialize(ifp);
   7587 #if 0
   7588 	ieee80211_ifattach(ic);
   7589 #else
   7590 	ether_ifattach(ifp, ic->ic_myaddr);	/* XXX */
   7591 #endif
   7592 	/* Use common softint-based if_input */
   7593 	ifp->if_percpuq = if_percpuq_create(ifp);
   7594 	if_deferred_start_init(ifp, NULL);
   7595 	if_register(ifp);
   7596 
   7597 	callout_init(&sc->sc_calib_to, 0);
   7598 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
   7599 	callout_init(&sc->sc_led_blink_to, 0);
   7600 	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
   7601 #ifndef IEEE80211_NO_HT
   7602 	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
   7603 	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
   7604 		panic("%s: could not create workqueue: setrates",
   7605 		    device_xname(self));
   7606 	if (workqueue_create(&sc->sc_bawq, "iwmba",
   7607 	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
   7608 		panic("%s: could not create workqueue: blockack",
   7609 		    device_xname(self));
   7610 	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
   7611 	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
   7612 		panic("%s: could not create workqueue: htprot",
   7613 		    device_xname(self));
   7614 #endif
   7615 
   7616 	if (pmf_device_register(self, NULL, NULL))
   7617 		pmf_class_network_register(self, ifp);
   7618 	else
   7619 		aprint_error_dev(self, "couldn't establish power handler\n");
   7620 
   7621 	/*
   7622 	 * We can't do normal attach before the file system is mounted
   7623 	 * because we cannot read the MAC address without loading the
   7624 	 * firmware from disk.  So we postpone until mountroot is done.
   7625 	 * Notably, this will require a full driver unload/load cycle
   7626 	 * (or reboot) in case the firmware is not present when the
   7627 	 * hook runs.
   7628 	 */
   7629 	config_mountroot(self, iwm_attach_hook);
   7630 
   7631 	return;
   7632 
   7633 fail4:	while (--txq_i >= 0)
   7634 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
   7635 	iwm_free_rx_ring(sc, &sc->rxq);
   7636 	iwm_dma_contig_free(&sc->sched_dma);
   7637 fail3:	if (sc->ict_dma.vaddr != NULL)
   7638 		iwm_dma_contig_free(&sc->ict_dma);
   7639 fail2:	iwm_dma_contig_free(&sc->kw_dma);
   7640 fail1:	iwm_dma_contig_free(&sc->fw_dma);
   7641 }
   7642 
   7643 void
   7644 iwm_radiotap_attach(struct iwm_softc *sc)
   7645 {
   7646 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
   7647 
   7648 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
   7649 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
   7650 	    &sc->sc_drvbpf);
   7651 
   7652 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
   7653 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
   7654 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
   7655 
   7656 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
   7657 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
   7658 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
   7659 }
   7660 
#if 0
/*
 * NOTE(review): everything in this #if 0 block is compiled out.
 * These look like restart/wakeup/deactivate hooks carried over from
 * the OpenBSD driver that have not been wired up here -- no caller or
 * registration for them appears below.
 */

/* Stop the interface and, if it was administratively up, re-init it. */
static void
iwm_init_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int s;

	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}

/* Resume helper: redo the PCI quirk fixup, then restart the interface. */
static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

/* Autoconf activate hook: only DVACT_DEACTIVATE is handled. */
static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif
   7708 
/* Autoconf attachment glue; no detach or activate hooks are provided. */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, NULL);
   7711 
   7712 static int
   7713 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
   7714 {
   7715 	struct sysctlnode node;
   7716 	struct iwm_softc *sc;
   7717 	int err, t;
   7718 
   7719 	node = *rnode;
   7720 	sc = node.sysctl_data;
   7721 	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
   7722 	node.sysctl_data = &t;
   7723 	err = sysctl_lookup(SYSCTLFN_CALL(&node));
   7724 	if (err || newp == NULL)
   7725 		return err;
   7726 
   7727 	if (t == 0)
   7728 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
   7729 	return 0;
   7730 }
   7731 
   7732 SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
   7733 {
   7734 	const struct sysctlnode *rnode;
   7735 #ifdef IWM_DEBUG
   7736 	const struct sysctlnode *cnode;
   7737 #endif /* IWM_DEBUG */
   7738 	int rc;
   7739 
   7740 	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
   7741 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
   7742 	    SYSCTL_DESCR("iwm global controls"),
   7743 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   7744 		goto err;
   7745 
   7746 	iwm_sysctl_root_num = rnode->sysctl_num;
   7747 
   7748 #ifdef IWM_DEBUG
   7749 	/* control debugging printfs */
   7750 	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
   7751 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
   7752 	    "debug", SYSCTL_DESCR("Enable debugging output"),
   7753 	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
   7754 		goto err;
   7755 #endif /* IWM_DEBUG */
   7756 
   7757 	return;
   7758 
   7759  err:
   7760 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
   7761 }
   7762