Home | History | Annotate | Line # | Download | only in pci
if_iwm.c revision 1.45
      1 /*	$NetBSD: if_iwm.c,v 1.45 2016/12/18 02:18:29 nonaka Exp $	*/
      2 /*	OpenBSD: if_iwm.c,v 1.147 2016/11/17 14:12:33 stsp Exp	*/
      3 #define IEEE80211_NO_HT
      4 /*
      5  * Copyright (c) 2014, 2016 genua gmbh <info (at) genua.de>
      6  *   Author: Stefan Sperling <stsp (at) openbsd.org>
      7  * Copyright (c) 2014 Fixup Software Ltd.
      8  *
      9  * Permission to use, copy, modify, and distribute this software for any
     10  * purpose with or without fee is hereby granted, provided that the above
     11  * copyright notice and this permission notice appear in all copies.
     12  *
     13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     20  */
     21 
     22 /*-
     23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
     24  * which were used as the reference documentation for this implementation.
     25  *
     26  ***********************************************************************
     27  *
     28  * This file is provided under a dual BSD/GPLv2 license.  When using or
     29  * redistributing this file, you may do so under either license.
     30  *
     31  * GPL LICENSE SUMMARY
     32  *
     33  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
     34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
     35  * Copyright(c) 2016 Intel Deutschland GmbH
     36  *
     37  * This program is free software; you can redistribute it and/or modify
     38  * it under the terms of version 2 of the GNU General Public License as
     39  * published by the Free Software Foundation.
     40  *
     41  * This program is distributed in the hope that it will be useful, but
     42  * WITHOUT ANY WARRANTY; without even the implied warranty of
     43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     44  * General Public License for more details.
     45  *
     46  * You should have received a copy of the GNU General Public License
     47  * along with this program; if not, write to the Free Software
     48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
     49  * USA
     50  *
     51  * The full GNU General Public License is included in this distribution
     52  * in the file called COPYING.
     53  *
     54  * Contact Information:
     55  *  Intel Linux Wireless <ilw (at) linux.intel.com>
     56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
     57  *
     58  *
     59  * BSD LICENSE
     60  *
     61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
     62  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
     63  * Copyright(c) 2016 Intel Deutschland GmbH
     64  * All rights reserved.
     65  *
     66  * Redistribution and use in source and binary forms, with or without
     67  * modification, are permitted provided that the following conditions
     68  * are met:
     69  *
     70  *  * Redistributions of source code must retain the above copyright
     71  *    notice, this list of conditions and the following disclaimer.
     72  *  * Redistributions in binary form must reproduce the above copyright
     73  *    notice, this list of conditions and the following disclaimer in
     74  *    the documentation and/or other materials provided with the
     75  *    distribution.
     76  *  * Neither the name Intel Corporation nor the names of its
     77  *    contributors may be used to endorse or promote products derived
     78  *    from this software without specific prior written permission.
     79  *
     80  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     81  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     82  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     83  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     84  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     86  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     87  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     88  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     89  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     90  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     91  */
     92 
     93 /*-
     94  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
     95  *
     96  * Permission to use, copy, modify, and distribute this software for any
     97  * purpose with or without fee is hereby granted, provided that the above
     98  * copyright notice and this permission notice appear in all copies.
     99  *
    100  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    101  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    102  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    103  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    104  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    105  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    106  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    107  */
    108 
    109 #include <sys/cdefs.h>
    110 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.45 2016/12/18 02:18:29 nonaka Exp $");
    111 
    112 #include <sys/param.h>
    113 #include <sys/conf.h>
    114 #include <sys/kernel.h>
    115 #include <sys/kmem.h>
    116 #include <sys/mbuf.h>
    117 #include <sys/mutex.h>
    118 #include <sys/proc.h>
    119 #include <sys/socket.h>
    120 #include <sys/sockio.h>
    121 #include <sys/sysctl.h>
    122 #include <sys/systm.h>
    123 
    124 #include <sys/cpu.h>
    125 #include <sys/bus.h>
    126 #include <sys/workqueue.h>
    127 #include <machine/endian.h>
    128 #include <machine/intr.h>
    129 
    130 #include <dev/pci/pcireg.h>
    131 #include <dev/pci/pcivar.h>
    132 #include <dev/pci/pcidevs.h>
    133 #include <dev/firmload.h>
    134 
    135 #include <net/bpf.h>
    136 #include <net/if.h>
    137 #include <net/if_dl.h>
    138 #include <net/if_media.h>
    139 #include <net/if_ether.h>
    140 
    141 #include <netinet/in.h>
    142 #include <netinet/ip.h>
    143 
    144 #include <net80211/ieee80211_var.h>
    145 #include <net80211/ieee80211_amrr.h>
    146 #include <net80211/ieee80211_radiotap.h>
    147 
    148 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
    149 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
    150 
    151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
    152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
    153 
    154 #ifdef IWM_DEBUG
    155 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
    156 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
    157 int iwm_debug = 0;
    158 #else
    159 #define DPRINTF(x)	do { ; } while (0)
    160 #define DPRINTFN(n, x)	do { ; } while (0)
    161 #endif
    162 
    163 #include <dev/pci/if_iwmreg.h>
    164 #include <dev/pci/if_iwmvar.h>
    165 
/*
 * Channel numbers the NVM may advertise on pre-8000 devices
 * (8000-series parts use iwm_nvm_channels_8000 below).
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
    174 
/*
 * Channel numbers the NVM may advertise on 8000-series devices;
 * a superset of iwm_nvm_channels above.
 */
static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
    183 
    184 #define IWM_NUM_2GHZ_CHANNELS	14
    185 
/*
 * Rate table mapping 802.11 rates (in units of 500 kbit/s, so 2 == 1 Mbit/s)
 * to the PLCP signal values the firmware expects, for both legacy and
 * HT (SISO MCS) operation.  Rows with IWM_RATE_HT_SISO_MCS_INV_PLCP have
 * no HT equivalent.
 */
static const struct iwm_rate {
	uint8_t rate;		/* rate in units of 500 kbit/s */
	uint8_t plcp;		/* legacy PLCP value */
	uint8_t ht_plcp;	/* HT SISO MCS PLCP value */
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
    206 #define IWM_RIDX_CCK	0
    207 #define IWM_RIDX_OFDM	4
    208 #define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
    209 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
    210 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
    211 
    212 #ifndef IEEE80211_NO_HT
/*
 * Convert an MCS index into an iwm_rates[] index.
 * Covers SISO MCS 0-7 only; indexed directly by MCS number.
 */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
    224 #endif
    225 
/* One NVM section image and its length, as read from the device. */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in 'data' */
	uint8_t *data;		/* section contents */
};
    230 
/*
 * A deferred 802.11 state-transition request, handed to the newstate
 * workqueue (see iwm_newstate_cb()).
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	enum ieee80211_state ns_nstate;	/* target net80211 state */
	int ns_arg;			/* argument for the transition */
	int ns_generation;	/* NOTE(review): presumably used to detect
				 * stale/superseded requests -- confirm in
				 * iwm_newstate_cb() */
};
    237 
    238 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
    239 static int	iwm_firmware_store_section(struct iwm_softc *,
    240 		    enum iwm_ucode_type, uint8_t *, size_t);
    241 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
    242 static int	iwm_read_firmware(struct iwm_softc *);
    243 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
    244 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
    245 #ifdef IWM_DEBUG
    246 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
    247 #endif
    248 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
    249 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
    250 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
    251 static int	iwm_nic_lock(struct iwm_softc *);
    252 static void	iwm_nic_unlock(struct iwm_softc *);
    253 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
    254 		    uint32_t);
    255 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    256 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    257 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
    258 		    bus_size_t, bus_size_t);
    259 static void	iwm_dma_contig_free(struct iwm_dma_info *);
    260 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    261 static void	iwm_disable_rx_dma(struct iwm_softc *);
    262 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    263 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    264 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
    265 		    int);
    266 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    267 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    268 static void	iwm_enable_rfkill_int(struct iwm_softc *);
    269 static int	iwm_check_rfkill(struct iwm_softc *);
    270 static void	iwm_enable_interrupts(struct iwm_softc *);
    271 static void	iwm_restore_interrupts(struct iwm_softc *);
    272 static void	iwm_disable_interrupts(struct iwm_softc *);
    273 static void	iwm_ict_reset(struct iwm_softc *);
    274 static int	iwm_set_hw_ready(struct iwm_softc *);
    275 static int	iwm_prepare_card_hw(struct iwm_softc *);
    276 static void	iwm_apm_config(struct iwm_softc *);
    277 static int	iwm_apm_init(struct iwm_softc *);
    278 static void	iwm_apm_stop(struct iwm_softc *);
    279 static int	iwm_allow_mcast(struct iwm_softc *);
    280 static int	iwm_start_hw(struct iwm_softc *);
    281 static void	iwm_stop_device(struct iwm_softc *);
    282 static void	iwm_nic_config(struct iwm_softc *);
    283 static int	iwm_nic_rx_init(struct iwm_softc *);
    284 static int	iwm_nic_tx_init(struct iwm_softc *);
    285 static int	iwm_nic_init(struct iwm_softc *);
    286 static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
    287 static int	iwm_post_alive(struct iwm_softc *);
    288 static struct iwm_phy_db_entry *
    289 		iwm_phy_db_get_section(struct iwm_softc *,
    290 		    enum iwm_phy_db_section_type, uint16_t);
    291 static int	iwm_phy_db_set_section(struct iwm_softc *,
    292 		    struct iwm_calib_res_notif_phy_db *, uint16_t);
    293 static int	iwm_is_valid_channel(uint16_t);
    294 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
    295 static uint16_t iwm_channel_id_to_papd(uint16_t);
    296 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
    297 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
    298 		    uint8_t **, uint16_t *, uint16_t);
    299 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
    300 		    void *);
    301 static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
    302 		    enum iwm_phy_db_section_type, uint8_t);
    303 static int	iwm_send_phy_db_data(struct iwm_softc *);
    304 static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
    305 		    struct iwm_time_event_cmd_v1 *);
    306 static int	iwm_send_time_event_cmd(struct iwm_softc *,
    307 		    const struct iwm_time_event_cmd_v2 *);
    308 static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
    309 		    uint32_t, uint32_t);
    310 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
    311 		    uint16_t, uint8_t *, uint16_t *);
    312 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
    313 		    uint16_t *, size_t);
    314 static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
    315 		    const uint8_t *, size_t);
    316 #ifndef IEEE80211_NO_HT
    317 static void	iwm_setup_ht_rates(struct iwm_softc *);
    318 static void	iwm_htprot_task(void *);
    319 static void	iwm_update_htprot(struct ieee80211com *,
    320 		    struct ieee80211_node *);
    321 static int	iwm_ampdu_rx_start(struct ieee80211com *,
    322 		    struct ieee80211_node *, uint8_t);
    323 static void	iwm_ampdu_rx_stop(struct ieee80211com *,
    324 		    struct ieee80211_node *, uint8_t);
    325 static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
    326 		    uint8_t, uint16_t, int);
    327 #ifdef notyet
    328 static int	iwm_ampdu_tx_start(struct ieee80211com *,
    329 		    struct ieee80211_node *, uint8_t);
    330 static void	iwm_ampdu_tx_stop(struct ieee80211com *,
    331 		    struct ieee80211_node *, uint8_t);
    332 #endif
    333 static void	iwm_ba_task(void *);
    334 #endif
    335 
    336 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
    337 		    const uint16_t *, const uint16_t *, const uint16_t *,
    338 		    const uint16_t *, const uint16_t *);
    339 static void	iwm_set_hw_address_8000(struct iwm_softc *,
    340 		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
    341 static int	iwm_parse_nvm_sections(struct iwm_softc *,
    342 		    struct iwm_nvm_section *);
    343 static int	iwm_nvm_init(struct iwm_softc *);
    344 static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
    345 		    const uint8_t *, uint32_t);
    346 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
    347 		    const uint8_t *, uint32_t);
    348 static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
    349 static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
    350 		    struct iwm_fw_sects *, int , int *);
    351 static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
    352 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
    353 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
    354 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
    355 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
    356 static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
    357 		    enum iwm_ucode_type);
    358 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
    359 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
    360 static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
    361 static int	iwm_get_signal_strength(struct iwm_softc *,
    362 		    struct iwm_rx_phy_info *);
    363 static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
    364 		    struct iwm_rx_packet *, struct iwm_rx_data *);
    365 static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
    366 static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
    367 		    struct iwm_rx_data *);
    368 static void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,		    struct iwm_node *);
    369 static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
    370 		    struct iwm_rx_data *);
    371 static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
    372 		    uint32_t);
    373 #if 0
    374 static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
    375 static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
    376 #endif
    377 static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
    378 		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
    379 static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
    380 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
    381 		    uint8_t, uint8_t);
    382 static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
    383 		    uint8_t, uint8_t, uint32_t, uint32_t);
    384 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
    385 static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
    386 		    uint16_t, const void *);
    387 static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
    388 		    uint32_t *);
    389 static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
    390 		    const void *, uint32_t *);
    391 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
    392 static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
    393 #if 0
    394 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
    395 		    uint16_t);
    396 #endif
    397 static const struct iwm_rate *
    398 		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
    399 		    struct ieee80211_frame *, struct iwm_tx_cmd *);
    400 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
    401 		    struct ieee80211_node *, int);
    402 static void	iwm_led_enable(struct iwm_softc *);
    403 static void	iwm_led_disable(struct iwm_softc *);
    404 static int	iwm_led_is_enabled(struct iwm_softc *);
    405 static void	iwm_led_blink_timeout(void *);
    406 static void	iwm_led_blink_start(struct iwm_softc *);
    407 static void	iwm_led_blink_stop(struct iwm_softc *);
    408 static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
    409 		    struct iwm_beacon_filter_cmd *);
    410 static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
    411 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
    412 static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
    413 		    int);
    414 static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
    415 		    struct iwm_mac_power_cmd *);
    416 static int	iwm_power_mac_update_mode(struct iwm_softc *,
    417 		    struct iwm_node *);
    418 static int	iwm_power_update_device(struct iwm_softc *);
    419 #ifdef notyet
    420 static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
    421 #endif
    422 static int	iwm_disable_beacon_filter(struct iwm_softc *);
    423 static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
    424 static int	iwm_add_aux_sta(struct iwm_softc *);
    425 static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
    426 static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
    427 #ifdef notyet
    428 static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
    429 static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
    430 #endif
    431 static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
    432 		    struct iwm_scan_channel_cfg_lmac *, int);
    433 static int	iwm_fill_probe_req(struct iwm_softc *,
    434 		    struct iwm_scan_probe_req *);
    435 static int	iwm_lmac_scan(struct iwm_softc *);
    436 static int	iwm_config_umac_scan(struct iwm_softc *);
    437 static int	iwm_umac_scan(struct iwm_softc *);
    438 static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
    439 static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
    440 		    int *);
    441 static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
    442 		    struct iwm_mac_ctx_cmd *, uint32_t, int);
    443 static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
    444 		    struct iwm_mac_data_sta *, int);
    445 static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
    446 		    uint32_t, int);
    447 static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
    448 static int	iwm_auth(struct iwm_softc *);
    449 static int	iwm_assoc(struct iwm_softc *);
    450 static void	iwm_calib_timeout(void *);
    451 #ifndef IEEE80211_NO_HT
    452 static void	iwm_setrates_task(void *);
    453 static int	iwm_setrates(struct iwm_node *);
    454 #endif
    455 static int	iwm_media_change(struct ifnet *);
    456 static void	iwm_newstate_cb(struct work *, void *);
    457 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
    458 static void	iwm_endscan_cb(struct work *, void *);
    459 static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
    460 		    struct ieee80211_node *);
    461 static int	iwm_sf_config(struct iwm_softc *, int);
    462 static int	iwm_send_bt_init_conf(struct iwm_softc *);
    463 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
    464 static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
    465 static int	iwm_init_hw(struct iwm_softc *);
    466 static int	iwm_init(struct ifnet *);
    467 static void	iwm_start(struct ifnet *);
    468 static void	iwm_stop(struct ifnet *, int);
    469 static void	iwm_watchdog(struct ifnet *);
    470 static int	iwm_ioctl(struct ifnet *, u_long, void *);
    471 #ifdef IWM_DEBUG
    472 static const char *iwm_desc_lookup(uint32_t);
    473 static void	iwm_nic_error(struct iwm_softc *);
    474 static void	iwm_nic_umac_error(struct iwm_softc *);
    475 #endif
    476 static void	iwm_notif_intr(struct iwm_softc *);
    477 static int	iwm_intr(void *);
    478 static int	iwm_preinit(struct iwm_softc *);
    479 static void	iwm_attach_hook(device_t);
    480 static void	iwm_attach(device_t, device_t, void *);
    481 #if 0
    482 static void	iwm_init_task(void *);
    483 static int	iwm_activate(device_t, enum devact);
    484 static void	iwm_wakeup(struct iwm_softc *);
    485 #endif
    486 static void	iwm_radiotap_attach(struct iwm_softc *);
    487 static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);
    488 
    489 /* XXX needed by iwn_scan */
    490 static uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
    491 static uint8_t *ieee80211_add_rates(uint8_t *,
    492 		    const struct ieee80211_rateset *);
    493 static uint8_t *ieee80211_add_xrates(uint8_t *,
    494 		    const struct ieee80211_rateset *);
    495 
    496 static int iwm_sysctl_root_num;
    497 
    498 static int
    499 iwm_firmload(struct iwm_softc *sc)
    500 {
    501 	struct iwm_fw_info *fw = &sc->sc_fw;
    502 	firmware_handle_t fwh;
    503 	int err;
    504 
    505 	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
    506 		return 0;
    507 
    508 	/* Open firmware image. */
    509 	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
    510 	if (err) {
    511 		aprint_error_dev(sc->sc_dev,
    512 		    "could not get firmware handle %s\n", sc->sc_fwname);
    513 		return err;
    514 	}
    515 
    516 	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
    517 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
    518 		fw->fw_rawdata = NULL;
    519 	}
    520 
    521 	fw->fw_rawsize = firmware_get_size(fwh);
    522 	/*
    523 	 * Well, this is how the Linux driver checks it ....
    524 	 */
    525 	if (fw->fw_rawsize < sizeof(uint32_t)) {
    526 		aprint_error_dev(sc->sc_dev,
    527 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
    528 		err = EINVAL;
    529 		goto out;
    530 	}
    531 
    532 	/* some sanity */
    533 	if (fw->fw_rawsize > IWM_FWMAXSIZE) {
    534 		aprint_error_dev(sc->sc_dev,
    535 		    "firmware size is ridiculous: %zd bytes\n", fw->fw_rawsize);
    536 		err = EINVAL;
    537 		goto out;
    538 	}
    539 
    540 	/* Read the firmware. */
    541 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
    542 	if (fw->fw_rawdata == NULL) {
    543 		aprint_error_dev(sc->sc_dev,
    544 		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
    545 		err = ENOMEM;
    546 		goto out;
    547 	}
    548 	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
    549 	if (err) {
    550 		aprint_error_dev(sc->sc_dev,
    551 		    "could not read firmware %s\n", sc->sc_fwname);
    552 		goto out;
    553 	}
    554 
    555 	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
    556  out:
    557 	/* caller will release memory, if necessary */
    558 
    559 	firmware_close(fwh);
    560 	return err;
    561 }
    562 
    563 /*
    564  * just maintaining status quo.
    565  */
    566 static void
    567 iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
    568 {
    569 	struct ieee80211com *ic = &sc->sc_ic;
    570 	struct ieee80211_frame *wh;
    571 	uint8_t subtype;
    572 
    573 	wh = mtod(m, struct ieee80211_frame *);
    574 
    575 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
    576 		return;
    577 
    578 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
    579 
    580 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
    581 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
    582 		return;
    583 
    584 	int chan = le32toh(sc->sc_last_phy_info.channel);
    585 	if (chan < __arraycount(ic->ic_channels))
    586 		ic->ic_curchan = &ic->ic_channels[chan];
    587 }
    588 
    589 static int
    590 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
    591 {
    592 	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;
    593 
    594 	if (dlen < sizeof(*l) ||
    595 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
    596 		return EINVAL;
    597 
    598 	/* we don't actually store anything for now, always use s/w crypto */
    599 
    600 	return 0;
    601 }
    602 
    603 static int
    604 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    605     uint8_t *data, size_t dlen)
    606 {
    607 	struct iwm_fw_sects *fws;
    608 	struct iwm_fw_onesect *fwone;
    609 
    610 	if (type >= IWM_UCODE_TYPE_MAX)
    611 		return EINVAL;
    612 	if (dlen < sizeof(uint32_t))
    613 		return EINVAL;
    614 
    615 	fws = &sc->sc_fw.fw_sects[type];
    616 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
    617 		return EINVAL;
    618 
    619 	fwone = &fws->fw_sect[fws->fw_count];
    620 
    621 	/* first 32bit are device load offset */
    622 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
    623 
    624 	/* rest is data */
    625 	fwone->fws_data = data + sizeof(uint32_t);
    626 	fwone->fws_len = dlen - sizeof(uint32_t);
    627 
    628 	/* for freeing the buffer during driver unload */
    629 	fwone->fws_alloc = data;
    630 	fwone->fws_allocsize = dlen;
    631 
    632 	fws->fw_count++;
    633 	fws->fw_totlen += fwone->fws_len;
    634 
    635 	return 0;
    636 }
    637 
/* Wire format of a default-calibration TLV: one ucode image's settings. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		  /* image index, little-endian */
	struct iwm_tlv_calib_ctrl calib;  /* flow/event trigger values */
} __packed;
    642 
    643 static int
    644 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
    645 {
    646 	const struct iwm_tlv_calib_data *def_calib = data;
    647 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
    648 
    649 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
    650 		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
    651 		    DEVNAME(sc), ucode_type));
    652 		return EINVAL;
    653 	}
    654 
    655 	sc->sc_default_calib[ucode_type].flow_trigger =
    656 	    def_calib->calib.flow_trigger;
    657 	sc->sc_default_calib[ucode_type].event_trigger =
    658 	    def_calib->calib.event_trigger;
    659 
    660 	return 0;
    661 }
    662 
/*
 * Load the firmware image (via iwm_firmload()) and parse its TLV
 * sections into sc->sc_fw, filling in the softc capability/flag
 * fields advertised by the image.
 *
 * Only one thread parses the firmware at a time; concurrent callers
 * sleep on &sc->sc_fw until the parser is done.  Returns 0 on success
 * or an errno; on failure the raw image is released and fw_status is
 * reset so a later attempt can retry.
 */
static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	enum iwm_ucode_tlv_type tlv_type;
	uint8_t *data;
	int err, status;
	size_t len;

	/* Serialize: first caller parses, later callers wait for the result. */
	if (fw->fw_status == IWM_FW_STATUS_NONE) {
		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
	} else {
		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
	}
	status = fw->fw_status;

	/* Another thread already parsed the image successfully. */
	if (status == IWM_FW_STATUS_DONE)
		return 0;

	err = iwm_firmload(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->sc_fwname, err);
		goto out;
	}

	/* Reset capability state before (re)parsing. */
	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	/*
	 * Sanity-check the image header: the first 32-bit word of a TLV
	 * image is zero, followed by the TLV magic.
	 */
	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
		    sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	/* Walk the TLV records following the header. */
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		/* memcpy: the record may not be naturally aligned. */
		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			aprint_error_dev(sc->sc_dev,
			    "firmware too short: %zu bytes\n", len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			/* Presence-only TLV: payload must be empty. */
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/* Value is validated but otherwise unused here. */
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			/* Flags may exceed 32 bits in future firmware. */
			if (le32toh(api->api_index) > 0) {
				/* NB: aborts parsing with err still 0. */
				goto parse_out;
			}
			sc->sc_ucode_api = le32toh(api->api_flags);
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				/* NB: aborts parsing with err still 0. */
				goto parse_out;
			}
			/* Expand the 32-bit word into the capability bitmap. */
			for (i = 0; i < 32; i++) {
				if (!ISSET(le32toh(capa->api_capa), __BIT(i)))
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			  le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		default:
			DPRINTF(("%s: unknown firmware section %d, abort\n",
			    DEVNAME(sc), tlv_type));
			err = EINVAL;
			goto parse_out;
		}

		/*
		 * TLV payloads are padded to 4-byte boundaries.
		 * NOTE(review): if the final TLV were unpadded, the
		 * roundup could exceed the remaining len and underflow
		 * this size_t — presumably all shipped images are
		 * padded; TODO confirm against the TLV image format.
		 */
		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	/* Normal loop exit implies no parse error was recorded. */
	KASSERT(err == 0);

 parse_out:
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "firmware parse error, section type %d\n", tlv_type);
	}

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		aprint_error_dev(sc->sc_dev,
		    "device uses unsupported power ops\n");
		err = ENOTSUP;
	}

 out:
	if (err)
		fw->fw_status = IWM_FW_STATUS_NONE;
	else
		fw->fw_status = IWM_FW_STATUS_DONE;
	/* Wake any threads blocked in the INPROGRESS wait above. */
	wakeup(&sc->sc_fw);

	if (err && fw->fw_rawdata != NULL) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
		/* don't touch fw->fw_status */
		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
	}
	return err;
}
    937 
/*
 * Read a periphery (PRPH) register via the indirect HBUS access
 * registers.  The top bits (3 << 24) select the access mode expected
 * by the hardware.  NOTE(review): callers in this file take
 * iwm_nic_lock() around PRPH access — presumably that is required
 * here too; confirm before adding new callers.
 */
static uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}
    946 
/*
 * Write a periphery (PRPH) register via the indirect HBUS access
 * registers; the barrier orders the address write before the data
 * write.  NOTE(review): like iwm_read_prph(), presumably requires
 * the NIC lock — confirm for new call sites.
 */
static void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}
    955 
#ifdef IWM_DEBUG
/*
 * Read 'dwords' 32-bit words of device SRAM starting at 'addr' into
 * 'buf'.  The read-data register auto-increments after the address is
 * latched.  Returns 0 on success or EBUSY if the NIC lock could not
 * be taken.
 */
static int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	uint32_t *words = buf;
	int i;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
	for (i = 0; i < dwords; i++)
		words[i] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
	iwm_nic_unlock(sc);

	return 0;
}
#endif
    974 
    975 static int
    976 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
    977 {
    978 	int offs;
    979 	const uint32_t *vals = buf;
    980 
    981 	if (iwm_nic_lock(sc)) {
    982 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
    983 		/* WADDR auto-increments */
    984 		for (offs = 0; offs < dwords; offs++) {
    985 			uint32_t val = vals ? vals[offs] : 0;
    986 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
    987 		}
    988 		iwm_nic_unlock(sc);
    989 	} else {
    990 		return EBUSY;
    991 	}
    992 	return 0;
    993 }
    994 
    995 static int
    996 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    997 {
    998 	return iwm_write_mem(sc, addr, &val, 1);
    999 }
   1000 
   1001 static int
   1002 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
   1003     int timo)
   1004 {
   1005 	for (;;) {
   1006 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
   1007 			return 1;
   1008 		}
   1009 		if (timo < 10) {
   1010 			return 0;
   1011 		}
   1012 		timo -= 10;
   1013 		DELAY(10);
   1014 	}
   1015 }
   1016 
/*
 * Request MAC access and wait for the device to wake up so that
 * device-internal registers (PRPH, SRAM) may be accessed.  Returns 1
 * on success, 0 on timeout; on timeout a forced NMI is issued to the
 * device.  Paired with iwm_nic_unlock().
 */
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* 8000 family needs a short settle time after the request. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	/* Wait for clock ready and not-going-to-sleep (up to 15ms). */
	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		aprint_error_dev(sc->sc_dev, "device timeout\n");
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}
   1040 
/*
 * Release the MAC access request taken by iwm_nic_lock(), allowing
 * the device to go back to sleep.
 */
static void
iwm_nic_unlock(struct iwm_softc *sc)
{
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
   1047 
   1048 static void
   1049 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
   1050     uint32_t mask)
   1051 {
   1052 	uint32_t val;
   1053 
   1054 	/* XXX: no error path? */
   1055 	if (iwm_nic_lock(sc)) {
   1056 		val = iwm_read_prph(sc, reg) & mask;
   1057 		val |= bits;
   1058 		iwm_write_prph(sc, reg, val);
   1059 		iwm_nic_unlock(sc);
   1060 	}
   1061 }
   1062 
/*
 * Set 'bits' in a PRPH register, preserving all other bits.
 */
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
   1068 
/*
 * Clear 'bits' in a PRPH register, preserving all other bits.
 */
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
   1074 
/*
 * Allocate, map and load a physically contiguous, zeroed DMA buffer
 * of 'size' bytes with the given alignment.  On success fills in
 * dma->{map,seg,vaddr,paddr}; on failure every partially acquired
 * resource is released via iwm_dma_contig_free() and an errno is
 * returned.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	/* One map with a single segment covering the whole buffer. */
	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	/* Hand the device a zeroed buffer. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}
   1114 
   1115 static void
   1116 iwm_dma_contig_free(struct iwm_dma_info *dma)
   1117 {
   1118 	if (dma->map != NULL) {
   1119 		if (dma->vaddr != NULL) {
   1120 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
   1121 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1122 			bus_dmamap_unload(dma->tag, dma->map);
   1123 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
   1124 			bus_dmamem_free(dma->tag, &dma->seg, 1);
   1125 			dma->vaddr = NULL;
   1126 		}
   1127 		bus_dmamap_destroy(dma->tag, dma->map);
   1128 		dma->map = NULL;
   1129 	}
   1130 }
   1131 
/*
 * Allocate the RX ring: the descriptor array, the RX status area, and
 * one DMA map plus one receive buffer (via iwm_rx_addbuf()) per slot.
 * On any failure the partially built ring is freed and an errno is
 * returned.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		/* Attach an mbuf to this slot and load its DMA map. */
		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}
   1182 
/*
 * Stop the RX DMA channel and poll (up to ~10ms) for it to report
 * idle.  A timeout is silently ignored; failure to take the NIC lock
 * skips the operation entirely.
 */
static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
}
   1199 
/*
 * Reset RX ring software state: rewind the ring index and clear the
 * status area shared with the device (syncing it for DMA).  The
 * attached buffers are kept.
 */
void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
   1208 
   1209 static void
   1210 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
   1211 {
   1212 	int i;
   1213 
   1214 	iwm_dma_contig_free(&ring->desc_dma);
   1215 	iwm_dma_contig_free(&ring->stat_dma);
   1216 
   1217 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
   1218 		struct iwm_rx_data *data = &ring->data[i];
   1219 
   1220 		if (data->m != NULL) {
   1221 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1222 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1223 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1224 			m_freem(data->m);
   1225 			data->m = NULL;
   1226 		}
   1227 		if (data->map != NULL) {
   1228 			bus_dmamap_destroy(sc->sc_dmat, data->map);
   1229 			data->map = NULL;
   1230 		}
   1231 	}
   1232 }
   1233 
/*
 * Allocate TX ring 'qid': the TFD descriptor array for every ring,
 * plus (only for rings up to the command queue) the per-slot device
 * command buffers and DMA maps.  Command-queue maps are sized for the
 * largest firmware command; data-queue maps for a packet (MCLBYTES).
 * On failure the partially built ring is freed and an errno returned.
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Precompute each slot's command/scratch physical addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* All slots consumed exactly the command DMA area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}
   1301 
/*
 * Drop every queued frame from a TX ring, clear its descriptors and
 * rewind its indices.  The DMA maps themselves are kept for reuse.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be the reason the interface is full. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
   1326 
   1327 static void
   1328 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
   1329 {
   1330 	int i;
   1331 
   1332 	iwm_dma_contig_free(&ring->desc_dma);
   1333 	iwm_dma_contig_free(&ring->cmd_dma);
   1334 
   1335 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
   1336 		struct iwm_tx_data *data = &ring->data[i];
   1337 
   1338 		if (data->m != NULL) {
   1339 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1340 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1341 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1342 			m_freem(data->m);
   1343 		}
   1344 		if (data->map != NULL) {
   1345 			bus_dmamap_destroy(sc->sc_dmat, data->map);
   1346 			data->map = NULL;
   1347 		}
   1348 	}
   1349 }
   1350 
/*
 * Mask all interrupts except RF-kill, and remember the mask so
 * iwm_restore_interrupts() can reinstate it.
 */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1357 
   1358 static int
   1359 iwm_check_rfkill(struct iwm_softc *sc)
   1360 {
   1361 	uint32_t v;
   1362 	int s;
   1363 	int rv;
   1364 
   1365 	s = splnet();
   1366 
   1367 	/*
   1368 	 * "documentation" is not really helpful here:
   1369 	 *  27:	HW_RF_KILL_SW
   1370 	 *	Indicates state of (platform's) hardware RF-Kill switch
   1371 	 *
   1372 	 * But apparently when it's off, it's on ...
   1373 	 */
   1374 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
   1375 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
   1376 	if (rv) {
   1377 		sc->sc_flags |= IWM_FLAG_RFKILL;
   1378 	} else {
   1379 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
   1380 	}
   1381 
   1382 	splx(s);
   1383 	return rv;
   1384 }
   1385 
/*
 * Enable the driver's standard interrupt set and remember the mask
 * for iwm_restore_interrupts().
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1392 
/*
 * Re-program the last interrupt mask saved in sc->sc_intmask.
 */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1398 
/*
 * Mask all device interrupts and acknowledge any that are pending
 * (both the main and the FH interrupt status registers).
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
   1412 
/*
 * Clear the interrupt cause table (ICT), point the device at it and
 * switch the driver to ICT interrupt mode, then re-enable interrupts.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Zero the table before the device can write new entries. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Ack anything pending before re-enabling. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
   1436 
#define IWM_HW_READY_TIMEOUT 50
/*
 * Assert NIC_READY and poll (up to IWM_HW_READY_TIMEOUT us) for the
 * device to acknowledge it.  On success also tells the device the OS
 * driver is alive.  Returns nonzero if the hardware became ready.
 */
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT
   1457 
/*
 * Bring the card to the "hardware ready" state.  If the first attempt
 * fails, assert PREPARE and retry every 200us for up to 150ms.
 * Returns 0 on success or ETIMEDOUT.
 */
static int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int t = 0;

	if (iwm_set_hw_ready(sc))
		return 0;

	DELAY(100);

	/* If HW is not ready, prepare the conditions to check again */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		if (iwm_set_hw_ready(sc))
			return 0;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	return ETIMEDOUT;
}
   1481 
/*
 * Configure L0s power saving according to the PCIe link control
 * register: when ASPM L1 is enabled, disable L0s, and vice versa
 * (note the inverted-looking register semantics, mirrored from the
 * Linux driver).
 */
static void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}
   1499 
   1500 /*
   1501  * Start up NIC's basic functionality after it has been reset
   1502  * e.g. after platform boot or shutdown.
   1503  * NOTE:  This does not load uCode nor start the embedded processor
   1504  */
   1505 static int
   1506 iwm_apm_init(struct iwm_softc *sc)
   1507 {
   1508 	int err = 0;
   1509 
   1510 	/* Disable L0S exit timer (platform NMI workaround) */
   1511 	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
   1512 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1513 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
   1514 
   1515 	/*
   1516 	 * Disable L0s without affecting L1;
   1517 	 *  don't wait for ICH L0s (ICH bug W/A)
   1518 	 */
   1519 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1520 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
   1521 
   1522 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
   1523 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
   1524 
   1525 	/*
   1526 	 * Enable HAP INTA (interrupt from management bus) to
   1527 	 * wake device's PCI Express link L1a -> L0s
   1528 	 */
   1529 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1530 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
   1531 
   1532 	iwm_apm_config(sc);
   1533 
   1534 #if 0 /* not for 7k/8k */
   1535 	/* Configure analog phase-lock-loop before activating to D0A */
   1536 	if (trans->cfg->base_params->pll_cfg_val)
   1537 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
   1538 		    trans->cfg->base_params->pll_cfg_val);
   1539 #endif
   1540 
   1541 	/*
   1542 	 * Set "initialization complete" bit to move adapter from
   1543 	 * D0U* --> D0A* (powered-up active) state.
   1544 	 */
   1545 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
   1546 
   1547 	/*
   1548 	 * Wait for clock stabilization; once stabilized, access to
   1549 	 * device-internal resources is supported, e.g. iwm_write_prph()
   1550 	 * and accesses to uCode SRAM.
   1551 	 */
   1552 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   1553 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   1554 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
   1555 		aprint_error_dev(sc->sc_dev,
   1556 		    "timeout waiting for clock stabilization\n");
   1557 		err = ETIMEDOUT;
   1558 		goto out;
   1559 	}
   1560 
   1561 	if (sc->host_interrupt_operation_mode) {
   1562 		/*
   1563 		 * This is a bit of an abuse - This is needed for 7260 / 3160
   1564 		 * only check host_interrupt_operation_mode even if this is
   1565 		 * not related to host_interrupt_operation_mode.
   1566 		 *
   1567 		 * Enable the oscillator to count wake up time for L1 exit. This
   1568 		 * consumes slightly more power (100uA) - but allows to be sure
   1569 		 * that we wake up from L1 on time.
   1570 		 *
   1571 		 * This looks weird: read twice the same register, discard the
   1572 		 * value, set a bit, and yet again, read that same register
   1573 		 * just to discard the value. But that's the way the hardware
   1574 		 * seems to like it.
   1575 		 */
   1576 		iwm_read_prph(sc, IWM_OSC_CLK);
   1577 		iwm_read_prph(sc, IWM_OSC_CLK);
   1578 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
   1579 		iwm_read_prph(sc, IWM_OSC_CLK);
   1580 		iwm_read_prph(sc, IWM_OSC_CLK);
   1581 	}
   1582 
   1583 	/*
   1584 	 * Enable DMA clock and wait for it to stabilize.
   1585 	 *
   1586 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
   1587 	 * do not disable clocks.  This preserves any hardware bits already
   1588 	 * set by default in "CLK_CTRL_REG" after reset.
   1589 	 */
   1590 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
   1591 		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
   1592 		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
   1593 		DELAY(20);
   1594 
   1595 		/* Disable L1-Active */
   1596 		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
   1597 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
   1598 
   1599 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
   1600 		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
   1601 		    IWM_APMG_RTC_INT_STT_RFKILL);
   1602 	}
   1603  out:
   1604 	if (err)
   1605 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
   1606 	return err;
   1607 }
   1608 
/*
 * Stop the device's busmaster DMA activity and wait (up to 100us) for
 * the master-disabled acknowledgement; a timeout is only logged.
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
   1621 
/*
 * Basic hardware bring-up: get the card ready, soft-reset the whole
 * device, run APM init, then arm the RF-kill interrupt and take an
 * initial RF-kill reading.  Returns 0 or an errno from the sub-steps.
 */
static int
iwm_start_hw(struct iwm_softc *sc)
{
	int err;

	err = iwm_prepare_card_hw(sc);
	if (err)
		return err;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(10);

	err = iwm_apm_init(sc);
	if (err)
		return err;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
   1644 
/*
 * Full device shutdown: mask interrupts, stop the TX scheduler and
 * all DMA channels, drain the RX/TX rings, power down DMA clocks,
 * stop the APM and reset the on-board processor.  RF-kill interrupts
 * stay enabled so switch changes are still noticed while down.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll up to ~4ms for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
   1707 
/*
 * Program the hardware interface configuration register from the MAC
 * revision (step/dash) and the radio configuration (type/step/dash)
 * extracted from the firmware's PHY_SKU TLV (sc->sc_fw_phy_config).
 */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Unpack the radio config fields from the firmware PHY SKU word. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
   1746 
/*
 * Initialize the RX DMA engine: clear the status area, reset the ring
 * pointers, program the ring/status base addresses, and enable the RX
 * channel.  Returns EBUSY if the NIC lock cannot be taken, else 0.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Stop RX DMA, then zero the channel's read/write pointers. */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
   1798 
/*
 * Initialize the TX DMA engine: deactivate the scheduler, program the
 * "keep warm" page address and each TX ring's descriptor base address.
 * Returns EBUSY if the NIC lock cannot be taken, else 0.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
   1829 
   1830 static int
   1831 iwm_nic_init(struct iwm_softc *sc)
   1832 {
   1833 	int err;
   1834 
   1835 	iwm_apm_init(sc);
   1836 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
   1837 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
   1838 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
   1839 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
   1840 
   1841 	iwm_nic_config(sc);
   1842 
   1843 	err = iwm_nic_rx_init(sc);
   1844 	if (err)
   1845 		return err;
   1846 
   1847 	err = iwm_nic_tx_init(sc);
   1848 	if (err)
   1849 		return err;
   1850 
   1851 	DPRINTF(("shadow registers enabled\n"));
   1852 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
   1853 
   1854 	return 0;
   1855 }
   1856 
/*
 * Access-category to device TX FIFO map.  Note the VO-first ordering
 * (VO, VI, BE, BK) matches the Linux iwlwifi table; presumably callers
 * index it accordingly rather than in net80211 WME order -- confirm at
 * the call sites.
 */
static const uint8_t iwm_ac_to_tx_fifo[] = {
	IWM_TX_FIFO_VO,
	IWM_TX_FIFO_VI,
	IWM_TX_FIFO_BE,
	IWM_TX_FIFO_BK,
};
   1863 
/*
 * Activate TX queue 'qid' and bind it to TX FIFO 'fifo'.
 * The command queue is programmed directly through the scheduler
 * registers; all other queues are configured by the firmware via an
 * IWM_SCD_QUEUE_CFG command.  Returns 0 on success or an error code.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
		return EBUSY;
	}

	/* Reset the queue's write pointer to index 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Disable the queue while it is being (re)configured. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

		/* Clear the queue's scheduler context in SRAM. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);

		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT
		        << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Mark the queue active and bound to its FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int err;

		/* The firmware command sleeps; drop the NIC lock first. */
		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
		    &cmd);
		if (err)
			return err;

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): this ORs the queue *number* into IWM_SCD_EN_CTRL.
	 * If the register is a per-queue enable bitmask (as in iwlwifi)
	 * this should presumably be (1 << qid) -- confirm against upstream
	 * before changing.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));

	return 0;
}
   1933 
/*
 * Finish bring-up after the firmware's "alive" notification: verify
 * the scheduler SRAM base, reset the ICT table, clear the scheduler
 * context, program the scheduler DRAM base, enable the command queue
 * and the TX DMA channels.  Returns 0 on success or an error code.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int err, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* The firmware's idea of the scheduler base must match ours. */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (sc->sched_base != base) {
		DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
		    DEVNAME(sc), sc->sched_base, base));
		err = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	err = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (err)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* Drop the lock: iwm_enable_txq() takes it itself. */
	iwm_nic_unlock(sc);

	/* enable command channel */
	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return err;
}
   2001 
   2002 static struct iwm_phy_db_entry *
   2003 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
   2004     uint16_t chg_id)
   2005 {
   2006 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   2007 
   2008 	if (type >= IWM_PHY_DB_MAX)
   2009 		return NULL;
   2010 
   2011 	switch (type) {
   2012 	case IWM_PHY_DB_CFG:
   2013 		return &phy_db->cfg;
   2014 	case IWM_PHY_DB_CALIB_NCH:
   2015 		return &phy_db->calib_nch;
   2016 	case IWM_PHY_DB_CALIB_CHG_PAPD:
   2017 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
   2018 			return NULL;
   2019 		return &phy_db->calib_ch_group_papd[chg_id];
   2020 	case IWM_PHY_DB_CALIB_CHG_TXP:
   2021 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
   2022 			return NULL;
   2023 		return &phy_db->calib_ch_group_txp[chg_id];
   2024 	default:
   2025 		return NULL;
   2026 	}
   2027 	return NULL;
   2028 }
   2029 
/*
 * Store one PHY DB section received from the firmware (calibration
 * result notification) in the softc, replacing any previous contents.
 * Returns EINVAL for unknown sections, ENOMEM on allocation failure.
 */
static int
iwm_phy_db_set_section(struct iwm_softc *sc,
    struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
{
	struct iwm_phy_db_entry *entry;
	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
	uint16_t chg_id = 0;

	/* Channel-group sections carry the group index as the first
	 * 16-bit little-endian word of the payload. */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Replace any previous contents; kmem_intr_* is used since this
	 * presumably runs from interrupt context -- hence KM_NOSLEEP. */
	if (entry->data)
		kmem_intr_free(entry->data, entry->size);
	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
	    __func__, __LINE__, type, size, entry->data));

	return 0;
}
   2061 
   2062 static int
   2063 iwm_is_valid_channel(uint16_t ch_id)
   2064 {
   2065 	if (ch_id <= 14 ||
   2066 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
   2067 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
   2068 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
   2069 		return 1;
   2070 	return 0;
   2071 }
   2072 
   2073 static uint8_t
   2074 iwm_ch_id_to_ch_index(uint16_t ch_id)
   2075 {
   2076 	if (!iwm_is_valid_channel(ch_id))
   2077 		return 0xff;
   2078 
   2079 	if (ch_id <= 14)
   2080 		return ch_id - 1;
   2081 	if (ch_id <= 64)
   2082 		return (ch_id + 20) / 4;
   2083 	if (ch_id <= 140)
   2084 		return (ch_id - 12) / 4;
   2085 	return (ch_id - 13) / 4;
   2086 }
   2087 
   2088 
   2089 static uint16_t
   2090 iwm_channel_id_to_papd(uint16_t ch_id)
   2091 {
   2092 	if (!iwm_is_valid_channel(ch_id))
   2093 		return 0xff;
   2094 
   2095 	if (1 <= ch_id && ch_id <= 14)
   2096 		return 0;
   2097 	if (36 <= ch_id && ch_id <= 64)
   2098 		return 1;
   2099 	if (100 <= ch_id && ch_id <= 140)
   2100 		return 2;
   2101 	return 3;
   2102 }
   2103 
   2104 static uint16_t
   2105 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
   2106 {
   2107 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   2108 	struct iwm_phy_db_chg_txp *txp_chg;
   2109 	int i;
   2110 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
   2111 
   2112 	if (ch_index == 0xff)
   2113 		return 0xff;
   2114 
   2115 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
   2116 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
   2117 		if (!txp_chg)
   2118 			return 0xff;
   2119 		/*
   2120 		 * Looking for the first channel group the max channel
   2121 		 * of which is higher than the requested channel.
   2122 		 */
   2123 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
   2124 			return i;
   2125 	}
   2126 	return 0xff;
   2127 }
   2128 
   2129 static int
   2130 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
   2131     uint16_t *size, uint16_t ch_id)
   2132 {
   2133 	struct iwm_phy_db_entry *entry;
   2134 	uint16_t ch_group_id = 0;
   2135 
   2136 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
   2137 		ch_group_id = iwm_channel_id_to_papd(ch_id);
   2138 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
   2139 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
   2140 
   2141 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
   2142 	if (!entry)
   2143 		return EINVAL;
   2144 
   2145 	*data = entry->data;
   2146 	*size = entry->size;
   2147 
   2148 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
   2149 		       __func__, __LINE__, type, *size));
   2150 
   2151 	return 0;
   2152 }
   2153 
   2154 static int
   2155 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
   2156     void *data)
   2157 {
   2158 	struct iwm_phy_db_cmd phy_db_cmd;
   2159 	struct iwm_host_cmd cmd = {
   2160 		.id = IWM_PHY_DB_CMD,
   2161 		.flags = IWM_CMD_ASYNC,
   2162 	};
   2163 
   2164 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
   2165 	    type, length));
   2166 
   2167 	phy_db_cmd.type = le16toh(type);
   2168 	phy_db_cmd.length = le16toh(length);
   2169 
   2170 	cmd.data[0] = &phy_db_cmd;
   2171 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
   2172 	cmd.data[1] = data;
   2173 	cmd.len[1] = length;
   2174 
   2175 	return iwm_send_cmd(sc, &cmd);
   2176 }
   2177 
   2178 static int
   2179 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
   2180     enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
   2181 {
   2182 	uint16_t i;
   2183 	int err;
   2184 	struct iwm_phy_db_entry *entry;
   2185 
   2186 	/* Send all the channel-specific groups to operational fw */
   2187 	for (i = 0; i < max_ch_groups; i++) {
   2188 		entry = iwm_phy_db_get_section(sc, type, i);
   2189 		if (!entry)
   2190 			return EINVAL;
   2191 
   2192 		if (!entry->size)
   2193 			continue;
   2194 
   2195 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
   2196 		if (err) {
   2197 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
   2198 			    "err %d\n", DEVNAME(sc), type, i, err));
   2199 			return err;
   2200 		}
   2201 
   2202 		DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
   2203 		    DEVNAME(sc), type, i));
   2204 
   2205 		DELAY(1000);
   2206 	}
   2207 
   2208 	return 0;
   2209 }
   2210 
   2211 static int
   2212 iwm_send_phy_db_data(struct iwm_softc *sc)
   2213 {
   2214 	uint8_t *data = NULL;
   2215 	uint16_t size = 0;
   2216 	int err;
   2217 
   2218 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
   2219 	if (err)
   2220 		return err;
   2221 
   2222 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
   2223 	if (err)
   2224 		return err;
   2225 
   2226 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
   2227 	    &data, &size, 0);
   2228 	if (err)
   2229 		return err;
   2230 
   2231 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
   2232 	if (err)
   2233 		return err;
   2234 
   2235 	err = iwm_phy_db_send_all_channel_groups(sc,
   2236 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
   2237 	if (err)
   2238 		return err;
   2239 
   2240 	err = iwm_phy_db_send_all_channel_groups(sc,
   2241 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
   2242 	if (err)
   2243 		return err;
   2244 
   2245 	return 0;
   2246 }
   2247 
   2248 /*
   2249  * For the high priority TE use a time event type that has similar priority to
   2250  * the FW's action scan priority.
   2251  */
   2252 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
   2253 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
   2254 
   2255 /* used to convert from time event API v2 to v1 */
   2256 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
   2257 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
   2258 static inline uint16_t
   2259 iwm_te_v2_get_notify(uint16_t policy)
   2260 {
   2261 	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
   2262 }
   2263 
   2264 static inline uint16_t
   2265 iwm_te_v2_get_dep_policy(uint16_t policy)
   2266 {
   2267 	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
   2268 		IWM_TE_V2_PLACEMENT_POS;
   2269 }
   2270 
   2271 static inline uint16_t
   2272 iwm_te_v2_get_absence(uint16_t policy)
   2273 {
   2274 	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
   2275 }
   2276 
/*
 * Translate a version-2 time event command into the older version-1
 * layout, for firmware lacking IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2
 * (see iwm_send_time_event_cmd()).
 */
static void
iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
    struct iwm_time_event_cmd_v1 *cmd_v1)
{
	/* Fields with identical meaning are copied through unchanged. */
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* The "endless repeat" sentinel differs between the two APIs. */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	/* v1 splits the packed v2 policy word into separate fields. */
	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
   2300 
   2301 static int
   2302 iwm_send_time_event_cmd(struct iwm_softc *sc,
   2303     const struct iwm_time_event_cmd_v2 *cmd)
   2304 {
   2305 	struct iwm_time_event_cmd_v1 cmd_v1;
   2306 
   2307 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
   2308 		return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
   2309 		    cmd);
   2310 
   2311 	iwm_te_v2_to_v1(cmd, &cmd_v1);
   2312 	return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
   2313 	    &cmd_v1);
   2314 }
   2315 
/*
 * Schedule a one-shot firmware time event of type
 * IWM_TE_BSS_STA_AGGRESSIVE_ASSOC for 'duration' (starting within
 * 'max_delay'), with host notifications on start and end.
 */
static void
iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Start as soon as possible rather than at a fixed TSF. */
	time_cmd.apply_time = htole32(0);

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
		IWM_T2_V2_START_IMMEDIATELY);

	/* Fire and forget; errors are ignored here. */
	iwm_send_time_event_cmd(sc, &time_cmd);
}
   2344 
   2345 /*
   2346  * NVM read access and content parsing.  We do not support
   2347  * external NVM or writing NVM.
   2348  */
   2349 
   2350 /* list of NVM sections we are allowed/need to read */
static const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	/* The remaining sections exist on 8000-family devices. */
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};
   2361 
   2362 /* Default NVM size to read */
   2363 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
   2364 #define IWM_MAX_NVM_SECTION_SIZE	8192
   2365 
   2366 #define IWM_NVM_WRITE_OPCODE 1
   2367 #define IWM_NVM_READ_OPCODE 0
   2368 
   2369 static int
   2370 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
   2371     uint16_t length, uint8_t *data, uint16_t *len)
   2372 {
   2373 	offset = 0;
   2374 	struct iwm_nvm_access_cmd nvm_access_cmd = {
   2375 		.offset = htole16(offset),
   2376 		.length = htole16(length),
   2377 		.type = htole16(section),
   2378 		.op_code = IWM_NVM_READ_OPCODE,
   2379 	};
   2380 	struct iwm_nvm_access_resp *nvm_resp;
   2381 	struct iwm_rx_packet *pkt;
   2382 	struct iwm_host_cmd cmd = {
   2383 		.id = IWM_NVM_ACCESS_CMD,
   2384 		.flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
   2385 		.data = { &nvm_access_cmd, },
   2386 	};
   2387 	int err, offset_read;
   2388 	size_t bytes_read;
   2389 	uint8_t *resp_data;
   2390 
   2391 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
   2392 
   2393 	err = iwm_send_cmd(sc, &cmd);
   2394 	if (err) {
   2395 		DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
   2396 		    DEVNAME(sc), err));
   2397 		return err;
   2398 	}
   2399 
   2400 	pkt = cmd.resp_pkt;
   2401 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   2402 		err = EIO;
   2403 		goto exit;
   2404 	}
   2405 
   2406 	/* Extract NVM response */
   2407 	nvm_resp = (void *)pkt->data;
   2408 
   2409 	err = le16toh(nvm_resp->status);
   2410 	bytes_read = le16toh(nvm_resp->length);
   2411 	offset_read = le16toh(nvm_resp->offset);
   2412 	resp_data = nvm_resp->data;
   2413 	if (err) {
   2414 		err = EINVAL;
   2415 		goto exit;
   2416 	}
   2417 
   2418 	if (offset_read != offset) {
   2419 		err = EINVAL;
   2420 		goto exit;
   2421 	}
   2422 	if (bytes_read > length) {
   2423 		err = EINVAL;
   2424 		goto exit;
   2425 	}
   2426 
   2427 	memcpy(data + offset, resp_data, bytes_read);
   2428 	*len = bytes_read;
   2429 
   2430  exit:
   2431 	iwm_free_resp(sc, &cmd);
   2432 	return err;
   2433 }
   2434 
   2435 /*
   2436  * Reads an NVM section completely.
   2437  * NICs prior to 7000 family doesn't have a real NVM, but just read
   2438  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
   2439  * by uCode, we need to manually check in this case that we don't
   2440  * overflow and try to read more than the EEPROM size.
   2441  */
   2442 static int
   2443 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
   2444     uint16_t *len, size_t max_len)
   2445 {
   2446 	uint16_t chunklen, seglen;
   2447 	int err;
   2448 
   2449 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
   2450 	*len = 0;
   2451 
   2452 	/* Read NVM chunks until exhausted (reading less than requested) */
   2453 	while (seglen == chunklen && *len < max_len) {
   2454 		err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
   2455 		    &seglen);
   2456 		if (err) {
   2457 			DPRINTF(("%s:Cannot read NVM from section %d "
   2458 			    "offset %d, length %d\n",
   2459 			    DEVNAME(sc), section, *len, chunklen));
   2460 			return err;
   2461 		}
   2462 		*len += seglen;
   2463 	}
   2464 
   2465 	DPRINTFN(4, ("NVM section %d read completed\n", section));
   2466 	return 0;
   2467 }
   2468 
   2469 static uint8_t
   2470 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
   2471 {
   2472 	uint8_t tx_ant;
   2473 
   2474 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
   2475 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
   2476 
   2477 	if (sc->sc_nvm.valid_tx_ant)
   2478 		tx_ant &= sc->sc_nvm.valid_tx_ant;
   2479 
   2480 	return tx_ant;
   2481 }
   2482 
   2483 static uint8_t
   2484 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
   2485 {
   2486 	uint8_t rx_ant;
   2487 
   2488 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
   2489 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
   2490 
   2491 	if (sc->sc_nvm.valid_rx_ant)
   2492 		rx_ant &= sc->sc_nvm.valid_rx_ant;
   2493 
   2494 	return rx_ant;
   2495 }
   2496 
/*
 * Populate net80211's channel table from the NVM channel list and
 * per-channel flags: 2GHz channels get CCK/OFDM flags, 5GHz channels
 * get 11a flags, and channels not marked ACTIVE become passive-scan
 * only.  Channels marked invalid (or 5GHz when the SKU disables the
 * 5GHz band) are skipped.
 */
static void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
    const uint8_t *nvm_channels, size_t nchan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* Treat 5GHz channels as invalid if the SKU lacks 5GHz. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			/*
			 * NOTE(review): this debug print indexes the global
			 * iwm_nvm_channels[] rather than the nvm_channels
			 * parameter -- presumably equivalent for 7000-family
			 * callers; confirm for other channel tables.
			 */
			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
			    iwm_nvm_channels[ch_idx],
			    ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4"));
			continue;
		}

		hw_value = nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Channels not marked ACTIVE may only be scanned passively. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;

#ifndef IEEE80211_NO_HT
		if (data->sku_cap_11n_enable)
			channel->ic_flags |= IEEE80211_CHAN_HT;
#endif
	}
}
   2552 
   2553 #ifndef IEEE80211_NO_HT
/*
 * Advertise supported HT MCS rates.  NOTE: this function is currently
 * compiled out, since IEEE80211_NO_HT is defined at the top of this
 * file.
 */
static void
iwm_setup_ht_rates(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;

	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */

#ifdef notyet
	/* Advertise MIMO rates only when multiple RX chains are usable. */
	if (sc->sc_nvm.sku_cap_mimo_disable)
		return;

	if (iwm_fw_valid_rx_ant(sc) > 1)
		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
	if (iwm_fw_valid_rx_ant(sc) > 2)
		ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
#endif
}
   2574 
   2575 #define IWM_MAX_RX_BA_SESSIONS 16
   2576 
/*
 * Start or stop an RX block ack session for (station, tid) by sending
 * an IWM_ADD_STA modify command, then accept or refuse the peer's
 * ADDBA request based on the outcome.  Compiled out while
 * IEEE80211_NO_HT is defined.
 */
static void
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_add_sta_cmd_v7 cmd;
	struct iwm_node *in = (struct iwm_node *)ni;
	int err, s;
	uint32_t status;

	/* Enforce the driver-wide cap on concurrent RX BA sessions. */
	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		return;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.sta_id = IWM_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWM_STA_MODE_MODIFY;

	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		cmd.add_immediate_ba_ssn = ssn;
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
	    IWM_STA_MODIFY_REMOVE_BA_TID;

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);

	/* Update session accounting and answer the ADDBA request. */
	s = splnet();
	if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
		if (start) {
			sc->sc_rx_ba_sessions++;
			ieee80211_addba_req_accept(ic, ni, tid);
		} else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	} else if (start)
		ieee80211_addba_req_refuse(ic, ni, tid);

	splx(s);
}
   2624 
   2625 static void
   2626 iwm_htprot_task(void *arg)
   2627 {
   2628 	struct iwm_softc *sc = arg;
   2629 	struct ieee80211com *ic = &sc->sc_ic;
   2630 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
   2631 	int err;
   2632 
   2633 	/* This call updates HT protection based on in->in_ni.ni_htop1. */
   2634 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
   2635 	if (err)
   2636 		aprint_error_dev(sc->sc_dev,
   2637 		    "could not change HT protection: error %d\n", err);
   2638 }
   2639 
   2640 /*
   2641  * This function is called by upper layer when HT protection settings in
   2642  * beacons have changed.
   2643  */
static void
iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct iwm_softc *sc = ic->ic_softc;

	/* assumes that ni == ic->ic_bss */
	/* Defer to task context: firmware commands may sleep. */
	task_add(systq, &sc->htprot_task);
}
   2652 
   2653 static void
   2654 iwm_ba_task(void *arg)
   2655 {
   2656 	struct iwm_softc *sc = arg;
   2657 	struct ieee80211com *ic = &sc->sc_ic;
   2658 	struct ieee80211_node *ni = ic->ic_bss;
   2659 
   2660 	if (sc->ba_start)
   2661 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
   2662 	else
   2663 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
   2664 }
   2665 
   2666 /*
   2667  * This function is called by upper layer when an ADDBA request is received
   2668  * from another STA and before the ADDBA response is sent.
   2669  */
   2670 static int
   2671 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
   2672     uint8_t tid)
   2673 {
   2674 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
   2675 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
   2676 
   2677 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
   2678 		return ENOSPC;
   2679 
   2680 	sc->ba_start = 1;
   2681 	sc->ba_tid = tid;
   2682 	sc->ba_ssn = htole16(ba->ba_winstart);
   2683 	task_add(systq, &sc->ba_task);
   2684 
   2685 	return EBUSY;
   2686 }
   2687 
   2688 /*
   2689  * This function is called by upper layer on teardown of an HT-immediate
   2690  * Block Ack agreement (eg. upon receipt of a DELBA frame).
   2691  */
   2692 static void
   2693 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
   2694     uint8_t tid)
   2695 {
   2696 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
   2697 
   2698 	sc->ba_start = 0;
   2699 	sc->ba_tid = tid;
   2700 	task_add(systq, &sc->ba_task);
   2701 }
   2702 #endif
   2703 
/*
 * Determine the MAC address of an 8000-family device.
 *
 * Preference order: the MAC_ADDRESS_OVERRIDE NVM section (when present
 * and the address there is not the reserved marker, broadcast, all
 * zeroes, or multicast), then the WFMP PRPH registers.  If neither
 * source yields an address, data->hw_addr is zeroed and an error is
 * logged.
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	/* Marker address meaning "fall back to the OTP (WFMP) address". */
	static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
	};
	static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const uint8_t *hw_addr;

	if (mac_override) {
		hw_addr = (const uint8_t *)(mac_override +
		    IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
		    (memcmp(etherbroadcastaddr, data->hw_addr,
		    sizeof(etherbroadcastaddr)) != 0) &&
		    (memcmp(etheranyaddr, data->hw_addr,
		    sizeof(etheranyaddr)) != 0) &&
		    !ETHER_IS_MULTICAST(data->hw_addr))
			return;	/* override address is valid; keep it */
	}

	if (nvm_hw) {
		/* Read the mac address from WFMP registers. */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Registers hold the address byte-reversed; swap into place. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	/* No usable source; leave a zeroed (invalid) address behind. */
	aprint_error_dev(sc->sc_dev, "mac address not found\n");
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
   2762 
/*
 * Parse the relevant NVM sections into sc->sc_nvm: NVM version, radio
 * configuration, SKU capability bits, hardware address count, the MAC
 * address, and the channel map.  Field layouts and word widths differ
 * between the 7000 and 8000 device families.
 *
 * nvm_calib is currently unused here.  Returns 0 (cannot fail).
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
    const uint16_t *nvm_sw, const uint16_t *nvm_calib,
    const uint16_t *mac_override, const uint16_t *phy_sku,
    const uint16_t *regulatory)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint32_t sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* 7000: radio config and SKU are 16-bit words in SW section. */
		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

		sku = le16_to_cpup(nvm_sw + IWM_SKU);
	} else {
		/* 8000: 32-bit words in the PHY_SKU section. */
		uint32_t radio_cfg = le32_to_cpup(
		    (const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);

		sku = le32_to_cpup(
		    (const uint32_t *)(phy_sku + IWM_SKU_8000));
	}

	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* 7000: HW section stores the address with bytes pair-swapped. */
		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else
		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);

	/* Channel list lives in different sections per family. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
		    iwm_nvm_channels, __arraycount(iwm_nvm_channels));
	else
		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
		    iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));

	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
   2830 
/*
 * Check that the NVM sections mandatory for this device family are
 * present and hand their payloads to iwm_parse_nvm_data().
 *
 * Returns ENOENT when a mandatory section is missing, panics on an
 * unknown device family, otherwise returns the result of
 * iwm_parse_nvm_data().
 */
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
	const uint16_t *regulatory = NULL;

	/* Checking for required sections */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
			return ENOENT;
		}

		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			return ENOENT;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			return ENOENT;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			return ENOENT;
		}

		regulatory = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
		/* HW_8000 may be NULL if only MAC_OVERRIDE is present. */
		hw = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
		mac_override =
			(const uint16_t *)
			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
		phy_sku = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
	} else {
		panic("unknown device family %d\n", sc->sc_device_family);
	}

	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	/* NOTE(review): CALIBRATION is not checked above and may be NULL. */
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
   2882 
   2883 static int
   2884 iwm_nvm_init(struct iwm_softc *sc)
   2885 {
   2886 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
   2887 	int i, section, err;
   2888 	uint16_t len;
   2889 	uint8_t *buf;
   2890 	const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
   2891 
   2892 	/* Read From FW NVM */
   2893 	DPRINTF(("Read NVM\n"));
   2894 
   2895 	memset(nvm_sections, 0, sizeof(nvm_sections));
   2896 
   2897 	buf = kmem_alloc(bufsz, KM_SLEEP);
   2898 	if (buf == NULL)
   2899 		return ENOMEM;
   2900 
   2901 	for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
   2902 		section = iwm_nvm_to_read[i];
   2903 		KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);
   2904 
   2905 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
   2906 		if (err) {
   2907 			err = 0;
   2908 			continue;
   2909 		}
   2910 		nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
   2911 		if (nvm_sections[section].data == NULL) {
   2912 			err = ENOMEM;
   2913 			break;
   2914 		}
   2915 		memcpy(nvm_sections[section].data, buf, len);
   2916 		nvm_sections[section].length = len;
   2917 	}
   2918 	kmem_free(buf, bufsz);
   2919 	if (err == 0)
   2920 		err = iwm_parse_nvm_sections(sc, nvm_sections);
   2921 
   2922 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
   2923 		if (nvm_sections[i].data != NULL)
   2924 			kmem_free(nvm_sections[i].data, nvm_sections[i].length);
   2925 	}
   2926 
   2927 	return err;
   2928 }
   2929 
   2930 static int
   2931 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
   2932     const uint8_t *section, uint32_t byte_cnt)
   2933 {
   2934 	int err = EINVAL;
   2935 	uint32_t chunk_sz, offset;
   2936 
   2937 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
   2938 
   2939 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
   2940 		uint32_t addr, len;
   2941 		const uint8_t *data;
   2942 
   2943 		addr = dst_addr + offset;
   2944 		len = MIN(chunk_sz, byte_cnt - offset);
   2945 		data = section + offset;
   2946 
   2947 		err = iwm_firmware_load_chunk(sc, addr, data, len);
   2948 		if (err)
   2949 			break;
   2950 	}
   2951 
   2952 	return err;
   2953 }
   2954 
/*
 * DMA one firmware chunk into device SRAM at dst_addr using the
 * service channel of the flow handler, then sleep until the interrupt
 * handler signals completion (sc_fw_chunk_done) or a 5s timeout hits.
 *
 * Addresses in the extended SRAM window require the LMPM_CHICK
 * extended-address bit to be set for the duration of the transfer.
 *
 * Returns 0 on success, EBUSY if the NIC lock cannot be taken, or a
 * tsleep() error (e.g. EWOULDBLOCK on timeout).
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
    const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	bool is_extended = false;
	int err;

	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
	    BUS_DMASYNC_PREWRITE);

	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END)
		is_extended = true;

	if (is_extended) {
		/* Open the extended SRAM address window. */
		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
	}

	/* Cleared before the transfer; set by the interrupt handler. */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc)) {
		/* Undo the window change before bailing out. */
		if (is_extended)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
		return EBUSY;
	}

	/*
	 * Program the service channel: pause, set target SRAM address,
	 * point it at our DMA buffer, mark the buffer valid, then enable
	 * the channel to kick off the transfer.  Order matters.
	 */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* Wait for this segment to load. */
	err = 0;
	while (!sc->sc_fw_chunk_done) {
		err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
		if (err)
			break;
	}
	if (!sc->sc_fw_chunk_done) {
		aprint_error_dev(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
	}

	if (is_extended) {
		/* Close the extended window; clear even if the lock fails. */
		int rv = iwm_nic_lock(sc);
		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
		if (rv == 0)
			iwm_nic_unlock(sc);
	}

	return err;
}
   3029 
   3030 static int
   3031 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3032 {
   3033 	struct iwm_fw_sects *fws;
   3034 	int err, i;
   3035 	void *data;
   3036 	uint32_t dlen;
   3037 	uint32_t offset;
   3038 
   3039 	fws = &sc->sc_fw.fw_sects[ucode_type];
   3040 	for (i = 0; i < fws->fw_count; i++) {
   3041 		data = fws->fw_sect[i].fws_data;
   3042 		dlen = fws->fw_sect[i].fws_len;
   3043 		offset = fws->fw_sect[i].fws_devoff;
   3044 		if (dlen > sc->sc_fwdmasegsz) {
   3045 			err = EFBIG;
   3046 		} else
   3047 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
   3048 		if (err) {
   3049 			aprint_error_dev(sc->sc_dev,
   3050 			    "could not load firmware chunk %u of %u\n",
   3051 			    i, fws->fw_count);
   3052 			return err;
   3053 		}
   3054 	}
   3055 
   3056 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
   3057 
   3058 	return 0;
   3059 }
   3060 
/*
 * Upload the firmware sections belonging to one CPU of an 8000-family
 * device, starting at *first_ucode_section and stopping at the next
 * separator section.  After each section the ucode is notified of the
 * loaded section number via IWM_FH_UCODE_LOAD_STATUS (CPU2 status bits
 * live in the upper 16 bits, hence shift_param).
 *
 * On return *first_ucode_section is the index of the last section
 * examined, so the CPU2 pass can resume after the separator.
 * Returns 0 on success or an errno value.
 */
static int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, err = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		/* CPU2: skip the separator and report in the high half. */
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		if (dlen > sc->sc_fwdmasegsz) {
			/* Section won't fit in the DMA bounce buffer. */
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load firmware chunk %d (error %d)\n",
			    i, err);
			return err;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Accumulate a one-bit per loaded section. */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Tell the ucode this CPU's sections are complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
   3134 
   3135 static int
   3136 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3137 {
   3138 	struct iwm_fw_sects *fws;
   3139 	int err = 0;
   3140 	int first_ucode_section;
   3141 
   3142 	fws = &sc->sc_fw.fw_sects[ucode_type];
   3143 
   3144 	/* configure the ucode to be ready to get the secured image */
   3145 	/* release CPU reset */
   3146 	iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
   3147 
   3148 	/* load to FW the binary Secured sections of CPU1 */
   3149 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
   3150 	if (err)
   3151 		return err;
   3152 
   3153 	/* load to FW the binary sections of CPU2 */
   3154 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
   3155 }
   3156 
   3157 static int
   3158 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3159 {
   3160 	int err, w;
   3161 
   3162 	sc->sc_uc.uc_intr = 0;
   3163 
   3164 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   3165 		err = iwm_load_firmware_8000(sc, ucode_type);
   3166 	else
   3167 		err = iwm_load_firmware_7000(sc, ucode_type);
   3168 
   3169 	if (err)
   3170 		return err;
   3171 
   3172 	/* wait for the firmware to load */
   3173 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
   3174 		err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
   3175 	if (err || !sc->sc_uc.uc_ok)
   3176 		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
   3177 
   3178 	return err;
   3179 }
   3180 
/*
 * Initialize the NIC, clear the rfkill handshake bits, enable host
 * interrupts, and upload the requested ucode image.
 *
 * Returns 0 on success or an errno value.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err;

	/* Ack any pending interrupts before reinitializing. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	err = iwm_nic_init(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return err;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	return iwm_load_firmware(sc, ucode_type);
}
   3210 
   3211 static int
   3212 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
   3213 {
   3214 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
   3215 		.valid = htole32(valid_tx_ant),
   3216 	};
   3217 
   3218 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
   3219 	    sizeof(tx_ant_cmd), &tx_ant_cmd);
   3220 }
   3221 
   3222 static int
   3223 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
   3224 {
   3225 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
   3226 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
   3227 
   3228 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
   3229 	phy_cfg_cmd.calib_control.event_trigger =
   3230 	    sc->sc_default_calib[ucode_type].event_trigger;
   3231 	phy_cfg_cmd.calib_control.flow_trigger =
   3232 	    sc->sc_default_calib[ucode_type].flow_trigger;
   3233 
   3234 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
   3235 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
   3236 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
   3237 }
   3238 
   3239 static int
   3240 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
   3241 	enum iwm_ucode_type ucode_type)
   3242 {
   3243 	enum iwm_ucode_type old_type = sc->sc_uc_current;
   3244 	int err;
   3245 
   3246 	err = iwm_read_firmware(sc);
   3247 	if (err)
   3248 		return err;
   3249 
   3250 	sc->sc_uc_current = ucode_type;
   3251 	err = iwm_start_fw(sc, ucode_type);
   3252 	if (err) {
   3253 		sc->sc_uc_current = old_type;
   3254 		return err;
   3255 	}
   3256 
   3257 	return iwm_post_alive(sc);
   3258 }
   3259 
/*
 * Run the INIT ucode image.  With justnvm set, only the NVM is read
 * (to obtain the MAC address and channel map) and the function returns.
 * Otherwise the init-time configuration commands are sent and we wait
 * for the firmware's init-complete notification.
 *
 * Returns 0 on success or an errno value (EPERM under hardware rfkill).
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int err;

	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Cleared here; set by the init-complete notification handler. */
	sc->sc_init_complete = 0;
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to load init firmware\n");
		return err;
	}

	if (justnvm) {
		err = iwm_nvm_init(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return err;
		}

		/* Publish the NVM-derived MAC address to net80211. */
		memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
		    ETHER_ADDR_LEN);
		return 0;
	}

	err = iwm_send_bt_init_conf(sc);
	if (err)
		return err;

	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err)
		return err;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	err = iwm_send_phy_cfg_cmd(sc);
	if (err)
		return err;

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
		err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
		if (err)
			break;
	}

	return err;
}
   3322 
/*
 * Allocate and DMA-map a fresh receive mbuf for RX ring slot idx and
 * write its bus address into the ring's descriptor.
 *
 * If the slot already held an mbuf (we are replacing a buffer that the
 * caller still references), a mapping failure is fatal: the descriptor
 * would be left pointing at unmapped memory, so we panic instead.
 *
 * Returns 0 on success, ENOBUFS on allocation failure, or a
 * bus_dmamap_load_mbuf() error.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Use a cluster when it fits, else an external jumbo buffer. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (data->m != NULL) {
		/* Replacing a live buffer; failure past here is fatal. */
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
   3371 
   3372 #define IWM_RSSI_OFFSET 50
   3373 static int
   3374 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   3375 {
   3376 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
   3377 	uint32_t agc_a, agc_b;
   3378 	uint32_t val;
   3379 
   3380 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
   3381 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
   3382 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
   3383 
   3384 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
   3385 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
   3386 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
   3387 
   3388 	/*
   3389 	 * dBm = rssi dB - agc dB - constant.
   3390 	 * Higher AGC (higher radio gain) means lower signal.
   3391 	 */
   3392 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
   3393 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
   3394 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
   3395 
   3396 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
   3397 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
   3398 
   3399 	return max_rssi_dbm;
   3400 }
   3401 
   3402 /*
   3403  * RSSI values are reported by the FW as positive values - need to negate
   3404  * to obtain their dBM.  Account for missing antennas by replacing 0
   3405  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
   3406  */
   3407 static int
   3408 iwm_get_signal_strength(struct iwm_softc *sc,
   3409     struct iwm_rx_phy_info *phy_info)
   3410 {
   3411 	int energy_a, energy_b, energy_c, max_energy;
   3412 	uint32_t val;
   3413 
   3414 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
   3415 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
   3416 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
   3417 	energy_a = energy_a ? -energy_a : -256;
   3418 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
   3419 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
   3420 	energy_b = energy_b ? -energy_b : -256;
   3421 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
   3422 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
   3423 	energy_c = energy_c ? -energy_c : -256;
   3424 	max_energy = MAX(energy_a, energy_b);
   3425 	max_energy = MAX(max_energy, energy_c);
   3426 
   3427 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
   3428 	    energy_a, energy_b, energy_c, max_energy));
   3429 
   3430 	return max_energy;
   3431 }
   3432 
/*
 * Handle a PHY-info notification: stash the PHY statistics so the
 * matching MPDU notification, which follows, can use them.
 */
static void
iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	DPRINTFN(20, ("received PHY stats\n"));
	/* Make the device's DMA write to the payload visible to the CPU. */
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
   3445 
   3446 /*
   3447  * Retrieve the average noise (in dBm) among receivers.
   3448  */
   3449 static int
   3450 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
   3451 {
   3452 	int i, total, nbant, noise;
   3453 
   3454 	total = nbant = noise = 0;
   3455 	for (i = 0; i < 3; i++) {
   3456 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
   3457 		if (noise) {
   3458 			total += noise;
   3459 			nbant++;
   3460 		}
   3461 	}
   3462 
   3463 	/* There should be at least one antenna but check anyway. */
   3464 	return (nbant == 0) ? -127 : (total / nbant) - 107;
   3465 }
   3466 
/*
 * Handle a received MPDU notification: validate the frame, compute the
 * RSSI (using the PHY info saved by iwm_rx_rx_phy_cmd()), refill the
 * RX ring slot, optionally emit a radiotap record, and pass the frame
 * up to net80211.
 */
static void
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* PHY info was stored by the preceding PHY-info notification. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* Status word is appended by the firmware after the frame data. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
	    sizeof(*rx_res) + len));

	/* Point the ring's mbuf at the frame payload. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports per-antenna energy; older needs AGC math. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_calc_rssi(sc, phy_info);
	}
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(sc, m);

	/* Refill the slot; on failure the frame is dropped (m reused). */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m_set_rcvif(m, IC2IFP(ic));

	if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
		c = &ic->ic_channels[le32toh(phy_info->channel)];

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	/* Radiotap capture, if anyone is listening. */
	if (sc->sc_drvbpf != NULL) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		if (phy_info->phy_flags &
		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
			uint8_t mcs = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK));
			tap->wr_rate = (0x80 | mcs);
		} else {
			/* Map firmware rate codes to 500kb/s units. */
			uint8_t rate = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_LEGACY_RATE_MSK));
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate =   0;
			}
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
	}
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);
}
   3579 
   3580 static void
   3581 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
   3582     struct iwm_node *in)
   3583 {
   3584 	struct ieee80211com *ic = &sc->sc_ic;
   3585 	struct ifnet *ifp = IC2IFP(ic);
   3586 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
   3587 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
   3588 	int failack = tx_resp->failure_frame;
   3589 
   3590 	KASSERT(tx_resp->frame_count == 1);
   3591 
   3592 	/* Update rate control statistics. */
   3593 	in->in_amn.amn_txcnt++;
   3594 	if (failack > 0) {
   3595 		in->in_amn.amn_retrycnt++;
   3596 	}
   3597 
   3598 	if (status != IWM_TX_STATUS_SUCCESS &&
   3599 	    status != IWM_TX_STATUS_DIRECT_DONE)
   3600 		ifp->if_oerrors++;
   3601 	else
   3602 		ifp->if_opackets++;
   3603 }
   3604 
/*
 * Handle a Tx completion notification from the firmware: update
 * statistics, release the frame's DMA map, mbuf and node reference,
 * and restart output if the ring drains below the low watermark.
 */
static void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* Ignore duplicate completions for an already-handled slot. */
	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* A completion arrived, so disarm the Tx watchdog. */
	sc->sc_tx_timer = 0;

	iwm_rx_tx_cmd_single(sc, pkt, in);

	/* Release the DMA mapping and the transmitted frame. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	/* Drop the node reference taken at transmit time. */
	ieee80211_free_node(&in->in_ni);

	/* Un-throttle output once the ring has drained enough. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			if_schedule_deferred_start(ifp);
		}
	}
}
   3658 
   3659 static int
   3660 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
   3661 {
   3662 	struct iwm_binding_cmd cmd;
   3663 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
   3664 	int i, err;
   3665 	uint32_t status;
   3666 
   3667 	memset(&cmd, 0, sizeof(cmd));
   3668 
   3669 	cmd.id_and_color
   3670 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3671 	cmd.action = htole32(action);
   3672 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3673 
   3674 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   3675 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
   3676 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
   3677 
   3678 	status = 0;
   3679 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
   3680 	    sizeof(cmd), &cmd, &status);
   3681 	if (err == 0 && status != 0)
   3682 		err = EIO;
   3683 
   3684 	return err;
   3685 }
   3686 
   3687 static void
   3688 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
   3689     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
   3690 {
   3691 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
   3692 
   3693 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
   3694 	    ctxt->color));
   3695 	cmd->action = htole32(action);
   3696 	cmd->apply_time = htole32(apply_time);
   3697 }
   3698 
/*
 * Fill the channel and Rx/Tx chain fields of a PHY context command.
 * Always configures a 20 MHz channel; wide HT/VHT channels are not
 * used by this driver.
 */
static void
iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
    struct ieee80211_channel *chan, uint8_t chains_static,
    uint8_t chains_dynamic)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t active_cnt, idle_cnt;

	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;

	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;

	/* Set the Rx chains: static chains idle, dynamic chains active. */
	idle_cnt = chains_static;
	active_cnt = chains_dynamic;

	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
	    IWM_PHY_RX_CHAIN_VALID_POS);
	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
	cmd->rxchain_info |= htole32(active_cnt <<
	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);

	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
}
   3726 
   3727 static int
   3728 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
   3729     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
   3730     uint32_t apply_time)
   3731 {
   3732 	struct iwm_phy_context_cmd cmd;
   3733 
   3734 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
   3735 
   3736 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
   3737 	    chains_static, chains_dynamic);
   3738 
   3739 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
   3740 	    sizeof(struct iwm_phy_context_cmd), &cmd);
   3741 }
   3742 
/*
 * Send a host command to the firmware through the command queue.
 *
 * Synchronous commands (no IWM_CMD_ASYNC) sleep until the completion
 * interrupt wakes us or a one second timeout expires.  If
 * IWM_CMD_WANT_SKB is set, the response packet is returned in
 * hcmd->resp_pkt and the caller must release it with iwm_free_resp().
 * Returns 0 on success or an errno value.
 */
static int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int err = 0, i, paylen, off, s;
	int code;
	int async, wantresp;
	int group_id;
	size_t hdrlen, datasz;
	uint8_t *data;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload length over all data segments. */
	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async);
		while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
	}

	/*
	 * Is the hardware still available?  (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		err = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	txdata = &ring->data[ring->cur];

	/* Non-zero command groups use the wide header format. */
	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			aprint_error_dev(sc->sc_dev,
			    "firmware command too long (%zd bytes)\n", totlen);
			err = EINVAL;
			goto out;
		}
		/* Allocate a dedicated mbuf and DMA-map it instead. */
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			err = ENOMEM;
			goto out;
		}
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not get fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			goto out;
		}
		txdata->m = m;
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		/* Command fits in the pre-mapped per-slot buffer. */
		cmd = &ring->cmd[ring->cur];
		paddr = txdata->cmd_paddr;
	}

	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = ring->cur;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwm_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = ring->cur;
		data = cmd->data;
	}

	/* Gather all payload segments behind the command header. */
	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((hdrlen + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
	    code, sizeof(cmd->hdr) + paylen, async ? " (async)" : ""));

	/* Flush the command body, then the descriptor, before the kick. */
	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof(*desc), BUS_DMASYNC_PREWRITE);

	/* Request MAC access and wait until the device is awake. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
		aprint_error_dev(sc->sc_dev, "acquiring device failed\n");
		err = EBUSY;
		goto out;
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Synchronous: sleep on the descriptor; iwm_cmd_done() wakes us. */
	if (!async) {
		int generation = sc->sc_generation;
		err = tsleep(desc, PCATCH, "iwmcmd", mstohz(1000));
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
 out:
	/* On failure, release the response buffer reserved above. */
	if (wantresp && err) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return err;
}
   3922 
   3923 static int
   3924 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
   3925     uint16_t len, const void *data)
   3926 {
   3927 	struct iwm_host_cmd cmd = {
   3928 		.id = id,
   3929 		.len = { len, },
   3930 		.data = { data, },
   3931 		.flags = flags,
   3932 	};
   3933 
   3934 	return iwm_send_cmd(sc, &cmd);
   3935 }
   3936 
/*
 * Send a command synchronously and extract the 32-bit status word
 * from the firmware's response.  Returns 0 and sets *status on
 * success; EIO if the command failed or the response payload has an
 * unexpected size.
 */
static int
iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
    uint32_t *status)
{
	struct iwm_rx_packet *pkt;
	struct iwm_cmd_response *resp;
	int err, resp_len;

	/* We request the response buffer ourselves. */
	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
	cmd->flags |= IWM_CMD_WANT_SKB;

	err = iwm_send_cmd(sc, cmd);
	if (err)
		return err;
	pkt = cmd->resp_pkt;

	/* Can happen if RFKILL is asserted */
	if (!pkt) {
		err = 0;
		goto out_free_resp;
	}

	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		err = EIO;
		goto out_free_resp;
	}

	/* The response must consist of exactly one status word. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
 out_free_resp:
	iwm_free_resp(sc, cmd);
	return err;
}
   3976 
   3977 static int
   3978 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
   3979     const void *data, uint32_t *status)
   3980 {
   3981 	struct iwm_host_cmd cmd = {
   3982 		.id = id,
   3983 		.len = { len, },
   3984 		.data = { data, },
   3985 	};
   3986 
   3987 	return iwm_send_cmd_status(sc, &cmd, status);
   3988 }
   3989 
/*
 * Release the command response buffer reserved by a synchronous
 * IWM_CMD_WANT_SKB command and wake up any thread waiting in
 * iwm_send_cmd() to issue another such command.
 */
static void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
	KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
	wakeup(&sc->sc_wantresp);
}
   3998 
   3999 static void
   4000 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
   4001 {
   4002 	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
   4003 	struct iwm_tx_data *data;
   4004 
   4005 	if (qid != IWM_CMD_QUEUE) {
   4006 		return;	/* Not a command ack. */
   4007 	}
   4008 
   4009 	data = &ring->data[idx];
   4010 
   4011 	if (data->m != NULL) {
   4012 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   4013 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   4014 		bus_dmamap_unload(sc->sc_dmat, data->map);
   4015 		m_freem(data->m);
   4016 		data->m = NULL;
   4017 	}
   4018 	wakeup(&ring->desc[idx]);
   4019 }
   4020 
#if 0
/*
 * Update the byte-count table that the firmware's TX scheduler reads
 * for queue 'qid', slot 'idx'.  Necessary only for block ack mode.
 *
 * NOTE(review): the original code referenced an undeclared variable
 * 'w' in both bus_dmamap_sync() offset computations, so this #if 0
 * block could never have compiled.  'w' is now declared as a pointer
 * to the table entry being updated.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w, w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	w = &scd_bc_tbl[qid].tfd_offset[idx];
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/*
	 * The first IWM_TFD_QUEUE_SIZE_BC_DUP entries are duplicated
	 * past the end of the table — presumably a hardware wrap-around
	 * area; confirm against the iwlwifi reference driver.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
   4056 
   4057 /*
   4058  * Fill in various bit for management frames, and leave them
   4059  * unfilled for data frames (firmware takes care of that).
   4060  * Return the selected TX rate.
   4061  */
   4062 static const struct iwm_rate *
   4063 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
   4064     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
   4065 {
   4066 	struct ieee80211com *ic = &sc->sc_ic;
   4067 	struct ieee80211_node *ni = &in->in_ni;
   4068 	const struct iwm_rate *rinfo;
   4069 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
   4070 	int ridx, rate_flags, i;
   4071 	int nrates = ni->ni_rates.rs_nrates;
   4072 
   4073 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
   4074 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
   4075 
   4076 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
   4077 	    type != IEEE80211_FC0_TYPE_DATA) {
   4078 		/* for non-data, use the lowest supported rate */
   4079 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
   4080 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
   4081 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
   4082 #ifndef IEEE80211_NO_HT
   4083 	} else if (ic->ic_fixed_mcs != -1) {
   4084 		ridx = sc->sc_fixed_ridx;
   4085 #endif
   4086 	} else if (ic->ic_fixed_rate != -1) {
   4087 		ridx = sc->sc_fixed_ridx;
   4088 	} else {
   4089 		/* for data frames, use RS table */
   4090 		tx->initial_rate_index = 0;
   4091 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
   4092 		DPRINTFN(12, ("start with txrate %d\n",
   4093 		    tx->initial_rate_index));
   4094 #ifndef IEEE80211_NO_HT
   4095 		if (ni->ni_flags & IEEE80211_NODE_HT) {
   4096 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
   4097 			return &iwm_rates[ridx];
   4098 		}
   4099 #endif
   4100 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
   4101 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
   4102 		for (i = 0; i < nrates; i++) {
   4103 			if (iwm_rates[i].rate == (ni->ni_txrate &
   4104 			    IEEE80211_RATE_VAL)) {
   4105 				ridx = i;
   4106 				break;
   4107 			}
   4108 		}
   4109 		return &iwm_rates[ridx];
   4110 	}
   4111 
   4112 	rinfo = &iwm_rates[ridx];
   4113 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
   4114 	if (IWM_RIDX_IS_CCK(ridx))
   4115 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
   4116 #ifndef IEEE80211_NO_HT
   4117 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
   4118 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
   4119 		rate_flags |= IWM_RATE_MCS_HT_MSK;
   4120 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
   4121 	} else
   4122 #endif
   4123 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
   4124 
   4125 	return rinfo;
   4126 }
   4127 
#define TB0_SIZE 16
/*
 * Transmit 802.11 frame 'm' to node 'ni' on access category 'ac'.
 *
 * Builds an IWM_TX_CMD in the ring's command slot (the 802.11 header
 * is copied into the command; the payload is DMA-mapped separately),
 * fills the TFD and kicks the Tx ring.  The mbuf is consumed on both
 * success and failure.  Returns 0 or an errno value.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, err, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* No QoS/aggregation support; everything goes out on TID 0. */
	tid = 0;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Radiotap capture, before encryption. */
	if (sc->sc_drvbpf != NULL) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
#ifndef IEEE80211_NO_HT
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    rinfo->plcp == IWM_RATE_INVM_PLCP) {
			tap->wt_rate = (0x80 | rinfo->ht_plcp);
		} else
#endif
			tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request protection for large or protection-mode unicast data. */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
	     (ic->ic_flags & IEEE80211_F_USEPROT)))
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = IWM_AUX_STA_ID;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err) {
		if (err != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB0 and TB1 cover the Tx command and the copied 802.11 header. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command, and descriptor before the kick. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
	    le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
   4355 
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush the Tx queues selected by 'tfd_msk'.
 * When 'sync' is set, wait for the flush command to complete.
 */
static int
iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int err;

	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (err)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    err);
	return err;
}
#endif
   4375 
/* Turn the device LED on. */
static void
iwm_led_enable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}
   4381 
/* Turn the device LED off. */
static void
iwm_led_disable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
   4387 
/* Return non-zero if the LED register currently reads "on". */
static int
iwm_led_is_enabled(struct iwm_softc *sc)
{
	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
}
   4393 
   4394 static void
   4395 iwm_led_blink_timeout(void *arg)
   4396 {
   4397 	struct iwm_softc *sc = arg;
   4398 
   4399 	if (iwm_led_is_enabled(sc))
   4400 		iwm_led_disable(sc);
   4401 	else
   4402 		iwm_led_enable(sc);
   4403 
   4404 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
   4405 }
   4406 
/* Start the LED blink callout (fires every 200 ms). */
static void
iwm_led_blink_start(struct iwm_softc *sc)
{
	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
}
   4412 
/* Stop the blink callout and leave the LED off. */
static void
iwm_led_blink_stop(struct iwm_softc *sc)
{
	callout_stop(&sc->sc_led_blink_to);
	iwm_led_disable(sc);
}
   4419 
   4420 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
   4421 
   4422 static int
   4423 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
   4424 	struct iwm_beacon_filter_cmd *cmd)
   4425 {
   4426 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
   4427 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
   4428 }
   4429 
/*
 * Copy the current beacon abort enable state into a beacon filter
 * command; this is the only CQM parameter the driver sets.
 */
static void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
   4436 
/*
 * Tell the firmware whether beacon abort is enabled.  Records the new
 * state in sc_bf; a no-op while beacon filtering itself is disabled.
 */
static int
iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	if (!sc->sc_bf.bf_enabled)
		return 0;

	sc->sc_bf.ba_enabled = enable;
	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_beacon_filter_send_cmd(sc, &cmd);
}
   4453 
/*
 * Fill a per-MAC power command.  Power saving is not enabled yet
 * (the flags are under #ifdef notyet); only the mandatory keep alive
 * period is configured.
 */
static void
iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_power_cmd *cmd)
{
	struct ieee80211_node *ni = &in->in_ni;
	int dtim_period, dtim_msec, keep_alive;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	/* Fall back to a DTIM period of 1 if the AP did not report one. */
	if (ni->ni_dtim_period)
		dtim_period = ni->ni_dtim_period;
	else
		dtim_period = 1;

	/*
	 * Regardless of power management state the driver must set
	 * keep alive period. FW will use it for sending keep alive NDPs
	 * immediately after association. Check that keep alive period
	 * is at least 3 * DTIM.
	 */
	dtim_msec = dtim_period * ni->ni_intval;
	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
	keep_alive = roundup(keep_alive, 1000) / 1000;
	cmd->keep_alive_seconds = htole16(keep_alive);

#ifdef notyet
	cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
	cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
	cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
#endif
}
   4485 
   4486 static int
   4487 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
   4488 {
   4489 	int err;
   4490 	int ba_enable;
   4491 	struct iwm_mac_power_cmd cmd;
   4492 
   4493 	memset(&cmd, 0, sizeof(cmd));
   4494 
   4495 	iwm_power_build_cmd(sc, in, &cmd);
   4496 
   4497 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
   4498 	    sizeof(cmd), &cmd);
   4499 	if (err)
   4500 		return err;
   4501 
   4502 	ba_enable = !!(cmd.flags &
   4503 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
   4504 	return iwm_update_beacon_abort(sc, in, ba_enable);
   4505 }
   4506 
/*
 * Configure device-wide power management.  CAM (continuously active
 * mode) is always requested, i.e. device power saving stays off;
 * the power-save flag is under #ifdef notyet.
 */
static int
iwm_power_update_device(struct iwm_softc *sc)
{
	struct iwm_device_power_cmd cmd = {
#ifdef notyet
		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
#endif
	};

	/* Firmware without the device PS command: nothing to do. */
	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
		return 0;

	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
	DPRINTF(("Sending device power command with flags = 0x%X\n",
	    cmd.flags));

	return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
}
   4525 
   4526 #ifdef notyet
   4527 static int
   4528 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
   4529 {
   4530 	struct iwm_beacon_filter_cmd cmd = {
   4531 		IWM_BF_CMD_CONFIG_DEFAULTS,
   4532 		.bf_enable_beacon_filter = htole32(1),
   4533 	};
   4534 	int err;
   4535 
   4536 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
   4537 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
   4538 
   4539 	if (err == 0)
   4540 		sc->sc_bf.bf_enabled = 1;
   4541 
   4542 	return err;
   4543 }
   4544 #endif
   4545 
   4546 static int
   4547 iwm_disable_beacon_filter(struct iwm_softc *sc)
   4548 {
   4549 	struct iwm_beacon_filter_cmd cmd;
   4550 	int err;
   4551 
   4552 	memset(&cmd, 0, sizeof(cmd));
   4553 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
   4554 		return 0;
   4555 
   4556 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
   4557 	if (err == 0)
   4558 		sc->sc_bf.bf_enabled = 0;
   4559 
   4560 	return err;
   4561 }
   4562 
   4563 static int
   4564 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
   4565 {
   4566 	struct iwm_add_sta_cmd_v7 add_sta_cmd;
   4567 	int err;
   4568 	uint32_t status;
   4569 
   4570 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
   4571 
   4572 	add_sta_cmd.sta_id = IWM_STATION_ID;
   4573 	add_sta_cmd.mac_id_n_color
   4574 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   4575 	if (!update) {
   4576 		int ac;
   4577 		for (ac = 0; ac < WME_NUM_AC; ac++) {
   4578 			add_sta_cmd.tfd_queue_msk |=
   4579 			    htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
   4580 		}
   4581 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
   4582 	}
   4583 	add_sta_cmd.add_modify = update ? 1 : 0;
   4584 	add_sta_cmd.station_flags_msk
   4585 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
   4586 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
   4587 	if (update)
   4588 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
   4589 
   4590 #ifndef IEEE80211_NO_HT
   4591 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
   4592 		add_sta_cmd.station_flags_msk
   4593 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
   4594 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
   4595 
   4596 		add_sta_cmd.station_flags
   4597 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
   4598 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
   4599 		case IEEE80211_AMPDU_PARAM_SS_2:
   4600 			add_sta_cmd.station_flags
   4601 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
   4602 			break;
   4603 		case IEEE80211_AMPDU_PARAM_SS_4:
   4604 			add_sta_cmd.station_flags
   4605 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
   4606 			break;
   4607 		case IEEE80211_AMPDU_PARAM_SS_8:
   4608 			add_sta_cmd.station_flags
   4609 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
   4610 			break;
   4611 		case IEEE80211_AMPDU_PARAM_SS_16:
   4612 			add_sta_cmd.station_flags
   4613 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
   4614 			break;
   4615 		default:
   4616 			break;
   4617 		}
   4618 	}
   4619 #endif
   4620 
   4621 	status = IWM_ADD_STA_SUCCESS;
   4622 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
   4623 	    &add_sta_cmd, &status);
   4624 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
   4625 		err = EIO;
   4626 
   4627 	return err;
   4628 }
   4629 
   4630 static int
   4631 iwm_add_aux_sta(struct iwm_softc *sc)
   4632 {
   4633 	struct iwm_add_sta_cmd_v7 cmd;
   4634 	int err;
   4635 	uint32_t status;
   4636 
   4637 	err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
   4638 	if (err)
   4639 		return err;
   4640 
   4641 	memset(&cmd, 0, sizeof(cmd));
   4642 	cmd.sta_id = IWM_AUX_STA_ID;
   4643 	cmd.mac_id_n_color =
   4644 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
   4645 	cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
   4646 	cmd.tid_disable_tx = htole16(0xffff);
   4647 
   4648 	status = IWM_ADD_STA_SUCCESS;
   4649 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
   4650 	    &status);
   4651 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
   4652 		err = EIO;
   4653 
   4654 	return err;
   4655 }
   4656 
   4657 #define IWM_PLCP_QUIET_THRESH 1
   4658 #define IWM_ACTIVE_QUIET_TIME 10
   4659 #define LONG_OUT_TIME_PERIOD 600
   4660 #define SHORT_OUT_TIME_PERIOD 200
   4661 #define SUSPEND_TIME_PERIOD 100
   4662 
   4663 static uint16_t
   4664 iwm_scan_rx_chain(struct iwm_softc *sc)
   4665 {
   4666 	uint16_t rx_chain;
   4667 	uint8_t rx_ant;
   4668 
   4669 	rx_ant = iwm_fw_valid_rx_ant(sc);
   4670 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
   4671 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
   4672 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
   4673 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
   4674 	return htole16(rx_chain);
   4675 }
   4676 
   4677 static uint32_t
   4678 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
   4679 {
   4680 	uint32_t tx_ant;
   4681 	int i, ind;
   4682 
   4683 	for (i = 0, ind = sc->sc_scan_last_antenna;
   4684 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
   4685 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
   4686 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
   4687 			sc->sc_scan_last_antenna = ind;
   4688 			break;
   4689 		}
   4690 	}
   4691 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
   4692 
   4693 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
   4694 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
   4695 				   tx_ant);
   4696 	else
   4697 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
   4698 }
   4699 
   4700 #ifdef notyet
   4701 /*
   4702  * If req->n_ssids > 0, it means we should do an active scan.
   4703  * In case of active scan w/o directed scan, we receive a zero-length SSID
   4704  * just to notify that this scan is active and not passive.
   4705  * In order to notify the FW of the number of SSIDs we wish to scan (including
   4706  * the zero-length one), we need to set the corresponding bits in chan->type,
   4707  * one for each SSID, and set the active bit (first). If the first SSID is
   4708  * already included in the probe template, so we need to set only
   4709  * req->n_ssids - 1 bits in addition to the first bit.
   4710  */
   4711 static uint16_t
   4712 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
   4713 {
   4714 	if (flags & IEEE80211_CHAN_2GHZ)
   4715 		return 30  + 3 * (n_ssids + 1);
   4716 	return 20  + 2 * (n_ssids + 1);
   4717 }
   4718 
   4719 static uint16_t
   4720 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
   4721 {
   4722 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
   4723 }
   4724 #endif
   4725 
   4726 static uint8_t
   4727 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
   4728     struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
   4729 {
   4730 	struct ieee80211com *ic = &sc->sc_ic;
   4731 	struct ieee80211_channel *c;
   4732 	uint8_t nchan;
   4733 
   4734 	for (nchan = 0, c = &ic->ic_channels[1];
   4735 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
   4736 	    nchan < sc->sc_capa_n_scan_channels;
   4737 	    c++) {
   4738 		if (c->ic_flags == 0)
   4739 			continue;
   4740 
   4741 		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
   4742 		chan->iter_count = htole16(1);
   4743 		chan->iter_interval = 0;
   4744 		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
   4745 #if 0 /* makes scanning while associated less useful */
   4746 		if (n_ssids != 0)
   4747 			chan->flags |= htole32(1 << 1); /* select SSID 0 */
   4748 #endif
   4749 		chan++;
   4750 		nchan++;
   4751 	}
   4752 
   4753 	return nchan;
   4754 }
   4755 
   4756 static uint8_t
   4757 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
   4758     struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
   4759 {
   4760 	struct ieee80211com *ic = &sc->sc_ic;
   4761 	struct ieee80211_channel *c;
   4762 	uint8_t nchan;
   4763 
   4764 	for (nchan = 0, c = &ic->ic_channels[1];
   4765 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
   4766 	    nchan < sc->sc_capa_n_scan_channels;
   4767 	    c++) {
   4768 		if (c->ic_flags == 0)
   4769 			continue;
   4770 		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
   4771 		chan->iter_count = 1;
   4772 		chan->iter_interval = htole16(0);
   4773 #if 0 /* makes scanning while associated less useful */
   4774 		if (n_ssids != 0)
   4775 			chan->flags = htole32(1 << 0); /* select SSID 0 */
   4776 #endif
   4777 		chan++;
   4778 		nchan++;
   4779 	}
   4780 
   4781 	return nchan;
   4782 }
   4783 
/*
 * Build the probe request frame template the firmware transmits during
 * active scans.  The MAC header, the per-band rate IEs and the trailing
 * common IEs are written into preq->buf, and their offsets/lengths are
 * recorded in 'preq' so the firmware can patch the frame per channel.
 * Returns 0 on success or ENOBUFS if the template buffer is too small.
 */
static int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);	/* bytes left in template */
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	/* Header plus SSID IE (2-byte IE header + ESSID) must fit. */
	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	/* The (possibly empty) SSID IE follows the 802.11 header. */
	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		/* rates + xrates IEs: two 2-byte IE headers needed. */
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	/* DS parameter set IE: channel number, patched by firmware. */
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

#ifndef IEEE80211_NO_HT
	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
#endif

	/* NOTE(review): with IEEE80211_NO_HT defined, common_data.offset
	 * stays 0 and 'pos' still points at the end of the band IEs from
	 * above, so common_data.len below is 0 — presumably intended. */
	preq->common_data.len = htole16(frm - pos);

	return 0;
}
   4877 
/*
 * Start a scan using the older (LMAC) scan API.
 * Builds a variable-sized IWM_SCAN_OFFLOAD_REQUEST_CMD consisting of a
 * fixed header, one channel config per firmware-supported scan channel,
 * and a probe request template, then sends it to the firmware.
 * Returns 0 on success or an errno.
 */
static int
iwm_lmac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Header + per-channel configs + probe request template. */
	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	/* With no desired SSID we scan passively. */
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	/* Scan both bands if the hardware supports 5GHz. */
	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	/* Channel configs start at req->data. */
	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0);

	/* The probe request template follows the channel configs. */
	err = iwm_fill_probe_req(sc,
	    (struct iwm_scan_probe_req *)(req->data +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	     sc->sc_capa_n_scan_channels)));
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
   4983 
/*
 * Send the one-time UMAC scan configuration (IWM_SCAN_CFG_CMD):
 * antenna chains, legacy rates, dwell times, our MAC address and the
 * list of channel numbers the firmware may scan.
 * Returns 0 on success or an errno.
 */
static int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
		.flags = 0,
	};
	/* All legacy (non-HT) CCK and OFDM rates. */
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	/* One trailing byte of channel_array[] per scan channel. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	/* List every configured channel, up to the firmware's limit. */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(scan_config, cmd_size);
	return err;
}
   5058 
/*
 * Start a scan using the newer (UMAC) scan API.
 * Builds a variable-sized IWM_SCAN_REQ_UMAC command: header, one channel
 * config per firmware-supported scan channel, then a "tail" holding the
 * directed-scan SSIDs, the schedule and the probe request template.
 * Returns 0 on success or an errno.
 */
static int
iwm_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_umac *req;
	struct iwm_scan_req_umac_tail *tail;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Header + per-channel configs + tail. */
	req_len = sizeof(struct iwm_scan_req_umac) +
	    (sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels) +
	    sizeof(struct iwm_scan_req_umac_tail);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	/* Channel configs start at req->data. */
	req->n_channels = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)req->data,
	    ic->ic_des_esslen != 0);

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	/* The tail follows the channel configs. */
	tail = (struct iwm_scan_req_umac_tail *)(req->data +
		sizeof(struct iwm_scan_channel_cfg_umac) *
			sc->sc_capa_n_scan_channels);

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		tail->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	err = iwm_fill_probe_req(sc, &tail->preq);
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
   5142 
   5143 static uint8_t
   5144 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
   5145 {
   5146 	int i;
   5147 	uint8_t rval;
   5148 
   5149 	for (i = 0; i < rs->rs_nrates; i++) {
   5150 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
   5151 		if (rval == iwm_rates[ridx].rate)
   5152 			return rs->rs_rates[i];
   5153 	}
   5154 	return 0;
   5155 }
   5156 
/*
 * Compute the CCK and OFDM rate bitmaps the firmware should use for
 * control-response (ACK/CTS) frames: the BSS's basic rates plus the
 * mandatory lower rates required by 802.11-2007 9.6 (see the long
 * comment below).  Results are returned via *cck_rates / *ofdm_rates
 * as firmware-format bitmaps.
 */
static void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = 100;	/* sentinel: above any rate index */
	int lowest_present_cck = 100;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only exist on 2GHz channels. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
   5242 
/*
 * Fill in the fields of a MAC context command that are common to all
 * actions (add/modify/remove): identity, addresses, ACK rates,
 * slot/preamble flags and per-AC EDCA parameters.  The 'assoc'
 * parameter is currently unused here (see iwm_mac_ctxt_cmd_fill_sta).
 */
static void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(IWM_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);

	/* Rates the firmware may use for control-response frames. */
	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Copy the net80211 WME parameters into the per-FIFO EDCA slots. */
	for (i = 0; i < WME_NUM_AC; i++) {
		struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
		int txf = iwm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
		cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
		cmd->ac[txf].aifsn = wmep->wmep_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

#ifndef IEEE80211_NO_HT
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
		/*
		 * NOTE(review): no break above, so the NONMEMBER and
		 * NONHT_MIXED cases fall through and also set FAT
		 * protection below -- confirm this is intentional.
		 */
		case IEEE80211_HTPROT_20MHZ:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
			    IWM_MAC_PROT_FLG_FAT_PROT);
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
#endif

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}
   5317 
   5318 static void
   5319 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
   5320     struct iwm_mac_data_sta *sta, int assoc)
   5321 {
   5322 	struct ieee80211_node *ni = &in->in_ni;
   5323 	uint32_t dtim_off;
   5324 	uint64_t tsf;
   5325 
   5326 	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
   5327 	tsf = le64toh(ni->ni_tstamp.tsf);
   5328 
   5329 	sta->is_assoc = htole32(assoc);
   5330 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
   5331 	sta->dtim_tsf = htole64(tsf + dtim_off);
   5332 	sta->bi = htole32(ni->ni_intval);
   5333 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
   5334 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
   5335 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
   5336 	sta->listen_interval = htole32(10);
   5337 	sta->assoc_id = htole32(ni->ni_associd);
   5338 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
   5339 }
   5340 
   5341 static int
   5342 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
   5343     int assoc)
   5344 {
   5345 	struct ieee80211_node *ni = &in->in_ni;
   5346 	struct iwm_mac_ctx_cmd cmd;
   5347 
   5348 	memset(&cmd, 0, sizeof(cmd));
   5349 
   5350 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
   5351 
   5352 	/* Allow beacons to pass through as long as we are not associated or we
   5353 	 * do not have dtim period information */
   5354 	if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
   5355 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
   5356 	else
   5357 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
   5358 
   5359 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
   5360 }
   5361 
   5362 #define IWM_MISSED_BEACONS_THRESHOLD 8
   5363 
   5364 static void
   5365 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
   5366 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   5367 {
   5368 	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
   5369 
   5370 	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
   5371 	    le32toh(mb->mac_id),
   5372 	    le32toh(mb->consec_missed_beacons),
   5373 	    le32toh(mb->consec_missed_beacons_since_last_rx),
   5374 	    le32toh(mb->num_recvd_beacons),
   5375 	    le32toh(mb->num_expected_beacons)));
   5376 
   5377 	/*
   5378 	 * TODO: the threshold should be adjusted based on latency conditions,
   5379 	 * and/or in case of a CS flow on one of the other AP vifs.
   5380 	 */
   5381 	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
   5382 	    IWM_MISSED_BEACONS_THRESHOLD)
   5383 		ieee80211_beacon_miss(&sc->sc_ic);
   5384 }
   5385 
   5386 static int
   5387 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
   5388 {
   5389 	struct iwm_time_quota_cmd cmd;
   5390 	int i, idx, num_active_macs, quota, quota_rem;
   5391 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
   5392 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
   5393 	uint16_t id;
   5394 
   5395 	memset(&cmd, 0, sizeof(cmd));
   5396 
   5397 	/* currently, PHY ID == binding ID */
   5398 	if (in) {
   5399 		id = in->in_phyctxt->id;
   5400 		KASSERT(id < IWM_MAX_BINDINGS);
   5401 		colors[id] = in->in_phyctxt->color;
   5402 
   5403 		if (1)
   5404 			n_ifs[id] = 1;
   5405 	}
   5406 
   5407 	/*
   5408 	 * The FW's scheduling session consists of
   5409 	 * IWM_MAX_QUOTA fragments. Divide these fragments
   5410 	 * equally between all the bindings that require quota
   5411 	 */
   5412 	num_active_macs = 0;
   5413 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
   5414 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
   5415 		num_active_macs += n_ifs[i];
   5416 	}
   5417 
   5418 	quota = 0;
   5419 	quota_rem = 0;
   5420 	if (num_active_macs) {
   5421 		quota = IWM_MAX_QUOTA / num_active_macs;
   5422 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
   5423 	}
   5424 
   5425 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
   5426 		if (colors[i] < 0)
   5427 			continue;
   5428 
   5429 		cmd.quotas[idx].id_and_color =
   5430 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
   5431 
   5432 		if (n_ifs[i] <= 0) {
   5433 			cmd.quotas[idx].quota = htole32(0);
   5434 			cmd.quotas[idx].max_duration = htole32(0);
   5435 		} else {
   5436 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
   5437 			cmd.quotas[idx].max_duration = htole32(0);
   5438 		}
   5439 		idx++;
   5440 	}
   5441 
   5442 	/* Give the remainder of the session to the first binding */
   5443 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
   5444 
   5445 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
   5446 }
   5447 
/*
 * Prepare the firmware for authentication with the current BSS:
 * smart-FIFO mode, multicast filter, PHY/MAC contexts, binding and
 * station entry, then a time event to keep us on-channel.  The
 * command order below mirrors the firmware's expectations — do not
 * reorder.  Returns 0 on success or an errno from the failing step.
 */
static int
iwm_auth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	uint32_t duration;
	int err;

	/* Switch the smart FIFO to full-on mode for association flow. */
	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err)
		return err;

	/* Accept multicast traffic from our BSSID. */
	err = iwm_allow_mcast(sc);
	if (err)
		return err;

	/* Re-point PHY context 0 at the AP's channel. */
	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err)
		return err;
	in->in_phyctxt = &sc->sc_phyctxt[0];

	/* Add a MAC context for this interface; not yet associated. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add MAC context (error %d)\n", err);
		return err;
	}

	/* Bind the MAC context to the PHY context. */
	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err)
		return err;

	/* Create the firmware station entry for the AP. */
	err = iwm_add_sta_cmd(sc, in, 0);
	if (err)
		return err;

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
		return err;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 2;
	else
		duration = IEEE80211_DUR_TU;
	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
	DELAY(100);

	return 0;
}
   5505 
   5506 static int
   5507 iwm_assoc(struct iwm_softc *sc)
   5508 {
   5509 	struct ieee80211com *ic = &sc->sc_ic;
   5510 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
   5511 	int err;
   5512 
   5513 	err = iwm_add_sta_cmd(sc, in, 1);
   5514 	if (err)
   5515 		return err;
   5516 
   5517 	return 0;
   5518 }
   5519 
   5520 static struct ieee80211_node *
   5521 iwm_node_alloc(struct ieee80211_node_table *nt)
   5522 {
   5523 	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
   5524 }
   5525 
/*
 * Periodic (500ms) calibration callout: run AMRR rate adaptation for
 * the current BSS when no fixed TX rate/MCS is configured, then
 * re-arm.  Runs in callout (softclock) context; LQ command updates
 * are deferred to a softint since they may sleep.
 */
static void
iwm_calib_timeout(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
#ifndef IEEE80211_NO_HT
	struct ieee80211_node *ni = &in->in_ni;
	int otxrate;
#endif
	int s;

	s = splnet();
	/* Only adapt when the user has not pinned a rate (or MCS). */
	if ((ic->ic_fixed_rate == -1
#ifndef IEEE80211_NO_HT
	    || ic->ic_fixed_mcs == -1
#endif
	    ) &&
	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
#ifndef IEEE80211_NO_HT
		/* Remember the rate in effect before AMRR runs. */
		if (ni->ni_flags & IEEE80211_NODE_HT)
			otxrate = ni->ni_txmcs;
		else
			otxrate = ni->ni_txrate;
#endif
		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);

#ifndef IEEE80211_NO_HT
		/*
		 * If AMRR has chosen a new TX rate we must update
		 * the firwmare's LQ rate table from process context.
		 */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    otxrate != ni->ni_txmcs)
			softint_schedule(sc->setrates_task);
		else if (otxrate != ni->ni_txrate)
			softint_schedule(sc->setrates_task);
#endif
	}
	splx(s);

	/* Re-arm for the next calibration interval. */
	callout_schedule(&sc->sc_calib_to, mstohz(500));
}
   5569 
   5570 #ifndef IEEE80211_NO_HT
   5571 static void
   5572 iwm_setrates_task(void *arg)
   5573 {
   5574 	struct iwm_softc *sc = arg;
   5575 	struct ieee80211com *ic = &sc->sc_ic;
   5576 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
   5577 
   5578 	/* Update rates table based on new TX rate determined by AMRR. */
   5579 	iwm_setrates(in);
   5580 }
   5581 
/*
 * Build and send the firmware link-quality (LQ) command for this
 * station.  The rate table is filled in descending order starting
 * from the node's current TX rate, mixing HT and legacy entries but
 * never duplicating an HT rate and its equivalent legacy rate (they
 * share the same IWM_RATE_*_INDEX slot in iwm_rates[]).  Returns the
 * result of iwm_send_cmd().
 */
static int
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int i, j, ridx, ridx_min, tab = 0;
#ifndef IEEE80211_NO_HT
	int sgi_ok;
#endif
	struct iwm_host_cmd cmd = {
		.id = IWM_LQ_CMD,
		.len = { sizeof(in->in_lq), },
	};

	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* Honour net80211's ERP protection setting with RTS/CTS. */
	if (ic->ic_flags & IEEE80211_F_USEPROT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

#ifndef IEEE80211_NO_HT
	/* Short guard interval is usable only if the peer advertises it. */
	sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
#endif


	/*
	 * Fill the LQ rate selection table with legacy and/or HT rates
	 * in descending order, i.e. with the node's current TX rate first.
	 * In cases where throughput of an HT rate corresponds to a legacy
	 * rate it makes no sense to add both. We rely on the fact that
	 * iwm_rates is laid out such that equivalent HT/legacy rates share
	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
	 */
	j = 0;
	/* CCK rates do not exist on 5GHz channels. */
	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
		if (j >= __arraycount(lq->rs_table))
			break;
		tab = 0;
#ifndef IEEE80211_NO_HT
		/* Prefer an HT entry for this slot if one applies. */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
			for (i = ni->ni_txmcs; i >= 0; i--) {
				if (isclr(ni->ni_rxmcs, i))
					continue;
				if (ridx == iwm_mcs2ridx[i]) {
					tab = iwm_rates[ridx].ht_plcp;
					tab |= IWM_RATE_MCS_HT_MSK;
					if (sgi_ok)
						tab |= IWM_RATE_MCS_SGI_MSK;
					break;
				}
			}
		}
#endif
		/* Otherwise fall back to a matching legacy rate. */
		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
			for (i = ni->ni_txrate; i >= 0; i--) {
				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
				    IEEE80211_RATE_VAL)) {
					tab = iwm_rates[ridx].plcp;
					break;
				}
			}
		}

		if (tab == 0)
			continue;

		/* Transmit on antenna A; mark CCK rates as such. */
		tab |= 1 << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[j++] = htole32(tab);
	}

	/* Fill the rest with the lowest possible rate */
	i = j > 0 ? j - 1 : 0;
	while (j < __arraycount(lq->rs_table))
		lq->rs_table[j++] = lq->rs_table[i];

	lq->single_stream_ant_msk = IWM_ANT_A;
	lq->dual_stream_ant_msk = IWM_ANT_AB;

	lq->agg_time_limit = htole16(4000);	/* 4ms */
	lq->agg_disable_start_th = 3;
#ifdef notyet
	lq->agg_frame_cnt_limit = 0x3f;
#else
	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
#endif

	cmd.data[0] = &in->in_lq;
	return iwm_send_cmd(sc, &cmd);
}
   5682 #endif
   5683 
/*
 * ifmedia change handler: translate a user-selected fixed rate (or
 * MCS) into a HW rate index, and restart the interface if it is
 * currently running so the new setting takes effect.
 */
static int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	/* ENETRESET means the change requires a device reinit. */
	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

	/* NB: the #ifndef below continues the else-if chain; keep intact. */
#ifndef IEEE80211_NO_HT
	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else
#endif
	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		sc->sc_fixed_ridx = ridx;
	}

	/* Restart only if the interface is up and running. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		err = iwm_init(ifp);
	}
	return err;
}
   5718 
/*
 * Workqueue handler performing the actual 802.11 state transition
 * queued by iwm_newstate().  Runs in thread context so it may sleep
 * while talking to the firmware.  Frees the work item, verifies it
 * belongs to the current device generation, drives the firmware
 * through the transition, and finally chains to net80211's original
 * newstate handler.
 */
static void
iwm_newstate_cb(struct work *wk, void *v)
{
	struct iwm_softc *sc = v;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	int err;

	/* The work item was allocated by iwm_newstate(); free it now. */
	kmem_free(iwmns, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ostate, nstate));
	/* A generation mismatch means the device was stopped/restarted. */
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate]));

	if (ostate == IEEE80211_S_SCAN && nstate != ostate)
		iwm_led_blink_stop(sc);

	if (ostate == IEEE80211_S_RUN && nstate != ostate)
		iwm_disable_beacon_filter(sc);

	/* Reset the device if moving out of AUTH, ASSOC, or RUN. */
	/* XXX Is there a way to switch states without a full reset? */
	if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
		iwm_stop_device(sc);
		iwm_init_hw(sc);

		/*
		 * Upon receiving a deauth frame from AP the net80211 stack
		 * puts the driver into AUTH state. This will fail with this
		 * driver so bring the FSM from RUN to SCAN in this case.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			/* Always pass arg as -1 since we can't Tx right now. */
			sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		/* Don't restart an already-running scan. */
		if (ostate == nstate &&
		    ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
			return;
		/* Newer firmware scans via UMAC, older via LMAC. */
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
			err = iwm_umac_scan(sc);
		else
			err = iwm_lmac_scan(sc);
		if (err) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			return;
		}
		SET(sc->sc_flags, IWM_FLAG_SCANNING);
		ic->ic_state = nstate;
		iwm_led_blink_start(sc);
		/* The scan-end notification completes this transition. */
		return;

	case IEEE80211_S_AUTH:
		err = iwm_auth(sc);
		if (err) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), err));
			return;
		}
		break;

	case IEEE80211_S_ASSOC:
		err = iwm_assoc(sc);
		if (err) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    err));
			return;
		}
		break;

	case IEEE80211_S_RUN:
		in = (struct iwm_node *)ic->ic_bss;

		/* We have now been assigned an associd by the AP. */
		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
			return;
		}

		err = iwm_power_update_device(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could send power command (error %d)\n", err);
			return;
		}
#ifdef notyet
		/*
		 * Disabled for now. Default beacon filter settings
		 * prevent net80211 from getting ERP and HT protection
		 * updates from beacons.
		 */
		err = iwm_enable_beacon_filter(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not enable beacon filter\n");
			return;
		}
#endif
		err = iwm_power_mac_update_mode(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update MAC power (error %d)\n", err);
			return;
		}

		err = iwm_update_quotas(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update quotas (error %d)\n", err);
			return;
		}

		ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);

		/* Start at lowest available bit-rate, AMRR will raise. */
		in->in_ni.ni_txrate = 0;
#ifndef IEEE80211_NO_HT
		in->in_ni.ni_txmcs = 0;
		iwm_setrates(in);
#endif

		/* Kick off periodic rate adaptation. */
		callout_schedule(&sc->sc_calib_to, mstohz(500));
		iwm_led_enable(sc);
		break;

	default:
		break;
	}

	/* Hand off to net80211's original state machine handler. */
	sc->sc_newstate(ic, nstate, arg);
}
   5876 
   5877 static int
   5878 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
   5879 {
   5880 	struct iwm_newstate_state *iwmns;
   5881 	struct ifnet *ifp = IC2IFP(ic);
   5882 	struct iwm_softc *sc = ifp->if_softc;
   5883 
   5884 	callout_stop(&sc->sc_calib_to);
   5885 
   5886 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
   5887 	if (!iwmns) {
   5888 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
   5889 		return ENOMEM;
   5890 	}
   5891 
   5892 	iwmns->ns_nstate = nstate;
   5893 	iwmns->ns_arg = arg;
   5894 	iwmns->ns_generation = sc->sc_generation;
   5895 
   5896 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
   5897 
   5898 	return 0;
   5899 }
   5900 
   5901 static void
   5902 iwm_endscan_cb(struct work *work __unused, void *arg)
   5903 {
   5904 	struct iwm_softc *sc = arg;
   5905 	struct ieee80211com *ic = &sc->sc_ic;
   5906 
   5907 	DPRINTF(("scan ended\n"));
   5908 
   5909 	CLR(sc->sc_flags, IWM_FLAG_SCANNING);
   5910 	ieee80211_end_scan(ic);
   5911 }
   5912 
   5913 /*
   5914  * Aging and idle timeouts for the different possible scenarios
   5915  * in default configuration
   5916  */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast traffic */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast traffic */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast traffic */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* Tx response */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
   5940 
   5941 /*
   5942  * Aging and idle timeouts for the different possible scenarios
   5943  * in single BSS MAC configuration.
   5944  */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast traffic */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast traffic */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast traffic */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{	/* Tx response */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
   5968 
/*
 * Populate a smart-FIFO configuration command.  With a node ('ni'),
 * pick watermark and timeouts for the single-BSS associated case;
 * with ni == NULL, use the defaults for unassociated operation.
 */
static void
iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else
#endif
			watermark = IWM_SF_W_MARK_LEGACY;
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts are the same for every scenario. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts differ between associated and idle states. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
   6016 
   6017 static int
   6018 iwm_sf_config(struct iwm_softc *sc, int new_state)
   6019 {
   6020 	struct ieee80211com *ic = &sc->sc_ic;
   6021 	struct iwm_sf_cfg_cmd sf_cmd = {
   6022 		.state = htole32(IWM_SF_FULL_ON),
   6023 	};
   6024 
   6025 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   6026 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
   6027 
   6028 	switch (new_state) {
   6029 	case IWM_SF_UNINIT:
   6030 	case IWM_SF_INIT_OFF:
   6031 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
   6032 		break;
   6033 	case IWM_SF_FULL_ON:
   6034 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
   6035 		break;
   6036 	default:
   6037 		return EINVAL;
   6038 	}
   6039 
   6040 	return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
   6041 	    sizeof(sf_cmd), &sf_cmd);
   6042 }
   6043 
   6044 static int
   6045 iwm_send_bt_init_conf(struct iwm_softc *sc)
   6046 {
   6047 	struct iwm_bt_coex_cmd bt_cmd;
   6048 
   6049 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
   6050 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
   6051 
   6052 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
   6053 }
   6054 
   6055 static int
   6056 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
   6057 {
   6058 	struct iwm_mcc_update_cmd mcc_cmd;
   6059 	struct iwm_host_cmd hcmd = {
   6060 		.id = IWM_MCC_UPDATE_CMD,
   6061 		.flags = IWM_CMD_WANT_SKB,
   6062 		.data = { &mcc_cmd },
   6063 	};
   6064 	int resp_v2 = isset(sc->sc_enabled_capa,
   6065 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
   6066 	int err;
   6067 
   6068 	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
   6069 	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
   6070 	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
   6071 	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
   6072 		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
   6073 	else
   6074 		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
   6075 
   6076 	if (resp_v2)
   6077 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
   6078 	else
   6079 		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
   6080 
   6081 	err = iwm_send_cmd(sc, &hcmd);
   6082 	if (err)
   6083 		return err;
   6084 
   6085 	iwm_free_resp(sc, &hcmd);
   6086 
   6087 	return 0;
   6088 }
   6089 
   6090 static void
   6091 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
   6092 {
   6093 	struct iwm_host_cmd cmd = {
   6094 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
   6095 		.len = { sizeof(uint32_t), },
   6096 		.data = { &backoff, },
   6097 	};
   6098 
   6099 	iwm_send_cmd(sc, &cmd);
   6100 }
   6101 
   6102 static int
   6103 iwm_init_hw(struct iwm_softc *sc)
   6104 {
   6105 	struct ieee80211com *ic = &sc->sc_ic;
   6106 	int err, i, ac;
   6107 
   6108 	err = iwm_preinit(sc);
   6109 	if (err)
   6110 		return err;
   6111 
   6112 	err = iwm_start_hw(sc);
   6113 	if (err) {
   6114 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6115 		return err;
   6116 	}
   6117 
   6118 	err = iwm_run_init_mvm_ucode(sc, 0);
   6119 	if (err)
   6120 		return err;
   6121 
   6122 	/* Should stop and start HW since INIT image just loaded. */
   6123 	iwm_stop_device(sc);
   6124 	err = iwm_start_hw(sc);
   6125 	if (err) {
   6126 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6127 		return err;
   6128 	}
   6129 
   6130 	/* Restart, this time with the regular firmware */
   6131 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
   6132 	if (err) {
   6133 		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
   6134 		goto err;
   6135 	}
   6136 
   6137 	err = iwm_send_bt_init_conf(sc);
   6138 	if (err) {
   6139 		aprint_error_dev(sc->sc_dev,
   6140 		    "could not init bt coex (error %d)\n", err);
   6141 		goto err;
   6142 	}
   6143 
   6144 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
   6145 	if (err) {
   6146 		aprint_error_dev(sc->sc_dev,
   6147 		    "could not init tx ant config (error %d)\n", err);
   6148 		goto err;
   6149 	}
   6150 
   6151 	/* Send phy db control command and then phy db calibration*/
   6152 	err = iwm_send_phy_db_data(sc);
   6153 	if (err) {
   6154 		aprint_error_dev(sc->sc_dev,
   6155 		    "could not init phy db (error %d)\n", err);
   6156 		goto err;
   6157 	}
   6158 
   6159 	err = iwm_send_phy_cfg_cmd(sc);
   6160 	if (err) {
   6161 		aprint_error_dev(sc->sc_dev,
   6162 		    "could not send phy config (error %d)\n", err);
   6163 		goto err;
   6164 	}
   6165 
   6166 	/* Add auxiliary station for scanning */
   6167 	err = iwm_add_aux_sta(sc);
   6168 	if (err) {
   6169 		aprint_error_dev(sc->sc_dev,
   6170 		    "could not add aux station (error %d)\n", err);
   6171 		goto err;
   6172 	}
   6173 
   6174 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
   6175 		/*
   6176 		 * The channel used here isn't relevant as it's
   6177 		 * going to be overwritten in the other flows.
   6178 		 * For now use the first channel we have.
   6179 		 */
   6180 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
   6181 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
   6182 		    IWM_FW_CTXT_ACTION_ADD, 0);
   6183 		if (err) {
   6184 			aprint_error_dev(sc->sc_dev,
   6185 			    "could not add phy context %d (error %d)\n",
   6186 			    i, err);
   6187 			goto err;
   6188 		}
   6189 	}
   6190 
   6191 	/* Initialize tx backoffs to the minimum. */
   6192 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
   6193 		iwm_tt_tx_backoff(sc, 0);
   6194 
   6195 	err = iwm_power_update_device(sc);
   6196 	if (err) {
   6197 		aprint_error_dev(sc->sc_dev,
   6198 		    "could send power command (error %d)\n", err);
   6199 		goto err;
   6200 	}
   6201 
   6202 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
   6203 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
   6204 		if (err) {
   6205 			aprint_error_dev(sc->sc_dev,
   6206 			    "could not init LAR (error %d)\n", err);
   6207 			goto err;
   6208 		}
   6209 	}
   6210 
   6211 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
   6212 		err = iwm_config_umac_scan(sc);
   6213 		if (err) {
   6214 			aprint_error_dev(sc->sc_dev,
   6215 			    "could not configure scan (error %d)\n", err);
   6216 			goto err;
   6217 		}
   6218 	}
   6219 
   6220 	for (ac = 0; ac < WME_NUM_AC; ac++) {
   6221 		err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
   6222 		    iwm_ac_to_tx_fifo[ac]);
   6223 		if (err) {
   6224 			aprint_error_dev(sc->sc_dev,
   6225 			    "could not enable Tx queue %d (error %d)\n",
   6226 			    i, err);
   6227 			goto err;
   6228 		}
   6229 	}
   6230 
   6231 	err = iwm_disable_beacon_filter(sc);
   6232 	if (err) {
   6233 		aprint_error_dev(sc->sc_dev,
   6234 		    "could not disable beacon filter (error %d)\n", err);
   6235 		goto err;
   6236 	}
   6237 
   6238 	return 0;
   6239 
   6240  err:
   6241 	iwm_stop_device(sc);
   6242 	return err;
   6243 }
   6244 
   6245 /* Allow multicast from our BSSID. */
   6246 static int
   6247 iwm_allow_mcast(struct iwm_softc *sc)
   6248 {
   6249 	struct ieee80211com *ic = &sc->sc_ic;
   6250 	struct ieee80211_node *ni = ic->ic_bss;
   6251 	struct iwm_mcast_filter_cmd *cmd;
   6252 	size_t size;
   6253 	int err;
   6254 
   6255 	size = roundup(sizeof(*cmd), 4);
   6256 	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
   6257 	if (cmd == NULL)
   6258 		return ENOMEM;
   6259 	cmd->filter_own = 1;
   6260 	cmd->port_id = 0;
   6261 	cmd->count = 0;
   6262 	cmd->pass_all = 1;
   6263 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
   6264 
   6265 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
   6266 	kmem_intr_free(cmd, size);
   6267 	return err;
   6268 }
   6269 
   6270 static int
   6271 iwm_init(struct ifnet *ifp)
   6272 {
   6273 	struct iwm_softc *sc = ifp->if_softc;
   6274 	int err;
   6275 
   6276 	if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
   6277 		return 0;
   6278 
   6279 	sc->sc_generation++;
   6280 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
   6281 
   6282 	err = iwm_init_hw(sc);
   6283 	if (err) {
   6284 		iwm_stop(ifp, 1);
   6285 		return err;
   6286 	}
   6287 
   6288 	ifp->if_flags &= ~IFF_OACTIVE;
   6289 	ifp->if_flags |= IFF_RUNNING;
   6290 
   6291 	ieee80211_begin_scan(&sc->sc_ic, 0);
   6292 	SET(sc->sc_flags, IWM_FLAG_HW_INITED);
   6293 
   6294 	return 0;
   6295 }
   6296 
/*
 * ifnet start handler: drain the management queue and, in RUN state,
 * the interface send queue, encapsulating and handing each frame to
 * iwm_tx().  Sets IFF_OACTIVE when the hardware Tx rings are full.
 * Note the mbuf/node ownership rules in each error path below.
 */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* net80211 stashed the destination node in the mbuf. */
			ni = M_GETCTX(m, struct ieee80211_node *);
			ac = WME_AC_BE;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (!m)
			break;
		/* Need a contiguous Ethernet header to classify the frame. */
		if (m->m_len < sizeof (*eh) &&
		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp, m);

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		/* ieee80211_encap() consumes the mbuf on failure. */
		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		if (ic->ic_rawbpf != NULL)
			bpf_mtap3(ic->ic_rawbpf, m);
		/* iwm_tx() consumes the mbuf on failure. */
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the Tx watchdog for this frame. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
   6381 
/*
 * Stop the interface and power the device down.
 *
 * Marks the softc stopped, forces the 802.11 state machine back to
 * INIT, cancels pending timers/callouts and finally shuts the NIC off
 * via iwm_stop_device().  The `disable' argument is currently unused.
 * Statement order matters: state is torn down before the hardware.
 */
static void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	/* Bump generation; presumably lets deferred work detect a reset. */
	sc->sc_generation++;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* The node's phy context is stale once the device goes down. */
	if (in)
		in->in_phyctxt = NULL;

	if (ic->ic_state != IEEE80211_S_INIT)
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	callout_stop(&sc->sc_calib_to);
	iwm_led_blink_stop(sc);
	ifp->if_timer = sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
   6405 
   6406 static void
   6407 iwm_watchdog(struct ifnet *ifp)
   6408 {
   6409 	struct iwm_softc *sc = ifp->if_softc;
   6410 
   6411 	ifp->if_timer = 0;
   6412 	if (sc->sc_tx_timer > 0) {
   6413 		if (--sc->sc_tx_timer == 0) {
   6414 			aprint_error_dev(sc->sc_dev, "device timeout\n");
   6415 #ifdef IWM_DEBUG
   6416 			iwm_nic_error(sc);
   6417 #endif
   6418 			ifp->if_flags &= ~IFF_UP;
   6419 			iwm_stop(ifp, 1);
   6420 			ifp->if_oerrors++;
   6421 			return;
   6422 		}
   6423 		ifp->if_timer = 1;
   6424 	}
   6425 
   6426 	ieee80211_watchdog(&sc->sc_ic);
   6427 }
   6428 
/*
 * Interface ioctl handler.  Runs at splnet().  An ENETRESET result
 * from the lower layers is translated into a full stop/init cycle,
 * but only when the interface is both UP and RUNNING.
 */
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	const struct sockaddr *sa;
	int s, err = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		err = ifioctl_common(ifp, cmd, data);
		if (err)
			break;
		if (ifp->if_flags & IFF_UP) {
			/* Coming up: initialize unless already running. */
			if (!(ifp->if_flags & IFF_RUNNING)) {
				err = iwm_init(ifp);
				if (err)
					ifp->if_flags &= ~IFF_UP;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop(ifp, 1);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
			err = ENXIO;
			break;
		}
		/*
		 * NOTE(review): SIOCADDMULTI is passed for both the add and
		 * the delete case; presumably the address sits at the same
		 * place in the ifreq either way -- confirm ifreq_getaddr().
		 */
		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
		err = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(sa, &sc->sc_ec) :
		    ether_delmulti(sa, &sc->sc_ec);
		if (err == ENETRESET)
			err = 0;	/* deliberately ignored */
		break;

	default:
		/* Not fully attached yet: only plain Ethernet ioctls. */
		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
			err = ether_ioctl(ifp, cmd, data);
			break;
		}
		err = ieee80211_ioctl(ic, cmd, data);
		break;
	}

	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwm_stop(ifp, 0);
			err = iwm_init(ifp);
		}
	}

	splx(s);
	return err;
}
   6494 
   6495 /*
   6496  * Note: This structure is read from the device with IO accesses,
   6497  * and the reading already does the endian conversion. As it is
   6498  * read with uint32_t-sized accesses, any members with a different size
   6499  * need to be ordered correctly though!
   6500  */
   6501 struct iwm_error_event_table {
   6502 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
   6503 	uint32_t error_id;		/* type of error */
   6504 	uint32_t trm_hw_status0;	/* TRM HW status */
   6505 	uint32_t trm_hw_status1;	/* TRM HW status */
   6506 	uint32_t blink2;		/* branch link */
   6507 	uint32_t ilink1;		/* interrupt link */
   6508 	uint32_t ilink2;		/* interrupt link */
   6509 	uint32_t data1;		/* error-specific data */
   6510 	uint32_t data2;		/* error-specific data */
   6511 	uint32_t data3;		/* error-specific data */
   6512 	uint32_t bcon_time;		/* beacon timer */
   6513 	uint32_t tsf_low;		/* network timestamp function timer */
   6514 	uint32_t tsf_hi;		/* network timestamp function timer */
   6515 	uint32_t gp1;		/* GP1 timer register */
   6516 	uint32_t gp2;		/* GP2 timer register */
   6517 	uint32_t fw_rev_type;	/* firmware revision type */
   6518 	uint32_t major;		/* uCode version major */
   6519 	uint32_t minor;		/* uCode version minor */
   6520 	uint32_t hw_ver;		/* HW Silicon version */
   6521 	uint32_t brd_ver;		/* HW board version */
   6522 	uint32_t log_pc;		/* log program counter */
   6523 	uint32_t frame_ptr;		/* frame pointer */
   6524 	uint32_t stack_ptr;		/* stack pointer */
   6525 	uint32_t hcmd;		/* last host command header */
   6526 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
   6527 				 * rxtx_flag */
   6528 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
   6529 				 * host_flag */
   6530 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
   6531 				 * enc_flag */
   6532 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
   6533 				 * time_flag */
   6534 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
   6535 				 * wico interrupt */
   6536 	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
   6537 	uint32_t wait_event;		/* wait event() caller address */
   6538 	uint32_t l2p_control;	/* L2pControlField */
   6539 	uint32_t l2p_duration;	/* L2pDurationField */
   6540 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
   6541 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
   6542 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
   6543 				 * (LMPM_PMG_SEL) */
   6544 	uint32_t u_timestamp;	/* indicate when the date and time of the
   6545 				 * compilation */
   6546 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
   6547 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
   6548 
   6549 /*
   6550  * UMAC error struct - relevant starting from family 8000 chip.
   6551  * Note: This structure is read from the device with IO accesses,
   6552  * and the reading already does the endian conversion. As it is
   6553  * read with u32-sized accesses, any members with a different size
   6554  * need to be ordered correctly though!
   6555  */
   6556 struct iwm_umac_error_event_table {
   6557 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
   6558 	uint32_t error_id;	/* type of error */
   6559 	uint32_t blink1;	/* branch link */
   6560 	uint32_t blink2;	/* branch link */
   6561 	uint32_t ilink1;	/* interrupt link */
   6562 	uint32_t ilink2;	/* interrupt link */
   6563 	uint32_t data1;		/* error-specific data */
   6564 	uint32_t data2;		/* error-specific data */
   6565 	uint32_t data3;		/* error-specific data */
   6566 	uint32_t umac_major;
   6567 	uint32_t umac_minor;
   6568 	uint32_t frame_pointer;	/* core register 27 */
   6569 	uint32_t stack_pointer;	/* core register 28 */
   6570 	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
   6571 	uint32_t nic_isr_pref;	/* ISR status register */
   6572 } __packed;
   6573 
/*
 * Error-log geometry used by the dump routines below: offset of the
 * first entry and (apparently) the per-entry size, both in bytes.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
   6576 
   6577 #ifdef IWM_DEBUG
/* Firmware error-id to symbolic-name table; last entry is a catch-all. */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};

/*
 * Translate a firmware error id into its symbolic name.  Ids without
 * a table entry map to the trailing "ADVANCED_SYSASSERT" catch-all.
 */
static const char *
iwm_desc_lookup(uint32_t num)
{
	const size_t last = sizeof(advanced_lookup) /
	    sizeof(advanced_lookup[0]) - 1;
	size_t idx;

	for (idx = 0; idx < last; idx++) {
		if (advanced_lookup[idx].num == num)
			break;
	}

	/* idx == last here iff nothing matched: the catch-all entry. */
	return advanced_lookup[idx].name;
}
   6612 
   6613 /*
   6614  * Support for dumping the error log seemed like a good idea ...
   6615  * but it's mostly hex junk and the only sensible thing is the
   6616  * hw/ucode revision (which we know anyway).  Since it's here,
   6617  * I'll just leave it in, just in case e.g. the Intel guys want to
   6618  * help us decipher some "ADVANCED_SYSASSERT" later.
   6619  */
   6620 static void
   6621 iwm_nic_error(struct iwm_softc *sc)
   6622 {
   6623 	struct iwm_error_event_table t;
   6624 	uint32_t base;
   6625 
   6626 	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
   6627 	base = sc->sc_uc.uc_error_event_table;
   6628 	if (base < 0x800000) {
   6629 		aprint_error_dev(sc->sc_dev,
   6630 		    "Invalid error log pointer 0x%08x\n", base);
   6631 		return;
   6632 	}
   6633 
   6634 	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
   6635 		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
   6636 		return;
   6637 	}
   6638 
   6639 	if (!t.valid) {
   6640 		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
   6641 		return;
   6642 	}
   6643 
   6644 	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
   6645 		aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
   6646 		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
   6647 		    sc->sc_flags, t.valid);
   6648 	}
   6649 
   6650 	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
   6651 	    iwm_desc_lookup(t.error_id));
   6652 	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
   6653 	    t.trm_hw_status0);
   6654 	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
   6655 	    t.trm_hw_status1);
   6656 	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
   6657 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
   6658 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
   6659 	aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
   6660 	aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
   6661 	aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
   6662 	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
   6663 	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
   6664 	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
   6665 	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
   6666 	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
   6667 	aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
   6668 	    t.fw_rev_type);
   6669 	aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
   6670 	    t.major);
   6671 	aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
   6672 	    t.minor);
   6673 	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
   6674 	aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
   6675 	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
   6676 	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
   6677 	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
   6678 	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
   6679 	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
   6680 	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
   6681 	aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
   6682 	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
   6683 	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
   6684 	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
   6685 	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
   6686 	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
   6687 	    t.l2p_addr_match);
   6688 	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
   6689 	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
   6690 	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);
   6691 
   6692 	if (sc->sc_uc.uc_umac_error_event_table)
   6693 		iwm_nic_umac_error(sc);
   6694 }
   6695 
/*
 * Dump the UMAC error event table (family 8000+ firmware).  Same
 * structure as iwm_nic_error() but for the second, UMAC-side log.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table t;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Pointers below 0x800000 are treated as bogus (outside SRAM?). */
	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* Length is apparently in 32-bit words, hence the division. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
		iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    t.ilink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    t.ilink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
	aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
	    t.frame_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
	    t.stack_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
	aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
	    t.nic_isr_pref);
}
   6742 #endif
   6743 
/*
 * DMA-sync the response structure that follows the rx packet header
 * (_pkt_) and point _var_ at it.  The synced length is the size of
 * the response type being extracted.  Relies on `sc' and `data'
 * being in scope at the expansion site.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
   6750 
/*
 * DMA-sync _len_ bytes of response payload following the rx packet
 * header (_pkt_) and point _ptr_ at it.  Relies on `sc' and `data'
 * being in scope at the expansion site.
 *
 * Fix: the macro previously synced sizeof(len) bytes -- the size of a
 * caller-scope variable named `len' (typically 4) -- and never used
 * its _len_ parameter at all, so only the first few bytes of the
 * payload were guaranteed visible to the CPU.  Use (_len_) instead.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
   6757 
/*
 * Advance the rx ring read index, wrapping at IWM_RX_RING_COUNT.
 * Fix: the old definition embedded a trailing semicolon in the
 * expansion (hazardous in unbraced if/else) and left `sc'
 * unparenthesized; standard macro hygiene applied.
 */
#define ADVANCE_RXQ(sc) ((sc)->rxq.cur = ((sc)->rxq.cur + 1) % IWM_RX_RING_COUNT)
   6759 
/*
 * Drain the firmware notification/response ring.  Called from the
 * interrupt path.  Each packet is dispatched on its (wide) command
 * id; entries that were direct responses to driver-issued commands
 * are completed via iwm_cmd_done().  Finally the read pointer is
 * handed back to the hardware.
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* closed_rb_num is the firmware's current write index. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int orig_qid, qid, idx, code;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 0x80 of the qid flags firmware-originated packets. */
		orig_qid = pkt->hdr.qid;
		qid = orig_qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_rx_missed_beacons_notif(sc, pkt, data);
			break;

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		/*
		 * The ALIVE response comes in (at least) three layouts;
		 * they are told apart purely by payload size.
		 */
		case IWM_ALIVE: {
			struct iwm_alive_resp_v1 *resp1;
			struct iwm_alive_resp_v2 *resp2;
			struct iwm_alive_resp_v3 *resp3;

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake whoever is sleeping in the firmware-load path. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);
			uint16_t size = le16toh(phy_db_notif->length);
			/* Also sync the variable-length payload that follows. */
			bus_dmamap_sync(sc->sc_dmat, data->map,
			    sizeof(*pkt) + sizeof(*phy_db_notif),
			    size, BUS_DMASYNC_POSTREAD);
			iwm_phy_db_set_section(sc, phy_db_notif, size);
			break;
		}

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break;
		}

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/* Copy the whole packet for the waiting command issuer. */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Two-letter country code reported by the firmware. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
			break;

		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			/* Generic command responses: hand back the status word. */
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt) + sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			/* Defer end-of-scan handling to the workqueue. */
			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);
			break;
		}

		default:
			aprint_error_dev(sc->sc_dev,
			    "unhandled firmware response 0x%x 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWM_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (!(orig_qid & (1 << 7))) {
			iwm_cmd_done(sc, qid, idx);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
   7028 
/*
 * PCI interrupt handler.  Collects the interrupt cause either from the
 * in-memory ICT table (when IWM_FLAG_USE_ICT is set) or from the
 * INT/FH_INT_STATUS registers, then dispatches: fatal firmware or
 * hardware errors stop the device; rx-type causes are funneled into
 * iwm_notif_intr().  Returns nonzero when the interrupt was ours.
 */
static int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	/* Mask all interrupts while we work out the cause. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
		    0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
		/*
		 * NOTE(review): htole32() where le32toh() would read more
		 * naturally; both perform the same byte swap so the result
		 * is identical -- confirm and consider renaming.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* Acknowledge. */
			/*
			 * NOTE(review): bus_dmamap_sync(9) takes a byte
			 * offset, but `&ict[sc->ict_cur] - ict' is an
			 * element count -- looks like it should be scaled
			 * by sizeof(*ict); confirm against bus_dma(9).
			 */
			bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
			    &ict[sc->ict_cur] - ict, sizeof(*ict),
			    BUS_DMASYNC_PREWRITE);
			sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* All-ones / 0xa5a5a5ax patterns mean the device is gone. */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* not our interrupt */
	}

	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]));
#endif

		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		/* Radio was switched off: take the interface down. */
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
	    isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
		    !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	rv = 1;

 out_ena:
	/* Re-enable the interrupts we masked on entry. */
	iwm_restore_interrupts(sc);
 out:
	return rv;
}
   7170 
   7171 /*
   7172  * Autoconf glue-sniffing
   7173  */
   7174 
/* PCI product ids this driver matches on (all Intel, see iwm_match()). */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
#if 0
	/* NOTE(review): ids below are disabled -- presumably not yet
	 * supported by this driver; confirm before enabling. */
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
#endif
};
   7189 
   7190 static int
   7191 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
   7192 {
   7193 	struct pci_attach_args *pa = aux;
   7194 
   7195 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
   7196 		return 0;
   7197 
   7198 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
   7199 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
   7200 			return 1;
   7201 
   7202 	return 0;
   7203 }
   7204 
/*
 * Deferred second-stage attach.  Brings the hardware up once, runs
 * the init firmware (apparently to read the NVM: MAC address and
 * capability bits are available afterwards), then attaches the
 * net80211 layer and radiotap.  Idempotent: subsequent calls return
 * immediately thanks to IWM_FLAG_ATTACHED.
 *
 * Returns 0 on success or an errno from the hardware bring-up.
 */
static int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
		return 0;

	err = iwm_start_hw(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return err;
	}

	/* Run the init ucode, then power the device back down. */
	err = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (err)
		return err;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
	    ether_sprintf(sc->sc_nvm.hw_addr));

#ifndef IEEE80211_NO_HT
	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);
#endif

	/* not all hardware can do 5GHz band */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;

	ieee80211_ifattach(ic);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);

	return 0;
}
   7254 
   7255 static void
   7256 iwm_attach_hook(device_t dev)
   7257 {
   7258 	struct iwm_softc *sc = device_private(dev);
   7259 
   7260 	iwm_preinit(sc);
   7261 }
   7262 
   7263 static void
   7264 iwm_attach(device_t parent, device_t self, void *aux)
   7265 {
   7266 	struct iwm_softc *sc = device_private(self);
   7267 	struct pci_attach_args *pa = aux;
   7268 	struct ieee80211com *ic = &sc->sc_ic;
   7269 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   7270 	pcireg_t reg, memtype;
   7271 	char intrbuf[PCI_INTRSTR_LEN];
   7272 	const char *intrstr;
   7273 	int err;
   7274 	int txq_i;
   7275 	const struct sysctlnode *node;
   7276 
   7277 	sc->sc_dev = self;
   7278 	sc->sc_pct = pa->pa_pc;
   7279 	sc->sc_pcitag = pa->pa_tag;
   7280 	sc->sc_dmat = pa->pa_dmat;
   7281 	sc->sc_pciid = pa->pa_id;
   7282 
   7283 	pci_aprint_devinfo(pa, NULL);
   7284 
   7285 	if (workqueue_create(&sc->sc_eswq, "iwmes",
   7286 	    iwm_endscan_cb, sc, PRI_NONE, IPL_NET, 0))
   7287 		panic("%s: could not create workqueue: scan",
   7288 		    device_xname(self));
   7289 	if (workqueue_create(&sc->sc_nswq, "iwmns",
   7290 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
   7291 		panic("%s: could not create workqueue: newstate",
   7292 		    device_xname(self));
   7293 
   7294 	/*
   7295 	 * Get the offset of the PCI Express Capability Structure in PCI
   7296 	 * Configuration Space.
   7297 	 */
   7298 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
   7299 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
   7300 	if (err == 0) {
   7301 		aprint_error_dev(self,
   7302 		    "PCIe capability structure not found!\n");
   7303 		return;
   7304 	}
   7305 
   7306 	/* Clear device-specific "PCI retry timeout" register (41h). */
   7307 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
   7308 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
   7309 
   7310 	/* Enable bus-mastering */
   7311 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   7312 	reg |= PCI_COMMAND_MASTER_ENABLE;
   7313 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
   7314 
   7315 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
   7316 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
   7317 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
   7318 	if (err) {
   7319 		aprint_error_dev(self, "can't map mem space\n");
   7320 		return;
   7321 	}
   7322 
   7323 	/* Install interrupt handler. */
   7324 	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
   7325 	if (err) {
   7326 		aprint_error_dev(self, "can't allocate interrupt\n");
   7327 		return;
   7328 	}
   7329 	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX) {
   7330 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
   7331 		    PCI_COMMAND_STATUS_REG);
   7332 		if (ISSET(reg, PCI_COMMAND_INTERRUPT_DISABLE)) {
   7333 			CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
   7334 			pci_conf_write(sc->sc_pct, sc->sc_pcitag,
   7335 			    PCI_COMMAND_STATUS_REG, reg);
   7336 		}
   7337 	}
   7338 	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
   7339 	    sizeof(intrbuf));
   7340 	sc->sc_ih = pci_intr_establish(sc->sc_pct, sc->sc_pihp[0], IPL_NET,
   7341 	    iwm_intr, sc);
   7342 	if (sc->sc_ih == NULL) {
   7343 		aprint_error_dev(self, "can't establish interrupt");
   7344 		if (intrstr != NULL)
   7345 			aprint_error(" at %s", intrstr);
   7346 		aprint_error("\n");
   7347 		return;
   7348 	}
   7349 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
   7350 
   7351 	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
   7352 
   7353 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
   7354 	switch (PCI_PRODUCT(sc->sc_pciid)) {
   7355 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
   7356 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
   7357 		sc->sc_fwname = "iwlwifi-3160-16.ucode";
   7358 		sc->host_interrupt_operation_mode = 1;
   7359 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   7360 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   7361 		break;
   7362 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
   7363 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
   7364 		sc->sc_fwname = "iwlwifi-7265D-16.ucode";
   7365 		sc->host_interrupt_operation_mode = 0;
   7366 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   7367 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   7368 		break;
   7369 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
   7370 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
   7371 		sc->sc_fwname = "iwlwifi-7260-16.ucode";
   7372 		sc->host_interrupt_operation_mode = 1;
   7373 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   7374 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   7375 		break;
   7376 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
   7377 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
   7378 		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
   7379 		    IWM_CSR_HW_REV_TYPE_7265D ?
   7380 		    "iwlwifi-7265D-16.ucode": "iwlwifi-7265-16.ucode";
   7381 		sc->host_interrupt_operation_mode = 0;
   7382 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   7383 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   7384 		break;
   7385 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
   7386 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
   7387 		sc->sc_fwname = "iwlwifi-8000C-16.ucode";
   7388 		sc->host_interrupt_operation_mode = 0;
   7389 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
   7390 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
   7391 		break;
   7392 	default:
   7393 		aprint_error_dev(self, "unknown product %#x",
   7394 		    PCI_PRODUCT(sc->sc_pciid));
   7395 		return;
   7396 	}
   7397 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
   7398 
   7399 	/*
   7400 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
   7401 	 * changed, and now the revision step also includes bit 0-1 (no more
   7402 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
   7403 	 * in the old format.
   7404 	 */
   7405 
   7406 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   7407 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
   7408 		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
   7409 
   7410 	if (iwm_prepare_card_hw(sc) != 0) {
   7411 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   7412 		return;
   7413 	}
   7414 
   7415 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
   7416 		uint32_t hw_step;
   7417 
   7418 		/*
   7419 		 * In order to recognize C step the driver should read the
   7420 		 * chip version id located at the AUX bus MISC address.
   7421 		 */
   7422 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
   7423 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
   7424 		DELAY(2);
   7425 
   7426 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   7427 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   7428 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   7429 				   25000);
   7430 		if (!err) {
   7431 			aprint_error_dev(sc->sc_dev,
   7432 			    "failed to wake up the nic\n");
   7433 			return;
   7434 		}
   7435 
   7436 		if (iwm_nic_lock(sc)) {
   7437 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
   7438 			hw_step |= IWM_ENABLE_WFPM;
   7439 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
   7440 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
   7441 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
   7442 			if (hw_step == 0x3)
   7443 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
   7444 				    (IWM_SILICON_C_STEP << 2);
   7445 			iwm_nic_unlock(sc);
   7446 		} else {
   7447 			aprint_error_dev(sc->sc_dev,
   7448 			    "failed to lock the nic\n");
   7449 			return;
   7450 		}
   7451 	}
   7452 
   7453 	/*
   7454 	 * Allocate DMA memory for firmware transfers.
   7455 	 * Must be aligned on a 16-byte boundary.
   7456 	 */
   7457 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
   7458 	    16);
   7459 	if (err) {
   7460 		aprint_error_dev(sc->sc_dev,
   7461 		    "could not allocate memory for firmware\n");
   7462 		return;
   7463 	}
   7464 
   7465 	/* Allocate "Keep Warm" page, used internally by the card. */
   7466 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
   7467 	if (err) {
   7468 		aprint_error_dev(sc->sc_dev,
   7469 		    "could not allocate keep warm page\n");
   7470 		goto fail1;
   7471 	}
   7472 
   7473 	/* Allocate interrupt cause table (ICT).*/
   7474 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
   7475 	    1 << IWM_ICT_PADDR_SHIFT);
   7476 	if (err) {
   7477 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
   7478 		goto fail2;
   7479 	}
   7480 
   7481 	/* TX scheduler rings must be aligned on a 1KB boundary. */
   7482 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
   7483 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
   7484 	if (err) {
   7485 		aprint_error_dev(sc->sc_dev,
   7486 		    "could not allocate TX scheduler rings\n");
   7487 		goto fail3;
   7488 	}
   7489 
   7490 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
   7491 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
   7492 		if (err) {
   7493 			aprint_error_dev(sc->sc_dev,
   7494 			    "could not allocate TX ring %d\n", txq_i);
   7495 			goto fail4;
   7496 		}
   7497 	}
   7498 
   7499 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
   7500 	if (err) {
   7501 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
   7502 		goto fail4;
   7503 	}
   7504 
   7505 	/* Clear pending interrupts. */
   7506 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
   7507 
   7508 	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
   7509 	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
   7510 	    SYSCTL_DESCR("iwm per-controller controls"),
   7511 	    NULL, 0, NULL, 0,
   7512 	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
   7513 	    CTL_EOL)) != 0) {
   7514 		aprint_normal_dev(sc->sc_dev,
   7515 		    "couldn't create iwm per-controller sysctl node\n");
   7516 	}
   7517 	if (err == 0) {
   7518 		int iwm_nodenum = node->sysctl_num;
   7519 
   7520 		/* Reload firmware sysctl node */
   7521 		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
   7522 		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
   7523 		    SYSCTL_DESCR("Reload firmware"),
   7524 		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
   7525 		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
   7526 		    CTL_EOL)) != 0) {
   7527 			aprint_normal_dev(sc->sc_dev,
   7528 			    "couldn't create load_fw sysctl node\n");
   7529 		}
   7530 	}
   7531 
   7532 	/*
   7533 	 * Attach interface
   7534 	 */
   7535 	ic->ic_ifp = ifp;
   7536 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
   7537 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
   7538 	ic->ic_state = IEEE80211_S_INIT;
   7539 
   7540 	/* Set device capabilities. */
   7541 	ic->ic_caps =
   7542 	    IEEE80211_C_WEP |		/* WEP */
   7543 	    IEEE80211_C_WPA |		/* 802.11i */
   7544 #ifdef notyet
   7545 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
   7546 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
   7547 #endif
   7548 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
   7549 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
   7550 
   7551 #ifndef IEEE80211_NO_HT
   7552 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
   7553 	ic->ic_htxcaps = 0;
   7554 	ic->ic_txbfcaps = 0;
   7555 	ic->ic_aselcaps = 0;
   7556 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
   7557 #endif
   7558 
   7559 	/* all hardware can do 2.4GHz band */
   7560 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
   7561 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
   7562 
   7563 	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
   7564 		sc->sc_phyctxt[i].id = i;
   7565 	}
   7566 
   7567 	sc->sc_amrr.amrr_min_success_threshold =  1;
   7568 	sc->sc_amrr.amrr_max_success_threshold = 15;
   7569 
   7570 	/* IBSS channel undefined for now. */
   7571 	ic->ic_ibss_chan = &ic->ic_channels[1];
   7572 
   7573 #if 0
   7574 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
   7575 #endif
   7576 
   7577 	ifp->if_softc = sc;
   7578 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   7579 	ifp->if_init = iwm_init;
   7580 	ifp->if_stop = iwm_stop;
   7581 	ifp->if_ioctl = iwm_ioctl;
   7582 	ifp->if_start = iwm_start;
   7583 	ifp->if_watchdog = iwm_watchdog;
   7584 	IFQ_SET_READY(&ifp->if_snd);
   7585 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
   7586 
   7587 	if_initialize(ifp);
   7588 #if 0
   7589 	ieee80211_ifattach(ic);
   7590 #else
   7591 	ether_ifattach(ifp, ic->ic_myaddr);	/* XXX */
   7592 #endif
   7593 	/* Use common softint-based if_input */
   7594 	ifp->if_percpuq = if_percpuq_create(ifp);
   7595 	if_deferred_start_init(ifp, NULL);
   7596 	if_register(ifp);
   7597 
   7598 	callout_init(&sc->sc_calib_to, 0);
   7599 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
   7600 	callout_init(&sc->sc_led_blink_to, 0);
   7601 	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
   7602 #ifndef IEEE80211_NO_HT
   7603 	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
   7604 	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
   7605 		panic("%s: could not create workqueue: setrates",
   7606 		    device_xname(self));
   7607 	if (workqueue_create(&sc->sc_bawq, "iwmba",
   7608 	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
   7609 		panic("%s: could not create workqueue: blockack",
   7610 		    device_xname(self));
   7611 	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
   7612 	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
   7613 		panic("%s: could not create workqueue: htprot",
   7614 		    device_xname(self));
   7615 #endif
   7616 
   7617 	if (pmf_device_register(self, NULL, NULL))
   7618 		pmf_class_network_register(self, ifp);
   7619 	else
   7620 		aprint_error_dev(self, "couldn't establish power handler\n");
   7621 
   7622 	/*
   7623 	 * We can't do normal attach before the file system is mounted
   7624 	 * because we cannot read the MAC address without loading the
   7625 	 * firmware from disk.  So we postpone until mountroot is done.
   7626 	 * Notably, this will require a full driver unload/load cycle
   7627 	 * (or reboot) in case the firmware is not present when the
   7628 	 * hook runs.
   7629 	 */
   7630 	config_mountroot(self, iwm_attach_hook);
   7631 
   7632 	return;
   7633 
   7634 fail4:	while (--txq_i >= 0)
   7635 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
   7636 	iwm_free_rx_ring(sc, &sc->rxq);
   7637 	iwm_dma_contig_free(&sc->sched_dma);
   7638 fail3:	if (sc->ict_dma.vaddr != NULL)
   7639 		iwm_dma_contig_free(&sc->ict_dma);
   7640 fail2:	iwm_dma_contig_free(&sc->kw_dma);
   7641 fail1:	iwm_dma_contig_free(&sc->fw_dma);
   7642 }
   7643 
   7644 void
   7645 iwm_radiotap_attach(struct iwm_softc *sc)
   7646 {
   7647 	struct ifnet *ifp = sc->sc_ic.ic_ifp;
   7648 
   7649 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
   7650 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
   7651 	    &sc->sc_drvbpf);
   7652 
   7653 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
   7654 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
   7655 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
   7656 
   7657 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
   7658 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
   7659 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
   7660 }
   7661 
#if 0
/*
 * Suspend/resume and deactivation support, currently compiled out.
 * NOTE(review): kept as a reference for a future pmf/devact hookup;
 * confirm the ioctl_rwl + splnet locking still matches the rest of
 * the driver before enabling this code.
 */
static void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	/* Stop, then restart only if administratively up but not running. */
	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}

/* Resume path: redo the PCI quirk fixup, then reinitialize. */
static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

/* Autoconf deactivation hook: stop the interface if it is running. */
static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif
   7709 
/* Autoconfiguration glue; no detach or activate entry points are provided. */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, NULL);
   7712 
   7713 static int
   7714 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
   7715 {
   7716 	struct sysctlnode node;
   7717 	struct iwm_softc *sc;
   7718 	int err, t;
   7719 
   7720 	node = *rnode;
   7721 	sc = node.sysctl_data;
   7722 	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
   7723 	node.sysctl_data = &t;
   7724 	err = sysctl_lookup(SYSCTLFN_CALL(&node));
   7725 	if (err || newp == NULL)
   7726 		return err;
   7727 
   7728 	if (t == 0)
   7729 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
   7730 	return 0;
   7731 }
   7732 
/*
 * Create the global hw.iwm sysctl subtree.  The root node number is
 * stored in iwm_sysctl_root_num so per-device attach can create its
 * own nodes beneath it.
 */
SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
{
	const struct sysctlnode *rnode;
#ifdef IWM_DEBUG
	const struct sysctlnode *cnode;
#endif /* IWM_DEBUG */
	int rc;

	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
	    SYSCTL_DESCR("iwm global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	iwm_sysctl_root_num = rnode->sysctl_num;

#ifdef IWM_DEBUG
	/* control debugging printfs */
	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;
#endif /* IWM_DEBUG */

	return;

 err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
   7763 
   7764 /*
   7765  * XXX code from OpenBSD src/sys/net80211/ieee80211_output.c
   7766  * Copyright (c) 2001 Atsushi Onoe
   7767  * Copyright (c) 2002, 2003 Sam Leffler, Errno Consulting
   7768  * Copyright (c) 2007-2009 Damien Bergamini
   7769  * All rights reserved.
   7770  */
   7771 
   7772 /*
   7773  * Add an SSID element to a frame (see 7.3.2.1).
   7774  */
   7775 static uint8_t *
   7776 ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
   7777 {
   7778        *frm++ = IEEE80211_ELEMID_SSID;
   7779        *frm++ = len;
   7780        memcpy(frm, ssid, len);
   7781        return frm + len;
   7782 }
   7783 
   7784 /*
   7785  * Add a supported rates element to a frame (see 7.3.2.2).
   7786  */
   7787 static uint8_t *
   7788 ieee80211_add_rates(uint8_t *frm, const struct ieee80211_rateset *rs)
   7789 {
   7790        int nrates;
   7791 
   7792        *frm++ = IEEE80211_ELEMID_RATES;
   7793        nrates = min(rs->rs_nrates, IEEE80211_RATE_SIZE);
   7794        *frm++ = nrates;
   7795        memcpy(frm, rs->rs_rates, nrates);
   7796        return frm + nrates;
   7797 }
   7798 
   7799 /*
   7800  * Add an extended supported rates element to a frame (see 7.3.2.14).
   7801  */
   7802 static uint8_t *
   7803 ieee80211_add_xrates(uint8_t *frm, const struct ieee80211_rateset *rs)
   7804 {
   7805        int nrates;
   7806 
   7807        KASSERT(rs->rs_nrates > IEEE80211_RATE_SIZE);
   7808 
   7809        *frm++ = IEEE80211_ELEMID_XRATES;
   7810        nrates = rs->rs_nrates - IEEE80211_RATE_SIZE;
   7811        *frm++ = nrates;
   7812        memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
   7813        return frm + nrates;
   7814 }
   7815