Home | History | Annotate | Line # | Download | only in pci
      1 /*	$NetBSD: if_iwm.c,v 1.93 2025/12/21 16:24:39 mlelstv Exp $	*/
      2 /*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
      3 #define IEEE80211_NO_HT
      4 /*
      5  * Copyright (c) 2014, 2016 genua gmbh <info (at) genua.de>
      6  *   Author: Stefan Sperling <stsp (at) openbsd.org>
      7  * Copyright (c) 2014 Fixup Software Ltd.
      8  *
      9  * Permission to use, copy, modify, and distribute this software for any
     10  * purpose with or without fee is hereby granted, provided that the above
     11  * copyright notice and this permission notice appear in all copies.
     12  *
     13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     20  */
     21 
     22 /*-
     23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
     24  * which were used as the reference documentation for this implementation.
     25  *
     26  ***********************************************************************
     27  *
     28  * This file is provided under a dual BSD/GPLv2 license.  When using or
     29  * redistributing this file, you may do so under either license.
     30  *
     31  * GPL LICENSE SUMMARY
     32  *
     33  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
     34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
     35  * Copyright(c) 2016        Intel Deutschland GmbH
     36  *
     37  * This program is free software; you can redistribute it and/or modify
     38  * it under the terms of version 2 of the GNU General Public License as
     39  * published by the Free Software Foundation.
     40  *
     41  * This program is distributed in the hope that it will be useful, but
     42  * WITHOUT ANY WARRANTY; without even the implied warranty of
     43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     44  * General Public License for more details.
     45  *
     46  * You should have received a copy of the GNU General Public License
     47  * along with this program; if not, write to the Free Software
     48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
     49  * USA
     50  *
     51  * The full GNU General Public License is included in this distribution
     52  * in the file called COPYING.
     53  *
     54  * Contact Information:
     55  *  Intel Linux Wireless <linuxwifi (at) intel.com>
     56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
     57  *
     58  * BSD LICENSE
     59  *
     60  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
     61  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
     62  * Copyright(c) 2016        Intel Deutschland GmbH
     63  * All rights reserved.
     64  *
     65  * Redistribution and use in source and binary forms, with or without
     66  * modification, are permitted provided that the following conditions
     67  * are met:
     68  *
     69  *  * Redistributions of source code must retain the above copyright
     70  *    notice, this list of conditions and the following disclaimer.
     71  *  * Redistributions in binary form must reproduce the above copyright
     72  *    notice, this list of conditions and the following disclaimer in
     73  *    the documentation and/or other materials provided with the
     74  *    distribution.
     75  *  * Neither the name Intel Corporation nor the names of its
     76  *    contributors may be used to endorse or promote products derived
     77  *    from this software without specific prior written permission.
     78  *
     79  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     80  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     81  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     82  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     83  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     84  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     85  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     86  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     87  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     88  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     89  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     90  */
     91 
     92 /*-
     93  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
     94  *
     95  * Permission to use, copy, modify, and distribute this software for any
     96  * purpose with or without fee is hereby granted, provided that the above
     97  * copyright notice and this permission notice appear in all copies.
     98  *
     99  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    100  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    101  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    102  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    103  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    104  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    105  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    106  */
    107 
    108 #include <sys/cdefs.h>
    109 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.93 2025/12/21 16:24:39 mlelstv Exp $");
    110 
    111 #include <sys/param.h>
    112 #include <sys/conf.h>
    113 #include <sys/kernel.h>
    114 #include <sys/kmem.h>
    115 #include <sys/mbuf.h>
    116 #include <sys/mutex.h>
    117 #include <sys/proc.h>
    118 #include <sys/socket.h>
    119 #include <sys/sockio.h>
    120 #include <sys/sysctl.h>
    121 #include <sys/systm.h>
    122 
    123 #include <sys/cpu.h>
    124 #include <sys/bus.h>
    125 #include <sys/workqueue.h>
    126 #include <machine/endian.h>
    127 #include <sys/intr.h>
    128 
    129 #include <dev/pci/pcireg.h>
    130 #include <dev/pci/pcivar.h>
    131 #include <dev/pci/pcidevs.h>
    132 #include <dev/firmload.h>
    133 
    134 #include <net/bpf.h>
    135 #include <net/if.h>
    136 #include <net/if_dl.h>
    137 #include <net/if_media.h>
    138 #include <net/if_ether.h>
    139 
    140 #include <netinet/in.h>
    141 #include <netinet/ip.h>
    142 
    143 #include <net80211/ieee80211_var.h>
    144 #include <net80211/ieee80211_amrr.h>
    145 #include <net80211/ieee80211_radiotap.h>
    146 
    147 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
    148 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
    149 
    150 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
    151 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
    152 
    153 #ifdef IWM_DEBUG
    154 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
    155 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
    156 int iwm_debug = 0;
    157 #else
    158 #define DPRINTF(x)	do { ; } while (0)
    159 #define DPRINTFN(n, x)	do { ; } while (0)
    160 #endif
    161 
    162 #include <dev/pci/if_iwmreg.h>
    163 #include <dev/pci/if_iwmvar.h>
    164 
/*
 * All-zeroes Ethernet address; presumably used as a wildcard/placeholder
 * MAC where no specific address applies — confirm at the call sites.
 */
static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
    168 
/*
 * Valid NVM channel numbers for devices other than the 8000 family
 * (cf. iwm_nvm_channels_8000 below).  The first IWM_NUM_2GHZ_CHANNELS
 * entries are 2.4 GHz channels; the rest are 5 GHz channels.
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
    177 
/*
 * Valid NVM channel numbers for the 8000 family, which supports a wider
 * 5 GHz channel set than older devices.  Same layout as iwm_nvm_channels:
 * 2.4 GHz channels first, then 5 GHz.
 */
static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
    186 
    187 #define IWM_NUM_2GHZ_CHANNELS	14
    188 
/*
 * Rate table indexed by "rate index" (ridx).  'rate' is the legacy rate
 * in units of 500 kb/s (so 2 == 1 Mb/s, judging by IWM_RATE_1M_PLCP),
 * 'plcp' the legacy PLCP signal value (IWM_RATE_INVM_PLCP when the entry
 * is HT-only), and 'ht_plcp' the HT PLCP value
 * (IWM_RATE_HT_SISO_MCS_INV_PLCP when the entry has no HT equivalent).
 */
static const struct iwm_rate {
	uint16_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  26,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_0_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  52,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_1_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  78,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_2_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 104,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_3_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
	{ 156,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_4_PLCP },
	{ 208,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_5_PLCP },
	{ 234,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_6_PLCP },
	{ 260,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_MIMO2_MCS_7_PLCP },
};
/* Index of the first CCK and first OFDM entry in iwm_rates[]. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
/* Rate value (500 kb/s units) >= 12 is OFDM, except 22 (11 Mb/s CCK). */
#define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
    223 
#ifndef IEEE80211_NO_HT
/*
 * Convert an MCS index into an iwm_rates[] index.
 * NB: currently compiled out — IEEE80211_NO_HT is defined near the top
 * of this file.
 */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
        IWM_RATE_MCS_8_INDEX,
        IWM_RATE_MCS_9_INDEX,
        IWM_RATE_MCS_10_INDEX,
        IWM_RATE_MCS_11_INDEX,
        IWM_RATE_MCS_12_INDEX,
        IWM_RATE_MCS_13_INDEX,
        IWM_RATE_MCS_14_INDEX,
        IWM_RATE_MCS_15_INDEX,
};
#endif
    245 
/* One section of the NVM (non-volatile memory) image read from the device. */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in 'data' */
	uint8_t *data;		/* section contents */
};
    250 
/*
 * A deferred net80211 state transition, queued on a workqueue so the
 * transition can run in thread context.
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	enum ieee80211_state ns_nstate;	/* target 802.11 state */
	int ns_arg;			/* argument for the newstate handler */
	int ns_generation;		/* presumably detects stale queued
					 * transitions — confirm in
					 * iwm_newstate_cb() */
};
    257 
    258 #ifndef IEEE80211_NO_HT
    259 static int	iwm_is_mimo_ht_plcp(uint8_t);
    260 static int	iwm_is_mimo_ht_mcs(int);
    261 #endif
    262 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
    263 static int	iwm_firmware_store_section(struct iwm_softc *,
    264 		    enum iwm_ucode_type, uint8_t *, size_t);
    265 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
    266 static void	iwm_fw_info_free(struct iwm_fw_info *);
    267 static void	iwm_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
    268 static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
    269 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
    270 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
    271 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
    272 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
    273 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
    274 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
    275 static int	iwm_nic_lock(struct iwm_softc *);
    276 static void	iwm_nic_unlock(struct iwm_softc *);
    277 static int	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
    278 		    uint32_t);
    279 static int	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    280 static int	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    281 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
    282 		    bus_size_t, bus_size_t);
    283 static void	iwm_dma_contig_free(struct iwm_dma_info *);
    284 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    285 static void	iwm_disable_rx_dma(struct iwm_softc *);
    286 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    287 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    288 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
    289 		    int);
    290 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    291 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    292 static void	iwm_enable_rfkill_int(struct iwm_softc *);
    293 static int	iwm_check_rfkill(struct iwm_softc *);
    294 static void	iwm_enable_interrupts(struct iwm_softc *);
    295 static void	iwm_enable_fwload_interrupt(struct iwm_softc *);
    296 static void	iwm_restore_interrupts(struct iwm_softc *);
    297 static void	iwm_disable_interrupts(struct iwm_softc *);
    298 static void	iwm_ict_reset(struct iwm_softc *);
    299 static int	iwm_set_hw_ready(struct iwm_softc *);
    300 static int	iwm_prepare_card_hw(struct iwm_softc *);
    301 static void	iwm_apm_config(struct iwm_softc *);
    302 static int	iwm_apm_init(struct iwm_softc *);
    303 static void	iwm_apm_stop(struct iwm_softc *);
    304 static int	iwm_allow_mcast(struct iwm_softc *);
    305 static int	iwm_start_hw(struct iwm_softc *);
    306 static void	iwm_stop_device(struct iwm_softc *);
    307 static void	iwm_nic_config(struct iwm_softc *);
    308 static int	iwm_nic_rx_init(struct iwm_softc *);
    309 static int	iwm_nic_rx_legacy_init(struct iwm_softc *);
    310 static int	iwm_nic_tx_init(struct iwm_softc *);
    311 static int	iwm_nic_init(struct iwm_softc *);
    312 static int	iwm_enable_ac_txq(struct iwm_softc *, int, int);
    313 static int	iwm_enable_txq(struct iwm_softc *, int, int, int,
    314 		    int, uint8_t, uint16_t);
    315 static int	iwm_post_alive(struct iwm_softc *);
    316 static struct iwm_phy_db_entry *
    317 		iwm_phy_db_get_section(struct iwm_softc *,
    318 		    enum iwm_phy_db_section_type, uint16_t);
    319 static int	iwm_phy_db_set_section(struct iwm_softc *,
    320 		    struct iwm_calib_res_notif_phy_db *, uint16_t);
    321 static int	iwm_is_valid_channel(uint16_t);
    322 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
    323 static uint16_t iwm_channel_id_to_papd(uint16_t);
    324 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
    325 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
    326 		    uint8_t **, uint16_t *, uint16_t);
    327 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
    328 		    void *);
    329 static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
    330 		    enum iwm_phy_db_section_type, uint8_t);
    331 static int	iwm_send_phy_db_data(struct iwm_softc *);
    332 static int	iwm_send_time_event_cmd(struct iwm_softc *,
    333 		    const struct iwm_time_event_cmd *);
    334 static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
    335 		    uint32_t, uint32_t);
    336 static void	iwm_unprotect_session(struct iwm_softc *, struct iwm_node *);
    337 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
    338 		    uint16_t, uint8_t *, uint16_t *);
    339 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
    340 		    uint16_t *, size_t);
    341 static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
    342 		    const uint8_t *, size_t);
    343 static int	iwm_mimo_enabled(struct iwm_softc *);
    344 #ifndef IEEE80211_NO_HT
    345 static void	iwm_setup_ht_rates(struct iwm_softc *);
    346 static void	iwm_htprot_task(void *);
    347 static void	iwm_update_htprot(struct ieee80211com *,
    348 		    struct ieee80211_node *);
    349 static int	iwm_ampdu_rx_start(struct ieee80211com *,
    350 		    struct ieee80211_node *, uint8_t);
    351 static void	iwm_ampdu_rx_stop(struct ieee80211com *,
    352 		    struct ieee80211_node *, uint8_t);
    353 static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
    354 		    uint8_t, uint16_t, int);
    355 #if 0
    356 static int	iwm_ampdu_tx_start(struct ieee80211com *,
    357 		    struct ieee80211_node *, uint8_t);
    358 static void	iwm_ampdu_tx_stop(struct ieee80211com *,
    359 		    struct ieee80211_node *, uint8_t);
    360 #endif
    361 static void	iwm_ba_task(void *);
    362 #endif
    363 
    364 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
    365 		    const uint16_t *, const uint16_t *,
    366 		    const uint16_t *, const uint16_t *,
    367 		    const uint16_t *, int);
    368 static void	iwm_set_hw_address_8000(struct iwm_softc *,
    369 		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
    370 static int	iwm_parse_nvm_sections(struct iwm_softc *,
    371 		    struct iwm_nvm_section *);
    372 static int	iwm_nvm_init(struct iwm_softc *);
    373 static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
    374 		    const uint8_t *, uint32_t);
    375 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
    376 		    const uint8_t *, uint32_t);
    377 static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
    378 static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
    379 		    struct iwm_fw_sects *, int , int *);
    380 static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
    381 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
    382 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
    383 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
    384 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
    385 static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
    386 		    enum iwm_ucode_type);
    387 static int	iwm_send_dqa_cmd(struct iwm_softc *);
    388 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
    389 #ifdef notyet
    390 static int	iwm_config_ltr(struct iwm_softc *);
    391 #endif
    392 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
    393 static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
    394 static int	iwm_get_signal_strength(struct iwm_softc *,
    395 		    struct iwm_rx_phy_info *);
    396 static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
    397 		    struct iwm_rx_packet *, struct iwm_rx_data *);
    398 static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
    399 static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
    400 		    struct iwm_rx_data *);
    401 static void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,		    struct iwm_node *);
    402 static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
    403 		    struct iwm_rx_data *);
    404 static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
    405 		    uint32_t);
    406 static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
    407 		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
    408 static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
    409 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
    410 		    uint8_t, uint8_t);
    411 static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
    412 		    uint8_t, uint8_t, uint32_t, uint32_t);
    413 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
    414 static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
    415 		    uint16_t, const void *);
    416 static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
    417 		    uint32_t *);
    418 static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
    419 		    const void *, uint32_t *);
    420 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
    421 static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
    422 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
    423 		    uint16_t);
    424 static const struct iwm_rate *
    425 		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
    426 		    struct ieee80211_frame *, struct iwm_tx_cmd *);
    427 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
    428 		    struct ieee80211_node *, int);
    429 static void	iwm_led_enable(struct iwm_softc *);
    430 static void	iwm_led_disable(struct iwm_softc *);
    431 static int	iwm_led_is_enabled(struct iwm_softc *);
    432 static void	iwm_led_blink_timeout(void *);
    433 static void	iwm_led_blink_start(struct iwm_softc *);
    434 static void	iwm_led_blink_stop(struct iwm_softc *);
    435 static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
    436 		    struct iwm_beacon_filter_cmd *);
    437 static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
    438 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
    439 static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
    440 		    int);
    441 static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
    442 		    struct iwm_mac_power_cmd *);
    443 static int	iwm_power_mac_update_mode(struct iwm_softc *,
    444 		    struct iwm_node *);
    445 static int	iwm_power_update_device(struct iwm_softc *);
    446 #ifdef notyet
    447 static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
    448 #endif
    449 static int	iwm_disable_beacon_filter(struct iwm_softc *);
    450 static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
    451 static int	iwm_add_aux_sta(struct iwm_softc *);
    452 static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
    453 static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
    454 static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
    455 		    struct iwm_scan_channel_cfg_lmac *, int);
    456 static int	iwm_fill_probe_req(struct iwm_softc *,
    457 		    struct iwm_scan_probe_req *);
    458 static int	iwm_fill_probe_req_v1(struct iwm_softc *,
    459 		    struct iwm_scan_probe_req_v1 *);
    460 static int	iwm_lmac_scan(struct iwm_softc *);
    461 static int	iwm_config_umac_scan(struct iwm_softc *);
    462 static int	iwm_umac_scan(struct iwm_softc *);
    463 static void	iwm_mcc_update(struct iwm_softc *, struct iwm_mcc_chub_notif *);
    464 static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
    465 static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
    466 		    int *);
    467 static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
    468 		    struct iwm_mac_ctx_cmd *, uint32_t, int);
    469 static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
    470 		    struct iwm_mac_data_sta *, int);
    471 static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
    472 		    uint32_t, int);
    473 static void	iwm_rx_missed_beacons_notif(struct iwm_softc *,
    474 		    struct iwm_missed_beacons_notif *);
    475 static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
    476 static int	iwm_scan(struct iwm_softc *);
    477 static int	iwm_phy_ctxt_update(struct iwm_softc *, struct iwm_phy_ctxt *,
    478 		    struct ieee80211_channel *, uint8_t, uint8_t, uint32_t);
    479 static int	iwm_auth(struct iwm_softc *);
    480 #ifdef notyet
    481 static int	iwm_deauth(struct iwm_softc *);
    482 #endif
    483 static int	iwm_run(struct iwm_softc *);
    484 #ifdef notyet
    485 static int	iwm_run_stop(struct iwm_softc *);
    486 #endif
    487 static int	iwm_assoc(struct iwm_softc *);
    488 static void	iwm_calib_timeout(void *);
    489 static int	iwm_setrates_task(struct iwm_softc *);
    490 static void	iwm_setrates_cb(struct work *, void *);
    491 static int	iwm_setrates(struct iwm_node *);
    492 static int	iwm_media_change(struct ifnet *);
    493 static int	iwm_do_newstate(struct ieee80211com *, enum ieee80211_state,
    494 		    int);
    495 static void	iwm_newstate_cb(struct work *, void *);
    496 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
    497 static void	iwm_endscan(struct iwm_softc *);
    498 static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
    499 		    struct ieee80211_node *);
    500 static int	iwm_sf_config(struct iwm_softc *, int);
    501 static int	iwm_send_bt_init_conf(struct iwm_softc *);
    502 static int	iwm_send_soc_conf(struct iwm_softc *);
    503 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
    504 static int	iwm_send_temp_report_ths_cmd(struct iwm_softc *);
    505 static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
    506 static int	iwm_init_hw(struct iwm_softc *);
    507 static int	iwm_init(struct ifnet *);
    508 static void	iwm_start(struct ifnet *);
    509 static void	iwm_stop(struct ifnet *, int);
    510 static void	iwm_watchdog(struct ifnet *);
    511 static int	iwm_ioctl(struct ifnet *, u_long, void *);
    512 static const char *iwm_desc_lookup(uint32_t);
    513 static void	iwm_nic_error(struct iwm_softc *);
    514 static void	iwm_dump_driver_status(struct iwm_softc *);
    515 static void	iwm_nic_umac_error(struct iwm_softc *);
    516 static void	iwm_rx_pkt(struct iwm_softc *, struct iwm_rx_data *);
    517 static void	iwm_notif_intr(struct iwm_softc *);
    518 static int	iwm_intr(void *);
    519 static void	iwm_softintr(void *);
    520 static int	iwm_preinit(struct iwm_softc *, bool);
    521 static void	iwm_attach_hook(device_t);
    522 static void	iwm_attach(device_t, device_t, void *);
    523 static int	iwm_config_complete(struct iwm_softc *);
    524 #if 0
    525 static void	iwm_init_task(void *);
    526 static int	iwm_activate(device_t, enum devact);
    527 static void	iwm_wakeup(struct iwm_softc *);
    528 #endif
    529 static void	iwm_radiotap_attach(struct iwm_softc *);
    530 static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);
    531 
/* Root node number of this driver's sysctl subtree. */
static int iwm_sysctl_root_num;
/*
 * When non-zero, LAR (location-aware regulatory) is disabled —
 * presumably settable via sysctl/config; confirm outside this chunk.
 */
static int iwm_lar_disable;

#ifndef	IWM_DEFAULT_MCC
#define	IWM_DEFAULT_MCC	"ZZ"	/* world-wide/unknown regulatory domain */
#endif
/* Default mobile country code; see iwm_send_update_mcc_cmd(). */
static char iwm_default_mcc[3] = IWM_DEFAULT_MCC;
    539 
/*
 * Load the firmware image named by sc->sc_fwname via firmload(9) into
 * sc->sc_fw.fw_rawdata / fw_rawsize.  Returns 0 on success or an errno.
 * On failure the partially-filled fw_rawdata buffer is NOT freed here;
 * the caller owns the cleanup (see comment at the 'out' label).
 */
static int
iwm_firmload(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	firmware_handle_t fwh;
	int err;

	/* Nothing to do if an image was already loaded. */
	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
		return 0;

	/* Open firmware image. */
	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not get firmware handle %s\n", sc->sc_fwname);
		return err;
	}

	/* Drop any stale image left over from a previous attempt. */
	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
	}

	fw->fw_rawsize = firmware_get_size(fwh);
	/*
	 * Well, this is how the Linux driver checks it ....
	 */
	if (fw->fw_rawsize < sizeof(uint32_t)) {
		/*
		 * NOTE(review): on this path fw_rawdata is NULL while
		 * fw_rawsize is non-zero; iwm_fw_info_free() would then
		 * hand NULL to kmem_free() — confirm callers never free
		 * after a failed load, or reset fw_rawsize here.
		 */
		aprint_error_dev(sc->sc_dev,
		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
		err = EINVAL;
		goto out;
	}

	/* Read the firmware. */
	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s\n", sc->sc_fwname);
		goto out;
	}

	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
 out:
	/* caller will release memory, if necessary */

	firmware_close(fwh);
	return err;
}
    590 
    591 /*
    592  * just maintaining status quo.
    593  */
    594 static void
    595 iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
    596 {
    597 	struct ieee80211com *ic = &sc->sc_ic;
    598 	struct ieee80211_frame *wh;
    599 	uint8_t subtype;
    600 
    601 	wh = mtod(m, struct ieee80211_frame *);
    602 
    603 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
    604 		return;
    605 
    606 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
    607 
    608 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
    609 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
    610 		return;
    611 
    612 	int chan = le32toh(sc->sc_last_phy_info.channel);
    613 	if (chan < __arraycount(ic->ic_channels))
    614 		ic->ic_curchan = &ic->ic_channels[chan];
    615 }
    616 
    617 #ifdef notyet
    618 static uint8_t
    619 iwm_lookup_cmd_ver(struct iwm_softc *sc, uint8_t grp, uint8_t cmd)
    620 {
    621 	const struct iwm_fw_cmd_version *entry;
    622 	int i;
    623 
    624 	for (i = 0; i < sc->n_cmd_versions; i++) {
    625 		entry = &sc->cmd_versions[i];
    626 		if (entry->group == grp && entry->cmd == cmd)
    627 			return entry->cmd_ver;
    628 	}
    629 
    630 	return IWM_FW_CMD_VER_UNKNOWN;
    631 }
    632 
    633 static int
    634 iwm_is_mimo_ht_plcp(uint8_t ht_plcp)
    635 {
    636 	return (ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP &&
    637 	    (ht_plcp & IWM_RATE_HT_MCS_NSS_MSK));
    638 }
    639 
    640 static int
    641 iwm_is_mimo_ht_mcs(int mcs)
    642 {
    643 	int ridx = iwm_ht_mcs2ridx[mcs];
    644 	return iwm_is_mimo_ht_plcp(iwm_rates[ridx].ht_plcp);
    645 
    646 }
    647 #endif
    648 
    649 static int
    650 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
    651 {
    652 	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;
    653 
    654 	if (dlen < sizeof(*l) ||
    655 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
    656 		return EINVAL;
    657 
    658 	/* we don't actually store anything for now, always use s/w crypto */
    659 
    660 	return 0;
    661 }
    662 
/*
 * Record one firmware section (from an IWM_UCODE_TLV_SEC_* TLV) in
 * the section list for ucode image 'type'.  The payload layout is a
 * 32-bit device load offset followed by the section data; we store a
 * pointer into the raw firmware buffer instead of copying the data.
 * Returns EINVAL if the type is out of range, the section table is
 * full, or the payload is too short to contain the offset word.
 */
static int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	/* for freeing the buffer during driver unload */
	fwone->fws_alloc = data;
	fwone->fws_allocsize = dlen;

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}
    697 
/* On-disk payload layout of an IWM_UCODE_TLV_DEF_CALIB section. */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;	/* which iwm_ucode_type this applies to (LE) */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
    702 
    703 static int
    704 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
    705 {
    706 	const struct iwm_tlv_calib_data *def_calib = data;
    707 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
    708 
    709 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
    710 		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
    711 		    DEVNAME(sc), ucode_type));
    712 		return EINVAL;
    713 	}
    714 
    715 	sc->sc_default_calib[ucode_type].flow_trigger =
    716 	    def_calib->calib.flow_trigger;
    717 	sc->sc_default_calib[ucode_type].event_trigger =
    718 	    def_calib->calib.event_trigger;
    719 
    720 	return 0;
    721 }
    722 
/*
 * Release the raw firmware image and forget all parsed section
 * pointers (they point into that image).  Callers in this file check
 * fw->fw_rawdata != NULL before calling.  fw->fw_status is left
 * untouched so the load state machine is not disturbed.
 */
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
	kmem_free(fw->fw_rawdata, fw->fw_rawsize);
	fw->fw_rawdata = NULL;
	fw->fw_rawsize = 0;
	/* don't touch fw->fw_status */
	memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
    732 
/*
 * Format a firmware version triple into 'buf'.  Firmware images with
 * major version 35 or newer encode the minor number in hexadecimal
 * (matching how the Linux driver prints it); older images use decimal.
 */
static void
iwm_fw_version_str(char *buf, size_t bufsize,
    uint32_t major, uint32_t minor, uint32_t api)
{
	if (major < 35) {
		snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
		return;
	}
	snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
}
    746 
    747 static int
    748 iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
    749 {
    750 	struct iwm_fw_info *fw = &sc->sc_fw;
    751 	struct iwm_tlv_ucode_header *uhdr;
    752 	struct iwm_ucode_tlv tlv;
    753 	enum iwm_ucode_tlv_type tlv_type;
    754 	uint8_t *data;
    755 	int err;
    756 	size_t len;
    757 
    758 	if (fw->fw_status == IWM_FW_STATUS_DONE)
    759 		return 0;
    760 
    761 	while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
    762 		tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
    763 	fw->fw_status = IWM_FW_STATUS_INPROGRESS;
    764 
    765 	if (fw->fw_status == IWM_FW_STATUS_DONE)
    766 		return 0;
    767 
    768 	if (fw->fw_rawdata != NULL) {
    769 		iwm_fw_info_free(fw);
    770 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
    771 	}
    772 
    773 	err = iwm_firmload(sc);
    774 	if (err) {
    775 		aprint_error_dev(sc->sc_dev,
    776 		    "could not read firmware %s (error %d)\n",
    777 		    sc->sc_fwname, err);
    778 		goto out;
    779 	}
    780 
    781 	sc->sc_capaflags = 0;
    782 	sc->sc_capa_n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
    783 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
    784 	memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
    785 	sc->n_cmd_versions = 0;
    786 
    787 	uhdr = (void *)fw->fw_rawdata;
    788 	if (*(uint32_t *)fw->fw_rawdata != 0
    789 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
    790 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
    791 		    sc->sc_fwname);
    792 		err = EINVAL;
    793 		goto out;
    794 	}
    795 
    796 	iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
    797 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
    798 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
    799 	    IWM_UCODE_API(le32toh(uhdr->ver)));
    800 
    801 	data = uhdr->data;
    802 	len = fw->fw_rawsize - sizeof(*uhdr);
    803 
    804 	while (len >= sizeof(tlv)) {
    805 		size_t tlv_len;
    806 		void *tlv_data;
    807 
    808 		memcpy(&tlv, data, sizeof(tlv));
    809 		tlv_len = le32toh(tlv.length);
    810 		tlv_type = le32toh(tlv.type);
    811 
    812 		len -= sizeof(tlv);
    813 		data += sizeof(tlv);
    814 		tlv_data = data;
    815 
    816 		if (len < tlv_len) {
    817 			aprint_error_dev(sc->sc_dev,
    818 			    "firmware too short: %zu bytes\n", len);
    819 			err = EINVAL;
    820 			goto parse_out;
    821 		}
    822 
    823 		switch (tlv_type) {
    824 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
    825 			if (tlv_len < sizeof(uint32_t)) {
    826 				err = EINVAL;
    827 				goto parse_out;
    828 			}
    829 			sc->sc_capa_max_probe_len
    830 			    = le32toh(*(uint32_t *)tlv_data);
    831 			/* limit it to something sensible */
    832 			if (sc->sc_capa_max_probe_len >
    833 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
    834 				err = EINVAL;
    835 				goto parse_out;
    836 			}
    837 			break;
    838 		case IWM_UCODE_TLV_PAN:
    839 			if (tlv_len) {
    840 				err = EINVAL;
    841 				goto parse_out;
    842 			}
    843 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
    844 			break;
    845 		case IWM_UCODE_TLV_FLAGS:
    846 			if (tlv_len < sizeof(uint32_t)) {
    847 				err = EINVAL;
    848 				goto parse_out;
    849 			}
    850 			if (tlv_len % sizeof(uint32_t)) {
    851 				err = EINVAL;
    852 				goto parse_out;
    853 			}
    854 			/*
    855 			 * Apparently there can be many flags, but Linux driver
    856 			 * parses only the first one, and so do we.
    857 			 *
    858 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
    859 			 * Intentional or a bug?  Observations from
    860 			 * current firmware file:
    861 			 *  1) TLV_PAN is parsed first
    862 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
    863 			 * ==> this resets TLV_PAN to itself... hnnnk
    864 			 */
    865 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
    866 			break;
    867 		case IWM_UCODE_TLV_CSCHEME:
    868 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
    869 			if (err)
    870 				goto parse_out;
    871 			break;
    872 		case IWM_UCODE_TLV_NUM_OF_CPU: {
    873 			uint32_t num_cpu;
    874 			if (tlv_len != sizeof(uint32_t)) {
    875 				err = EINVAL;
    876 				goto parse_out;
    877 			}
    878 			num_cpu = le32toh(*(uint32_t *)tlv_data);
    879 			if (num_cpu < 1 || num_cpu > 2) {
    880 				err = EINVAL;
    881 				goto parse_out;
    882 			}
    883 			break;
    884 		}
    885 		case IWM_UCODE_TLV_SEC_RT:
    886 			err = iwm_firmware_store_section(sc,
    887 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
    888 			if (err)
    889 				goto parse_out;
    890 			break;
    891 		case IWM_UCODE_TLV_SEC_INIT:
    892 			err = iwm_firmware_store_section(sc,
    893 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
    894 			if (err)
    895 				goto parse_out;
    896 			break;
    897 		case IWM_UCODE_TLV_SEC_WOWLAN:
    898 			err = iwm_firmware_store_section(sc,
    899 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
    900 			if (err)
    901 				goto parse_out;
    902 			break;
    903 		case IWM_UCODE_TLV_DEF_CALIB:
    904 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
    905 				err = EINVAL;
    906 				goto parse_out;
    907 			}
    908 			err = iwm_set_default_calib(sc, tlv_data);
    909 			if (err)
    910 				goto parse_out;
    911 			break;
    912 		case IWM_UCODE_TLV_PHY_SKU:
    913 			if (tlv_len != sizeof(uint32_t)) {
    914 				err = EINVAL;
    915 				goto parse_out;
    916 			}
    917 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
    918 			break;
    919 
    920 		case IWM_UCODE_TLV_API_CHANGES_SET: {
    921 			struct iwm_ucode_api *api;
    922 			uint32_t idx, bits;
    923 			int i;
    924 			if (tlv_len != sizeof(*api)) {
    925 				err = EINVAL;
    926 				goto parse_out;
    927 			}
    928 			api = (struct iwm_ucode_api *)tlv_data;
    929 			idx = le32toh(api->api_index);
    930 			bits = le32toh(api->api_flags);
    931 			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
    932 				err = EINVAL;
    933 				goto parse_out;
    934 			}
    935 			for (i = 0; i < 32; i++) {
    936 				if (!ISSET(bits, __BIT(i)))
    937 					continue;
    938 				setbit(sc->sc_ucode_api, i + (32 * idx));
    939 			}
    940 			break;
    941 		}
    942 
    943 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
    944 			struct iwm_ucode_capa *capa;
    945 			uint32_t idx, bits;
    946 			int i;
    947 			if (tlv_len != sizeof(*capa)) {
    948 				err = EINVAL;
    949 				goto parse_out;
    950 			}
    951 			capa = (struct iwm_ucode_capa *)tlv_data;
    952 			idx = le32toh(capa->api_index);
    953 			bits = le32toh(capa->api_capa);
    954 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
    955 				err = EINVAL;
    956 				goto parse_out;
    957 			}
    958 			for (i = 0; i < 32; i++) {
    959 				if (!ISSET(bits, __BIT(i)))
    960 					continue;
    961 				setbit(sc->sc_enabled_capa, i + (32 * idx));
    962 			}
    963 			break;
    964 		}
    965 
    966 		case IWM_UCODE_TLV_CMD_VERSIONS:
    967 			if (tlv_len % sizeof(struct iwm_fw_cmd_version)) {
    968 				tlv_len /= sizeof(struct iwm_fw_cmd_version);
    969 				tlv_len *= sizeof(struct iwm_fw_cmd_version);
    970 			}
    971 			if (sc->n_cmd_versions != 0) {
    972 				err = EINVAL;
    973 				goto parse_out;
    974 			}
    975 			if (tlv_len > sizeof(sc->cmd_versions)) {
    976 				err = EINVAL;
    977 				goto parse_out;
    978 			}
    979 			memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
    980 			sc->n_cmd_versions = tlv_len / sizeof(struct iwm_fw_cmd_version);
    981 			break;
    982 
    983 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
    984 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
    985 		case IWM_UCODE_TLV_FW_MEM_SEG:
    986 			/* ignore, not used by current driver */
    987 			break;
    988 
    989 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
    990 			err = iwm_firmware_store_section(sc,
    991 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
    992 			    tlv_len);
    993 			if (err)
    994 				goto parse_out;
    995 			break;
    996 
    997 		case IWM_UCODE_TLV_PAGING: {
    998 			uint32_t paging_mem_size;
    999 			if (tlv_len != sizeof(paging_mem_size)) {
   1000 				err = EINVAL;
   1001 				goto parse_out;
   1002 			}
   1003 			paging_mem_size = le32toh(*(uint32_t *)tlv_data);
   1004 			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
   1005 				err = EINVAL;
   1006 				goto parse_out;
   1007 			}
   1008 			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
   1009 				err = EINVAL;
   1010 				goto parse_out;
   1011 			}
   1012 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
   1013 			    paging_mem_size;
   1014 			fw->fw_sects[IWM_UCODE_TYPE_REGULAR_USNIFFER].paging_mem_size =
   1015 			    paging_mem_size;
   1016 			break;
   1017 		}
   1018 
   1019 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
   1020 			if (tlv_len != sizeof(uint32_t)) {
   1021 				err = EINVAL;
   1022 				goto parse_out;
   1023 			}
   1024 			sc->sc_capa_n_scan_channels =
   1025 			  le32toh(*(uint32_t *)tlv_data);
   1026 			if (sc->sc_capa_n_scan_channels > IWM_MAX_SCAN_CHANNELS) {
   1027 				err = ERANGE;
   1028 				goto parse_out;
   1029 			}
   1030 			break;
   1031 
   1032 		case IWM_UCODE_TLV_FW_VERSION:
   1033 			if (tlv_len != sizeof(uint32_t) * 3) {
   1034 				err = EINVAL;
   1035 				goto parse_out;
   1036 			}
   1037 
   1038 			iwm_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
   1039 			    le32toh(((uint32_t *)tlv_data)[0]),
   1040 			    le32toh(((uint32_t *)tlv_data)[1]),
   1041 			    le32toh(((uint32_t *)tlv_data)[2]));
   1042 			break;
   1043 
   1044 		default:
   1045 			DPRINTF(("%s: unknown firmware section %d, abort\n",
   1046 			    DEVNAME(sc), tlv_type));
   1047 			err = EINVAL;
   1048 			goto parse_out;
   1049 		}
   1050 
   1051 		/*
   1052 		 * Check for size_t overflow and ignore missing padding at
   1053 		 * end of firmware file.
   1054 		 */
   1055 		if (roundup(tlv_len, 4) > len)
   1056 			break;
   1057 
   1058 		len -= roundup(tlv_len, 4);
   1059 		data += roundup(tlv_len, 4);
   1060 	}
   1061 
   1062 	KASSERT(err == 0);
   1063 
   1064  parse_out:
   1065 	if (err) {
   1066 		aprint_error_dev(sc->sc_dev, "firmware parse error %d, "
   1067 		    "section type %d\n", err, tlv_type);
   1068 	}
   1069 
   1070 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
   1071 		aprint_error_dev(sc->sc_dev,
   1072 		    "device uses unsupported power ops\n");
   1073 		err = ENOTSUP;
   1074 	}
   1075 
   1076  out:
   1077 	if (err) {
   1078 		fw->fw_status = IWM_FW_STATUS_NONE;
   1079 		if (fw->fw_rawdata != NULL) {
   1080 			iwm_fw_info_free(fw);
   1081 			CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
   1082 		}
   1083 	} else {
   1084 		fw->fw_status = IWM_FW_STATUS_DONE;
   1085 	}
   1086 	wakeup(&sc->sc_fw);
   1087 
   1088 	return err;
   1089 }
   1090 
/*
 * Read a periphery register: program the indirect-read address
 * register (low 20 bits of 'addr' plus the 3 << 24 access-control
 * bits), order the accesses with a barrier, then fetch the data.
 * Caller must hold the NIC lock (iwm_nic_lock()).
 */
static uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}
   1099 
/*
 * Write a periphery register via the indirect write address/data
 * register pair; the barrier keeps the address write ordered before
 * the data write.  Caller must hold the NIC lock (iwm_nic_lock()).
 */
static void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}
   1108 
   1109 static int
   1110 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
   1111 {
   1112 	int offs;
   1113 	uint32_t *vals = buf;
   1114 
   1115 	if (iwm_nic_lock(sc)) {
   1116 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
   1117 		for (offs = 0; offs < dwords; offs++)
   1118 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
   1119 		iwm_nic_unlock(sc);
   1120 		return 0;
   1121 	}
   1122 	return EBUSY;
   1123 }
   1124 
   1125 static int
   1126 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
   1127 {
   1128 	int offs;
   1129 	const uint32_t *vals = buf;
   1130 
   1131 	if (iwm_nic_lock(sc)) {
   1132 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
   1133 		/* WADDR auto-increments */
   1134 		for (offs = 0; offs < dwords; offs++) {
   1135 			uint32_t val = vals ? vals[offs] : 0;
   1136 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
   1137 		}
   1138 		iwm_nic_unlock(sc);
   1139 		return 0;
   1140 	}
   1141 	return EBUSY;
   1142 }
   1143 
/* Convenience wrapper: write a single 32-bit word of device memory. */
static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
   1149 
   1150 static int
   1151 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
   1152     int timo)
   1153 {
   1154 	for (;;) {
   1155 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
   1156 			return 1;
   1157 		}
   1158 		if (timo < 10) {
   1159 			return 0;
   1160 		}
   1161 		timo -= 10;
   1162 		DELAY(10);
   1163 	}
   1164 }
   1165 
/*
 * Acquire "MAC access" so CSR/periphery registers can be used.
 * The lock counts recursively: only the first holder asserts
 * MAC_ACCESS_REQ and waits (up to 150ms) for the MAC clock to become
 * ready.  Returns 1 on success, 0 on failure; on failure the request
 * bit is dropped again and the count rolled back.
 */
static int
iwm_nic_lock(struct iwm_softc *sc)
{

	mutex_enter(&sc->sc_nic_mtx);
	/* Nested acquisition: already held, just bump the count. */
	if (sc->sc_nic_locks++ > 0) {
		mutex_exit(&sc->sc_nic_mtx);
		return 1;
	}

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	mutex_exit(&sc->sc_nic_mtx);

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	/* Wait for the clock to stabilize and the device to stay awake. */
	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
		return 1;
	}

	/* Timed out: undo the request and the count taken above. */
	mutex_enter(&sc->sc_nic_mtx);
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	--sc->sc_nic_locks;
	mutex_exit(&sc->sc_nic_mtx);

	device_printf(sc->sc_dev, "acquiring device failed\n");
	return 0;
}
   1200 
/*
 * Drop one reference on the NIC lock; the last holder deasserts
 * MAC_ACCESS_REQ, allowing the device to sleep again.  Logs an error
 * if called without a matching iwm_nic_lock().
 */
static void
iwm_nic_unlock(struct iwm_softc *sc)
{
	int err = 1;

	mutex_enter(&sc->sc_nic_mtx);
	if (sc->sc_nic_locks > 0) {
		if (--sc->sc_nic_locks == 0) {
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		}
		err = 0;
	}
	mutex_exit(&sc->sc_nic_mtx);

	if (err)
		device_printf(sc->sc_dev, "NIC already unlocked\n");
}
   1219 
   1220 static int
   1221 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
   1222     uint32_t mask)
   1223 {
   1224 	uint32_t val;
   1225 
   1226 	if (iwm_nic_lock(sc)) {
   1227 		val = iwm_read_prph(sc, reg) & mask;
   1228 		val |= bits;
   1229 		iwm_write_prph(sc, reg, val);
   1230 		iwm_nic_unlock(sc);
   1231 		return 0;
   1232 	}
   1233 	return EBUSY;
   1234 }
   1235 
/* Set 'bits' in a periphery register, leaving all other bits intact. */
static int
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
   1241 
/* Clear 'bits' in a periphery register, leaving all other bits intact. */
static int
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	return iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
   1247 
/*
 * Allocate, map and load a physically contiguous DMA buffer of
 * 'size' bytes with the given alignment.  The buffer is zeroed and
 * pre-synced for device reads; dma->vaddr/paddr are valid on return.
 * On any failure all partially acquired resources are released via
 * iwm_dma_contig_free() and the bus_dma error is returned.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	/* Single-segment map: the whole buffer must be contiguous. */
	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}
   1287 
/*
 * Tear down a buffer set up by iwm_dma_contig_alloc().  Safe to call
 * on a partially constructed iwm_dma_info: each step is undone only
 * if it was performed (map != NULL, vaddr != NULL).
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
   1304 
/*
 * Allocate the RX ring: the descriptor ring (an array of 32-bit
 * buffer addresses, 256-byte aligned), the RX status area (16-byte
 * aligned), and one DMA map plus an initial receive buffer for each
 * of the IWM_RX_RING_COUNT slots.  On failure everything allocated
 * so far is released via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		/* Attach an initial receive buffer to this slot. */
		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}
   1355 
/*
 * Stop the RX DMA channel and wait (up to ~10ms, polling every 10us)
 * for the channel to report idle.  Silently does nothing if the NIC
 * lock cannot be taken.
 */
static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
}
   1372 
/*
 * Reset the RX ring to its initial state: rewind the cursor and
 * clear the DMA-visible status area.
 * NOTE(review): the PREWRITE sync precedes the memset and POSTWRITE
 * follows it, which is the reverse of the usual CPU-write pattern —
 * verify against bus_dma(9) intent before changing.
 */
void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREWRITE);
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_POSTWRITE);
}
   1383 
/*
 * Release all RX ring resources: descriptor and status DMA areas,
 * any mbufs still attached to ring slots, and their DMA maps.
 * Safe on a partially constructed ring.
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_size_t sz = data->m->m_pkthdr.len;
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    sz, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}
   1409 
/*
 * Allocate TX ring 'qid': the TFD descriptor array (256-byte
 * aligned) for every ring, and — only for rings that may actually
 * carry traffic (qid <= IWM_LAST_AGG_TX_QUEUE) — the per-slot
 * command buffers and data DMA maps.  On failure everything is
 * released via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err, nsegs;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * There is no need to allocate DMA buffers for unused rings.
	 * 7k/8k/9k hardware supports up to 31 Tx rings which is more
	 * than we currently need.
	 *
	 * In DQA mode we use 1 command queue + 4 DQA mgmt/data queues.
	 * The command is queue 0 (sc->txq[0]), and 4 mgmt/data frame queues
	 * are sc->tqx[IWM_DQA_MIN_MGMT_QUEUE + ac], i.e. sc->txq[5:8],
	 * in order to provide one queue per EDCA category.
	 * Tx aggregation requires additional queues, one queue per TID for
	 * which aggregation is enabled. We map TID 0-7 to sc->txq[10:17].
	 *
	 * In non-DQA mode, we use rings 0 through 9 (0-3 are EDCA, 9 is cmd),
	 * and Tx aggregation is not supported.
	 *
	 * Unfortunately, we cannot tell if DQA will be used until the
	 * firmware gets loaded later, so just allocate sufficient rings
	 * in order to satisfy both cases.
	 */
	if (qid > IWM_LAST_AGG_TX_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Precompute per-slot command/scratch DMA addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE || qid == IWM_DQA_CMD_QUEUE) {
			mapsize = IWM_RBUF_SIZE;
			nsegs = 1;
		} else {
			mapsize = MCLBYTES;
			nsegs = IWM_NUM_OF_TBS - 2;
		}
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    nsegs, mapsize, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}
   1495 
/*
 * Abort all frames queued on a TX ring and return it to its initial
 * empty state.  Pending mbufs are unloaded and freed, descriptors
 * cleared, and the ring's bit dropped from qfullmsk.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);

	/* 7000 family NICs are locked while commands are in progress. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (ring->qid == sc->cmdqid && ring->queued > 0) {
			/* Drop the NIC lock held for the aborted command. */
			iwm_nic_unlock(sc);
		}
	}
	ring->queued = 0;
	ring->cur = 0;
}
   1527 
/*
 * Release all TX ring resources: descriptor and command DMA areas,
 * any mbufs still attached to ring slots, and their DMA maps.
 * Safe on a partially constructed ring.
 */
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}
   1552 
/* Mask all interrupts except RF-kill switch changes. */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1559 
   1560 static int
   1561 iwm_check_rfkill(struct iwm_softc *sc)
   1562 {
   1563 	uint32_t v;
   1564 	int rv;
   1565 
   1566 	/*
   1567 	 * "documentation" is not really helpful here:
   1568 	 *  27:	HW_RF_KILL_SW
   1569 	 *	Indicates state of (platform's) hardware RF-Kill switch
   1570 	 *
   1571 	 * But apparently when it's off, it's on ...
   1572 	 */
   1573 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
   1574 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
   1575 	if (rv) {
   1576 		sc->sc_flags |= IWM_FLAG_RFKILL;
   1577 	} else {
   1578 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
   1579 	}
   1580 
   1581 	return rv;
   1582 }
   1583 
/* Enable the full default interrupt set (IWM_CSR_INI_SET_MASK). */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1590 
/* Mask all interrupts except FH TX, used while uploading firmware. */
static void
iwm_enable_fwload_interrupt(struct iwm_softc *sc)
{

	sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1598 
/* Re-program the interrupt mask from the last value saved in softc. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1604 
/* Mask all interrupts and acknowledge anything already pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
   1614 
/*
 * Reset and re-enable the ICT (interrupt cause table): zero the
 * table, point the device at its physical address, and switch the
 * driver into ICT interrupt mode.  Interrupts are disabled for the
 * duration and re-enabled at the end.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, sc->ict_dma.size,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Ack anything pending before re-enabling. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
   1638 
#define IWM_HW_READY_TIMEOUT 50
/*
 * Request NIC_READY and poll (up to IWM_HW_READY_TIMEOUT us) for the
 * hardware to acknowledge it.  On success also tell the device the
 * OS is alive.  Returns non-zero when the hardware is ready.
 */
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT
   1659 
/*
 * Bring the card to the "HW ready" state, retrying with the PREPARE
 * handshake if the first attempt fails.  Each of up to 10 retries
 * re-asserts PREPARE and polls for up to 150ms in 200us steps.
 * Returns 0 on success or ETIMEDOUT.
 */
static int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int t = 0;
	int ntries;

	if (iwm_set_hw_ready(sc))
		return 0;

	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(1000);

	for (ntries = 0; ntries < 10; ntries++) {
		/* If HW is not ready, prepare the conditions to check again */
		IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
		    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			if (iwm_set_hw_ready(sc))
				return 0;
			DELAY(200);
			t += 200;
		} while (t < 150000);
		DELAY(25000);
	}

	return ETIMEDOUT;
}
   1689 
/*
 * Configure link power-save states to mirror the PCIe ASPM setting:
 * when ASPM L1 is enabled in the link control register, disable L0S
 * in the device (and vice versa).  The comments note the confusingly
 * inverted wording in the Linux driver's log messages.
 */
static void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}

#ifdef notyet
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_DCSR2);
	sc->sc_ltr_enabled = (reg & PCIE_DCSR2_LTR_MEC) ? 1 : 0;
	DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
	    DEVNAME(sc),
	    (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
	    sc->sc_ltr_enabled ? "En" : "Dis"));
#endif
}
   1717 
   1718 /*
   1719  * Start up NIC's basic functionality after it has been reset
   1720  * e.g. after platform boot or shutdown.
   1721  * NOTE:  This does not load uCode nor start the embedded processor
   1722  */
   1723 static int
   1724 iwm_apm_init(struct iwm_softc *sc)
   1725 {
   1726 	int err = 0;
   1727 
   1728 	/* Disable L0S exit timer (platform NMI workaround) */
   1729 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
   1730 		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1731 		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
   1732 	}
   1733 
   1734 	/*
   1735 	 * Disable L0s without affecting L1;
   1736 	 *  don't wait for ICH L0s (ICH bug W/A)
   1737 	 */
   1738 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1739 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
   1740 
   1741 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
   1742 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
   1743 
   1744 	/*
   1745 	 * Enable HAP INTA (interrupt from management bus) to
   1746 	 * wake device's PCI Express link L1a -> L0s
   1747 	 */
   1748 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1749 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
   1750 
   1751 	iwm_apm_config(sc);
   1752 
   1753 #if 0 /* not for 7k/8k */
   1754 	/* Configure analog phase-lock-loop before activating to D0A */
   1755 	if (trans->cfg->base_params->pll_cfg_val)
   1756 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
   1757 		    trans->cfg->base_params->pll_cfg_val);
   1758 #endif
   1759 
   1760 	/*
   1761 	 * Set "initialization complete" bit to move adapter from
   1762 	 * D0U* --> D0A* (powered-up active) state.
   1763 	 */
   1764 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
   1765 
   1766 	/*
   1767 	 * Wait for clock stabilization; once stabilized, access to
   1768 	 * device-internal resources is supported, e.g. iwm_write_prph()
   1769 	 * and accesses to uCode SRAM.
   1770 	 */
   1771 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   1772 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   1773 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
   1774 		aprint_error_dev(sc->sc_dev,
   1775 		    "timeout waiting for clock stabilization\n");
   1776 		err = ETIMEDOUT;
   1777 		goto out;
   1778 	}
   1779 
   1780 	if (sc->host_interrupt_operation_mode) {
   1781 		/*
   1782 		 * This is a bit of an abuse - This is needed for 7260 / 3160
   1783 		 * only check host_interrupt_operation_mode even if this is
   1784 		 * not related to host_interrupt_operation_mode.
   1785 		 *
   1786 		 * Enable the oscillator to count wake up time for L1 exit. This
   1787 		 * consumes slightly more power (100uA) - but allows to be sure
   1788 		 * that we wake up from L1 on time.
   1789 		 *
   1790 		 * This looks weird: read twice the same register, discard the
   1791 		 * value, set a bit, and yet again, read that same register
   1792 		 * just to discard the value. But that's the way the hardware
   1793 		 * seems to like it.
   1794 		 */
   1795 		if (iwm_nic_lock(sc)) {
   1796 			iwm_read_prph(sc, IWM_OSC_CLK);
   1797 			iwm_read_prph(sc, IWM_OSC_CLK);
   1798 			iwm_nic_unlock(sc);
   1799 		}
   1800 		err = iwm_set_bits_prph(sc, IWM_OSC_CLK,
   1801 		    IWM_OSC_CLK_FORCE_CONTROL);
   1802 		if (err)
   1803 			goto out;
   1804 		if (iwm_nic_lock(sc)) {
   1805 			iwm_read_prph(sc, IWM_OSC_CLK);
   1806 			iwm_read_prph(sc, IWM_OSC_CLK);
   1807 			iwm_nic_unlock(sc);
   1808 		}
   1809 	}
   1810 
   1811 	/*
   1812 	 * Enable DMA clock and wait for it to stabilize.
   1813 	 *
   1814 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
   1815 	 * do not disable clocks.  This preserves any hardware bits already
   1816 	 * set by default in "CLK_CTRL_REG" after reset.
   1817 	 */
   1818 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
   1819 		if (iwm_nic_lock(sc)) {
   1820 			iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
   1821 			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
   1822 			iwm_nic_unlock(sc);
   1823 		}
   1824 		DELAY(20);
   1825 
   1826 		/* Disable L1-Active */
   1827 		err = iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
   1828 		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
   1829 		if (err)
   1830 			goto out;
   1831 
   1832 		/* Clear the interrupt in APMG if the NIC is in RFKILL */
   1833 		if (iwm_nic_lock(sc)) {
   1834 			iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
   1835 			    IWM_APMG_RTC_INT_STT_RFKILL);
   1836 			iwm_nic_unlock(sc);
   1837 		}
   1838 	}
   1839  out:
   1840 	if (err)
   1841 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
   1842 	return err;
   1843 }
   1844 
/*
 * Counterpart of iwm_apm_init(): stop busmaster DMA and move the
 * adapter from the powered-up active (D0A*) state back to the
 * uninitialized (D0U*) state.
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* Assert PREPARE/PME while link power management is held off. */
	IWM_SETBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE |
	    IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	DELAY(1000);
	IWM_CLRBITS(sc, IWM_CSR_DBG_LINK_PWR_MGMT_REG,
	    IWM_CSR_RESET_LINK_PWR_MGMT_DISABLED);
	DELAY(5000);

	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	/* Wait (up to 100 poll ticks) for the DMA master to report idle. */
	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
   1874 
/*
 * Prepare the card, software-reset it, and run basic APM bring-up.
 * Also (re)arms the RF-kill interrupt and samples the current RF-kill
 * switch state.  Returns 0 on success or an errno from the sub-steps.
 */
static int
iwm_start_hw(struct iwm_softc *sc)
{
	int err;

	err = iwm_prepare_card_hw(sc);
	if (err)
		return err;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	err = iwm_apm_init(sc);
	if (err)
		return err;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
   1897 
/*
 * Quiesce and power down the device: disable interrupts, drain and
 * stop all TX/RX DMA, reset the rings, stop the APM, and reset the
 * on-board processor.  Leaves the RF-kill interrupt armed so switch
 * changes are still noticed while the device is down.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	/* Interrupts are off, so fall back from ICT mode. */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		/* Deactivate TX scheduler. */
		iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll up to 200 * 20us for the channel to idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);
	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* Power-down device's busmaster DMA clocks */
		if (iwm_nic_lock(sc)) {
			iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
			    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
			iwm_nic_unlock(sc);
		}
		DELAY(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (sc->sc_nic_locks > 0)
		device_printf(sc->sc_dev, "%d active NIC locks cleared\n",
		    sc->sc_nic_locks);
	sc->sc_nic_locks = 0;

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(5000);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	iwm_prepare_card_hw(sc);
}
   1970 
/*
 * Program the radio/MAC configuration into IWM_CSR_HW_IF_CONFIG_REG.
 *
 * The radio type/step/dash fields come from the firmware's PHY config
 * (sc_fw_phy_config) and the MAC step/dash from the hardware revision
 * (sc_hw_rev); only the bits covered by "mask" are rewritten.
 */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t mask, val, reg_val = 0;

	/* Extract the three radio-config fields from the FW PHY config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* MAC step/dash from the hardware revision register. */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	mask = IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
	    IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;

	/* Read-modify-write: only the masked fields change. */
	val = IWM_READ(sc, IWM_CSR_HW_IF_CONFIG_REG);
	val &= ~mask;
	val |= reg_val;
	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
	}
}
   2021 
/*
 * Initialize RX DMA.  Only the legacy RX setup path is implemented
 * here, so this simply dispatches to it.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	return iwm_nic_rx_legacy_init(sc);
}
   2027 
/*
 * Program the legacy RX DMA engine: clear the status area, point the
 * hardware at the RX descriptor ring and status block, and enable the
 * RX channel.  Returns 0 on success or EBUSY if the NIC lock cannot
 * be taken.
 */
static int
iwm_nic_rx_legacy_init(struct iwm_softc *sc)
{
	/* Zero the RX status area and flush it for device access. */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	iwm_disable_rx_dma(sc);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Reset read/write pointers before re-enabling the channel. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	iwm_nic_unlock(sc);

	/*
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	return 0;
}
   2080 
/*
 * Program the TX side: deactivate the scheduler, install the "keep
 * warm" page and per-queue descriptor ring addresses, and enable
 * auto-active mode for the scheduler.  Returns 0 on success, EBUSY
 * if the NIC lock cannot be taken, or an error from the PRPH write.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid, err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}

	err = iwm_set_bits_prph(sc, IWM_SCD_GP_CTRL,
	    IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE |
	    IWM_SCD_GP_CTRL_ENABLE_31_QUEUES);

	iwm_nic_unlock(sc);

	return err;
}
   2113 
   2114 static int
   2115 iwm_nic_init(struct iwm_softc *sc)
   2116 {
   2117 	int err;
   2118 
   2119 	iwm_apm_init(sc);
   2120 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
   2121 		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
   2122 		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
   2123 		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
   2124 	}
   2125 
   2126 	iwm_nic_config(sc);
   2127 
   2128 	err = iwm_nic_rx_init(sc);
   2129 	if (err)
   2130 		return err;
   2131 
   2132 	err = iwm_nic_tx_init(sc);
   2133 	if (err)
   2134 		return err;
   2135 
   2136 	DPRINTF(("shadow registers enabled\n"));
   2137 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
   2138 
   2139 	return 0;
   2140 }
   2141 
/*
 * Access-category index -> hardware TX FIFO mapping
 * (order: BE, BK, VI, VO).
 */
static const uint8_t iwm_ac_to_tx_fifo[] = {
	IWM_TX_FIFO_BE,
	IWM_TX_FIFO_BK,
	IWM_TX_FIFO_VI,
	IWM_TX_FIFO_VO,
};
   2148 
/*
 * Activate TX queue "qid" and bind it to hardware FIFO "fifo" via
 * direct scheduler register/SRAM programming (no firmware command).
 * Returns 0 on success or an error from the PRPH bit-clear helper.
 */
static int
iwm_enable_ac_txq(struct iwm_softc *sc, int qid, int fifo)
{
	int err;

	/* Reset the queue's write pointer to slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	/* Take the queue inactive while it is reconfigured. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	/* Make sure the queue is not in aggregation mode. */
	err = iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
	if (err) {
		return err;
	}

	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	/* Clear the queue's scheduler context in SRAM. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);

	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT
		<< IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Re-activate the queue, now bound to its FIFO. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	/* The command queue additionally needs its enable bit set. */
	if (qid == sc->cmdqid) {
		iwm_write_prph(sc, IWM_SCD_EN_CTRL,
		    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
	}

	return 0;
}
   2193 
/*
 * Enable TX queue "qid" for station "sta_id"/TID "tid" via the
 * IWM_SCD_QUEUE_CFG firmware command, starting at sequence number
 * "ssn".  Returns 0 on success or an errno from the command send.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo,
    int aggregate, uint8_t tid, uint16_t ssn)
{
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_scd_txq_cfg_cmd cmd;
	int err, idx, scd_bug;

	/*
	 * If we need to move the SCD write pointer by steps of
	 * 0x40, 0x80 or 0xc0, it gets stuck.
	 * This is really ugly, but this is the easiest way out for
	 * this sad hardware issue.
	 * This bug has been fixed on devices 9000 and up.
	 */
#ifdef notyet
	scd_bug = !sc->sc_mqrx_supported &&
		!((ssn - ring->cur) & 0x3f) &&
		(ssn != ring->cur);
#else
	scd_bug = !((ssn - ring->cur) & 0x3f) &&
		(ssn != ring->cur);
#endif
	if (scd_bug)
		ssn = (ssn + 1) & 0xfff;

	/* Point the hardware write pointer at the slot for "ssn". */
	idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx);
	ring->cur = idx;

	/* memset also zeroes struct padding sent to the firmware. */
	memset(&cmd, 0, sizeof(cmd));
	cmd.tid = tid;
	cmd.scd_queue = qid;
	cmd.enable = 1;
	cmd.sta_id = sta_id;
	cmd.tx_fifo = fifo;
	cmd.aggregate = aggregate;
	cmd.ssn = htole16(ssn);
	cmd.window = IWM_FRAME_LIMIT;

	err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0,
	    sizeof(cmd), &cmd);
	if (err)
		return err;

	return 0;
}
   2241 
/*
 * Finish bring-up after the firmware's "alive" notification: verify
 * the scheduler SRAM base, reset ICT, clear scheduler state, enable
 * the command queue and all FH TX DMA channels.  Returns 0 on
 * success, EBUSY if the NIC lock cannot be taken, or an errno from
 * the memory/queue helpers.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int err, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Adopt the scheduler base address the firmware reports. */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (sc->sched_base != base) {
		DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
		    DEVNAME(sc), sc->sched_base, base));
		sc->sched_base = base;
	}

	iwm_ict_reset(sc);

	iwm_nic_unlock(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	err = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	err = iwm_enable_ac_txq(sc, sc->cmdqid, IWM_TX_FIFO_CMD);
	if (err) {
		iwm_nic_unlock(sc);
		return err;
	}

	/* Activate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwm_nic_unlock(sc);

	/* Enable L1-Active */
	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
		err = iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	}

	return err;
}
   2311 
   2312 static struct iwm_phy_db_entry *
   2313 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
   2314     uint16_t chg_id)
   2315 {
   2316 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   2317 
   2318 	if (type >= IWM_PHY_DB_MAX)
   2319 		return NULL;
   2320 
   2321 	switch (type) {
   2322 	case IWM_PHY_DB_CFG:
   2323 		return &phy_db->cfg;
   2324 	case IWM_PHY_DB_CALIB_NCH:
   2325 		return &phy_db->calib_nch;
   2326 	case IWM_PHY_DB_CALIB_CHG_PAPD:
   2327 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
   2328 			return NULL;
   2329 		return &phy_db->calib_ch_group_papd[chg_id];
   2330 	case IWM_PHY_DB_CALIB_CHG_TXP:
   2331 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
   2332 			return NULL;
   2333 		return &phy_db->calib_ch_group_txp[chg_id];
   2334 	default:
   2335 		return NULL;
   2336 	}
   2337 	return NULL;
   2338 }
   2339 
/*
 * Store a calibration-result notification from the firmware into the
 * corresponding PHY DB entry, replacing any previous contents.
 * "size" is the payload length in bytes.  Returns 0 on success,
 * EINVAL for an unknown section, or ENOMEM on allocation failure
 * (in which case the entry is left empty).
 */
static int
iwm_phy_db_set_section(struct iwm_softc *sc,
    struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
{
	struct iwm_phy_db_entry *entry;
	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
	uint16_t chg_id = 0;

	/* Channel-group sections carry their group id as the first LE16. */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Replace any previous data; NOSLEEP because we may be in intr. */
	if (entry->data)
		kmem_intr_free(entry->data, entry->size);
	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
	    __func__, __LINE__, type, size, entry->data));

	return 0;
}
   2371 
/*
 * Return 1 if "ch_id" is a channel number this driver considers
 * valid, 0 otherwise.  Valid numbers are 0-14, and the 5 GHz sets
 * 36-64 (multiples of 4), 100-140 (multiples of 4) and 145-165
 * (numbers congruent to 1 mod 4).
 */
static int
iwm_is_valid_channel(uint16_t ch_id)
{
	if (ch_id <= 14)
		return 1;
	if (ch_id >= 36 && ch_id <= 64 && (ch_id & 3) == 0)
		return 1;
	if (ch_id >= 100 && ch_id <= 140 && (ch_id & 3) == 0)
		return 1;
	if (ch_id >= 145 && ch_id <= 165 && (ch_id & 3) == 1)
		return 1;
	return 0;
}
   2382 
/*
 * Map a channel number to its dense index in the calibration tables;
 * returns 0xff for channel numbers iwm_is_valid_channel() rejects.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	uint8_t index;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		index = ch_id - 1;
	else if (ch_id <= 64)
		index = (ch_id + 20) / 4;
	else if (ch_id <= 140)
		index = (ch_id - 12) / 4;
	else
		index = (ch_id - 13) / 4;

	return index;
}
   2397 
   2398 
   2399 static uint16_t
   2400 iwm_channel_id_to_papd(uint16_t ch_id)
   2401 {
   2402 	if (!iwm_is_valid_channel(ch_id))
   2403 		return 0xff;
   2404 
   2405 	if (1 <= ch_id && ch_id <= 14)
   2406 		return 0;
   2407 	if (36 <= ch_id && ch_id <= 64)
   2408 		return 1;
   2409 	if (100 <= ch_id && ch_id <= 140)
   2410 		return 2;
   2411 	return 3;
   2412 }
   2413 
/*
 * Map a channel number to its TX-power calibration channel-group
 * index by scanning the stored TXP group entries for the first group
 * whose max_channel_idx covers the channel's index.  Returns 0xff if
 * the channel is invalid, a group's data is missing, or no group
 * covers the channel.
 */
static uint16_t
iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
{
	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
	struct iwm_phy_db_chg_txp *txp_chg;
	int i;
	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);

	if (ch_index == 0xff)
		return 0xff;

	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
		if (!txp_chg)
			return 0xff;
		/*
		 * Looking for the first channel group the max channel
		 * of which is higher than the requested channel.
		 */
		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
			return i;
	}
	return 0xff;
}
   2438 
/*
 * Fetch the stored data/size for a PHY DB section.  For channel-group
 * section types the group index is derived from "ch_id"; other types
 * ignore it.  On success *data/*size point at the stored entry (not a
 * copy).  Returns 0 on success or EINVAL for an unknown section.
 */
static int
iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
    uint16_t *size, uint16_t ch_id)
{
	struct iwm_phy_db_entry *entry;
	uint16_t ch_group_id = 0;

	/* Translate channel number to the type-specific group index. */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
		ch_group_id = iwm_channel_id_to_papd(ch_id);
	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);

	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
	if (!entry)
		return EINVAL;

	*data = entry->data;
	*size = entry->size;

	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
		       __func__, __LINE__, type, *size));

	return 0;
}
   2463 
   2464 static int
   2465 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
   2466     void *data)
   2467 {
   2468 	struct iwm_phy_db_cmd phy_db_cmd;
   2469 	struct iwm_host_cmd cmd = {
   2470 		.id = IWM_PHY_DB_CMD,
   2471 		.flags = IWM_CMD_ASYNC,
   2472 	};
   2473 
   2474 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
   2475 	    type, length));
   2476 
   2477 	phy_db_cmd.type = le16toh(type);
   2478 	phy_db_cmd.length = le16toh(length);
   2479 
   2480 	cmd.data[0] = &phy_db_cmd;
   2481 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
   2482 	cmd.data[1] = data;
   2483 	cmd.len[1] = length;
   2484 
   2485 	return iwm_send_cmd(sc, &cmd);
   2486 }
   2487 
/*
 * Send every stored channel group of section "type" (up to
 * "max_ch_groups" of them) to the firmware, skipping empty entries.
 * Returns 0 on success, EINVAL for an unknown section, or the first
 * send error.
 */
static int
iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
    enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
{
	uint16_t i;
	int err;
	struct iwm_phy_db_entry *entry;

	/* Send all the channel-specific groups to operational fw */
	for (i = 0; i < max_ch_groups; i++) {
		entry = iwm_phy_db_get_section(sc, type, i);
		if (!entry)
			return EINVAL;

		/* Groups with no stored calibration data are skipped. */
		if (!entry->size)
			continue;

		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
		if (err) {
			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
			    "err %d\n", DEVNAME(sc), type, i, err));
			return err;
		}

		DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
		    DEVNAME(sc), type, i));

		/* Pace the async commands. */
		DELAY(1000);
	}

	return 0;
}
   2520 
/*
 * Push the whole PHY DB to the firmware: the config section, the
 * non-channel calibration section, then all PAPD and TXP channel
 * groups.  Returns 0 on success or the first failing step's errno.
 */
static int
iwm_send_phy_db_data(struct iwm_softc *sc)
{
	uint8_t *data = NULL;
	uint16_t size = 0;
	int err;

	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
	if (err)
		return err;

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
	if (err)
		return err;

	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
	    &data, &size, 0);
	if (err)
		return err;

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
	if (err)
		return err;

	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
	if (err)
		return err;

	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
	if (err)
		return err;

	return 0;
}
   2557 
/*
 * Send a time-event command and wait for the firmware's response.
 * On a successful response, records the event's unique id in
 * sc_time_event_uid.  Returns 0 on success, EIO for a failed or
 * malformed response, or an errno from the send itself.
 */
static int
iwm_send_time_event_cmd(struct iwm_softc *sc,
    const struct iwm_time_event_cmd *cmd)
{
	struct iwm_rx_packet *pkt;
	struct iwm_time_event_resp *resp;
	struct iwm_host_cmd hcmd = {
		.id = IWM_TIME_EVENT_CMD,
		.flags = IWM_CMD_WANT_RESP,
		.resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
	};
	uint32_t resp_len;
	int err;

	hcmd.data[0] = cmd;
	hcmd.len[0] = sizeof(*cmd);
	err = iwm_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	/* The response must be exactly one iwm_time_event_resp. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		err = EIO;
		goto out;
	}

	resp = (void *)pkt->data;
	if (le32toh(resp->status) == 0)
		sc->sc_time_event_uid = le32toh(resp->unique_id);
	else
		err = EIO;
out:
	/* Release the response buffer in all cases. */
	iwm_free_resp(sc, &hcmd);
	return err;
}
   2599 
/*
 * Schedule a session-protection time event for association with the
 * node "in": ask the firmware to reserve "duration" on the medium,
 * starting within "max_delay".  No-op if a time event is already
 * active; on success the IWM_FLAG_TE_ACTIVE flag is set.
 */
static void
iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd time_cmd;

	/* Do nothing if a time event is already scheduled. */
	if (ISSET(sc->sc_flags, IWM_FLAG_TE_ACTIVE))
		return;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	time_cmd.apply_time = htole32(0);

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
		IWM_TE_V2_START_IMMEDIATELY);

	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
		SET(sc->sc_flags, IWM_FLAG_TE_ACTIVE);

	DELAY(100);
}
   2635 
/*
 * Remove the currently active session-protection time event (matched
 * by the uid recorded in sc_time_event_uid).  No-op if no time event
 * is active; on success the IWM_FLAG_TE_ACTIVE flag is cleared.
 */
static void
iwm_unprotect_session(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_event_cmd time_cmd;

	/* Do nothing if the time event has already ended. */
	if ((sc->sc_flags & IWM_FLAG_TE_ACTIVE) == 0)
		return;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(sc->sc_time_event_uid);

	if (iwm_send_time_event_cmd(sc, &time_cmd) == 0)
		CLR(sc->sc_flags, IWM_FLAG_TE_ACTIVE);

	DELAY(100);
}
   2657 
   2658 /*
   2659  * NVM read access and content parsing.  We do not support
   2660  * external NVM or writing NVM.
   2661  */
   2662 
/*
 * List of NVM sections we are allowed/need to read.
 * Sections that fail to read (e.g. those not present on a given
 * device family) are simply skipped by iwm_nvm_init().
 */
static const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	IWM_NVM_SECTION_TYPE_REGULATORY_SDP,
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};
   2675 
   2676 /* Default NVM size to read */
   2677 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
   2678 #define IWM_MAX_NVM_SECTION_SIZE_7000	(16 * 512 * sizeof(uint16_t)) /*16 KB*/
   2679 #define IWM_MAX_NVM_SECTION_SIZE_8000	(32 * 512 * sizeof(uint16_t)) /*32 KB*/
   2680 
   2681 #define IWM_NVM_WRITE_OPCODE 1
   2682 #define IWM_NVM_READ_OPCODE 0
   2683 
   2684 static int
   2685 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
   2686     uint16_t length, uint8_t *data, uint16_t *len)
   2687 {
   2688 	offset = 0;
   2689 	struct iwm_nvm_access_cmd nvm_access_cmd = {
   2690 		.offset = htole16(offset),
   2691 		.length = htole16(length),
   2692 		.type = htole16(section),
   2693 		.op_code = IWM_NVM_READ_OPCODE,
   2694 	};
   2695 	struct iwm_nvm_access_resp *nvm_resp;
   2696 	struct iwm_rx_packet *pkt;
   2697 	struct iwm_host_cmd cmd = {
   2698 		.id = IWM_NVM_ACCESS_CMD,
   2699 		.flags = (IWM_CMD_WANT_RESP | IWM_CMD_SEND_IN_RFKILL),
   2700 		.resp_pkt_len = IWM_CMD_RESP_MAX,
   2701 		.data = { &nvm_access_cmd, },
   2702 	};
   2703 	int err, offset_read;
   2704 	size_t bytes_read;
   2705 	uint8_t *resp_data;
   2706 
   2707 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
   2708 
   2709 	err = iwm_send_cmd(sc, &cmd);
   2710 	if (err) {
   2711 		DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
   2712 		    DEVNAME(sc), err));
   2713 		return err;
   2714 	}
   2715 
   2716 	pkt = cmd.resp_pkt;
   2717 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   2718 		err = EIO;
   2719 		goto exit;
   2720 	}
   2721 
   2722 	/* Extract NVM response */
   2723 	nvm_resp = (void *)pkt->data;
   2724 	if (nvm_resp == NULL) {
   2725 		err = EIO;
   2726 		goto exit;
   2727 	}
   2728 
   2729 	err = le16toh(nvm_resp->status);
   2730 	bytes_read = le16toh(nvm_resp->length);
   2731 	offset_read = le16toh(nvm_resp->offset);
   2732 	resp_data = nvm_resp->data;
   2733 	if (err) {
   2734 		err = EINVAL;
   2735 		goto exit;
   2736 	}
   2737 
   2738 	if (offset_read != offset) {
   2739 		err = EINVAL;
   2740 		goto exit;
   2741 	}
   2742 	if (bytes_read > length) {
   2743 		err = EINVAL;
   2744 		goto exit;
   2745 	}
   2746 
   2747 	memcpy(data + offset, resp_data, bytes_read);
   2748 	*len = bytes_read;
   2749 
   2750  exit:
   2751 	iwm_free_resp(sc, &cmd);
   2752 	return err;
   2753 }
   2754 
   2755 /*
   2756  * Reads an NVM section completely.
   2757  * NICs prior to 7000 family doesn't have a real NVM, but just read
   2758  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
   2759  * by uCode, we need to manually check in this case that we don't
   2760  * overflow and try to read more than the EEPROM size.
   2761  */
   2762 static int
   2763 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
   2764     uint16_t *len, size_t max_len)
   2765 {
   2766 	uint16_t chunklen, seglen;
   2767 	int err;
   2768 
   2769 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
   2770 	*len = 0;
   2771 
   2772 	/* Read NVM chunks until exhausted (reading less than requested) */
   2773 	while (seglen == chunklen && *len < max_len) {
   2774 		err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
   2775 		    &seglen);
   2776 		if (err) {
   2777 			DPRINTF(("%s: Cannot read NVM from section %d "
   2778 			    "offset %d, length %d\n",
   2779 			    DEVNAME(sc), section, *len, chunklen));
   2780 			return err;
   2781 		}
   2782 		*len += seglen;
   2783 	}
   2784 
   2785 	DPRINTFN(4, ("NVM section %d read completed\n", section));
   2786 	return 0;
   2787 }
   2788 
   2789 static uint8_t
   2790 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
   2791 {
   2792 	uint8_t tx_ant;
   2793 
   2794 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
   2795 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
   2796 
   2797 	if (sc->sc_nvm.valid_tx_ant)
   2798 		tx_ant &= sc->sc_nvm.valid_tx_ant;
   2799 
   2800 	return tx_ant;
   2801 }
   2802 
   2803 static uint8_t
   2804 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
   2805 {
   2806 	uint8_t rx_ant;
   2807 
   2808 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
   2809 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
   2810 
   2811 	if (sc->sc_nvm.valid_rx_ant)
   2812 		rx_ant &= sc->sc_nvm.valid_rx_ant;
   2813 
   2814 	return rx_ant;
   2815 }
   2816 
/*
 * Populate ic->ic_channels[] from an NVM channel flags array.
 * nvm_ch_flags holds one little-endian 16-bit flag word per entry of
 * nvm_channels; entries at index >= IWM_NUM_2GHZ_CHANNELS describe
 * 5 GHz channels.  Channels not marked valid are skipped.
 */
static void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
    const uint8_t *nvm_channels, size_t nchan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
		aprint_debug_dev(sc->sc_dev,
		    "Ch. %d: %svalid %cibss %s %cradar %cdfs"
		    " %cwide %c40MHz %c80MHz %c160MHz\n",
		    nvm_channels[ch_idx],
		    ch_flags & IWM_NVM_CHANNEL_VALID ? "" : "in",
		    ch_flags & IWM_NVM_CHANNEL_IBSS ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_ACTIVE ? "active" : "passive",
		    ch_flags & IWM_NVM_CHANNEL_RADAR ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_DFS ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_WIDE ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_40MHZ ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_80MHZ ? '+' : '-',
		    ch_flags & IWM_NVM_CHANNEL_160MHZ ? '+' : '-');

		/* Treat 5 GHz channels as invalid if the SKU disables them. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
			    nvm_channels[ch_idx], ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? "5" : "2.4"));
			continue;
		}

		hw_value = nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Channels not marked active may only be scanned passively. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;

#ifndef IEEE80211_NO_HT
		if (data->sku_cap_11n_enable)
			channel->ic_flags |= IEEE80211_CHAN_HT;
#endif
	}
}
   2883 
   2884 static int
   2885 iwm_mimo_enabled(struct iwm_softc *sc)
   2886 {
   2887 #ifndef IEEE80211_NO_HT
   2888         struct ieee80211com *ic = &sc->sc_ic;
   2889 
   2890         return !sc->sc_nvm.sku_cap_mimo_disable &&
   2891             (ic->ic_userflags & IEEE80211_F_NOMIMO) == 0;
   2892 #else
   2893         return 0;
   2894 #endif
   2895 }
   2896 
   2897 #ifndef IEEE80211_NO_HT
/*
 * Advertise the HT MCS rates we support.  Only single-stream
 * (MCS 0-7) is enabled; multi-stream support is stubbed out below.
 */
static void
iwm_setup_ht_rates(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;

	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */

#ifdef notyet
	/* Multi-stream MCS, gated on the number of valid RX antennas. */
	if (sc->sc_nvm.sku_cap_mimo_disable)
		return;

	if (iwm_fw_valid_rx_ant(sc) > 1)
		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
	if (iwm_fw_valid_rx_ant(sc) > 2)
		ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
#endif
}
   2918 
   2919 #define IWM_MAX_RX_BA_SESSIONS 16
   2920 
   2921 static void
   2922 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
   2923     uint16_t ssn, int start)
   2924 {
   2925 	struct ieee80211com *ic = &sc->sc_ic;
   2926 	struct iwm_add_sta_cmd cmd;
   2927 	struct iwm_node *in = (struct iwm_node *)ni;
   2928 	int err, s;
   2929 	uint32_t status;
   2930 	size_t cmdsize;
   2931 
   2932 	s = splnet();
   2933 
   2934 	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
   2935 		ieee80211_addba_req_refuse(ic, ni, tid);
   2936 		splx(s);
   2937 		return;
   2938 	}
   2939 
   2940 	memset(&cmd, 0, sizeof(cmd));
   2941 
   2942 	cmd.sta_id = IWM_STATION_ID;
   2943 	cmd.mac_id_n_color
   2944 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   2945 	cmd.add_modify = IWM_STA_MODE_MODIFY;
   2946 
   2947 	if (start) {
   2948 		cmd.add_immediate_ba_tid = (uint8_t)tid;
   2949 		cmd.add_immediate_ba_ssn = ssn;
   2950 	} else {
   2951 		cmd.remove_immediate_ba_tid = (uint8_t)tid;
   2952 	}
   2953 	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
   2954 	    IWM_STA_MODIFY_REMOVE_BA_TID;
   2955 
   2956 	status = IWM_ADD_STA_SUCCESS;
   2957 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
   2958 		cmdsize = sizeof(cmd);
   2959 	else
   2960 		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
   2961 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
   2962 	    &status);
   2963 	if (!err && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
   2964 		err = EIO;
   2965 	if (err) {
   2966 		if (start)
   2967 			ieee80211_addba_req_refuse(ic, ni, tid);
   2968 		splx(s);
   2969 		return err;
   2970 	}
   2971 
   2972 	if (start) {
   2973 		sc->sc_rx_ba_sessions++;
   2974 		ieee80211_addba_req_accept(ic, ni, tid);
   2975 	} else if (sc->sc_rx_ba_sessions > 0)
   2976 		sc->sc_rx_ba_sessions--;
   2977 	splx(s);
   2978 }
   2979 
   2980 static void
   2981 iwm_htprot_task(void *arg)
   2982 {
   2983 	struct iwm_softc *sc = arg;
   2984 	struct ieee80211com *ic = &sc->sc_ic;
   2985 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
   2986 	int err;
   2987 
   2988 	/* This call updates HT protection based on in->in_ni.ni_htop1. */
   2989 	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
   2990 	if (err)
   2991 		aprint_error_dev(sc->sc_dev,
   2992 		    "could not change HT protection: error %d\n", err);
   2993 }
   2994 
   2995 /*
   2996  * This function is called by upper layer when HT protection settings in
   2997  * beacons have changed.
   2998  */
   2999 static void
   3000 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
   3001 {
   3002 	struct iwm_softc *sc = ic->ic_softc;
   3003 
   3004 	/* assumes that ni == ic->ic_bss */
   3005 	task_add(systq, &sc->htprot_task);
   3006 }
   3007 
   3008 static void
   3009 iwm_ba_task(void *arg)
   3010 {
   3011 	struct iwm_softc *sc = arg;
   3012 	struct ieee80211com *ic = &sc->sc_ic;
   3013 	struct ieee80211_node *ni = ic->ic_bss;
   3014 
   3015 	if (sc->ba_start)
   3016 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
   3017 	else
   3018 		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
   3019 }
   3020 
   3021 /*
   3022  * This function is called by upper layer when an ADDBA request is received
   3023  * from another STA and before the ADDBA response is sent.
   3024  */
   3025 static int
   3026 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
   3027     uint8_t tid)
   3028 {
   3029 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
   3030 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
   3031 
   3032 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
   3033 		return ENOSPC;
   3034 
   3035 	sc->ba_start = 1;
   3036 	sc->ba_tid = tid;
   3037 	sc->ba_ssn = htole16(ba->ba_winstart);
   3038 	task_add(systq, &sc->ba_task);
   3039 
   3040 	return EBUSY;
   3041 }
   3042 
   3043 /*
   3044  * This function is called by upper layer on teardown of an HT-immediate
   3045  * Block Ack agreement (eg. upon receipt of a DELBA frame).
   3046  */
   3047 static void
   3048 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
   3049     uint8_t tid)
   3050 {
   3051 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
   3052 
   3053 	sc->ba_start = 0;
   3054 	sc->ba_tid = tid;
   3055 	task_add(systq, &sc->ba_task);
   3056 }
   3057 #endif
   3058 
   3059 static void
   3060 iwm_free_fw_paging(struct iwm_softc *sc)
   3061 {
   3062 	int i;
   3063 
   3064 	if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
   3065 		return;
   3066 
   3067 	for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
   3068 		iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
   3069 	}
   3070 
   3071 	memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
   3072 }
   3073 
/*
 * Copy the firmware's CPU2 paging image (CSS block plus paging blocks)
 * from the firmware sections into the DMA memory set up by
 * iwm_alloc_fw_paging_mem().  Returns EINVAL if the expected paging
 * sections are missing from the image.
 */
static int
iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
{
	int sec_idx, idx;
	uint32_t offset = 0;

	/*
	 * find where is the paging image start point:
	 * if CPU2 exist and it's in paging format, then the image looks like:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
	 * non paged to CPU2 paging sec
	 * CPU2 paging CSS
	 * CPU2 paging image (including instruction and data)
	 */
	for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
		if (fws->fw_sect[sec_idx].fws_devoff ==
		    IWM_PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	/*
	 * If paging is enabled there should be at least 2 more sections left
	 * (one for CSS and one for Paging data)
	 */
	if (sec_idx >= __arraycount(fws->fw_sect) - 1) {
		aprint_error_dev(sc->sc_dev,
		    "Paging: Missing CSS and/or paging sections\n");
		iwm_free_fw_paging(sc);
		return EINVAL;
	}

	/* copy the CSS block to the dram */
	DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n", DEVNAME(sc),
	    sec_idx));

	memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
	    fws->fw_sect[sec_idx].fws_data, sc->fw_paging_db[0].fw_paging_size);

	DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
	    DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));

	/* The paging data follows the CSS in the same firmware section. */
	sec_idx++;

	/*
	 * copy the paging blocks to the dram
	 * loop index start from 1 since that CSS block already copied to dram
	 * and CSS index is 0.
	 * loop stop at num_of_paging_blk since that last block is not full.
	 */
	for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
		       (const char *)fws->fw_sect[sec_idx].fws_data + offset,
		       sc->fw_paging_db[idx].fw_paging_size);

		DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
		    DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));

		offset += sc->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (sc->num_of_pages_in_last_blk > 0) {
		memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
		    (const char *)fws->fw_sect[sec_idx].fws_data + offset,
		    IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);

		DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
		    DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
	}

	return 0;
}
   3151 
/*
 * Allocate DMA memory for firmware paging: one 4 KB block for the CSS
 * plus num_of_paging_blk blocks of IWM_PAGING_BLOCK_SIZE.  If the
 * blocks already exist (device reset), they are reused after syncing.
 * Returns ENOMEM on allocation failure (all partial allocations are
 * freed).
 */
static int
iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
{
	int blk_idx = 0;
	int error, num_of_pages;
	bus_dmamap_t dmap;

	if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
		int i;
		/* Device got reset, and we setup firmware paging again */
		bus_dmamap_sync(sc->sc_dmat,
		    sc->fw_paging_db[0].fw_paging_block.map,
		    0, IWM_FW_PAGING_SIZE,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
		for (i = 0; i < sc->num_of_paging_blk + 1; i++) {
			dmap = sc->fw_paging_db[i].fw_paging_block.map;
			bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
		}
		return 0;
	}

	/* ensure IWM_BLOCK_2_EXP_SIZE is power of 2 of IWM_PAGING_BLOCK_SIZE */
	CTASSERT(__BIT(IWM_BLOCK_2_EXP_SIZE) == IWM_PAGING_BLOCK_SIZE);

	num_of_pages = fws->paging_mem_size / IWM_FW_PAGING_SIZE;
	sc->num_of_paging_blk =
	    ((num_of_pages - 1) / IWM_NUM_OF_PAGE_PER_GROUP) + 1;

	/* Pages left over after the full blocks go into the last block. */
	sc->num_of_pages_in_last_blk =
	    num_of_pages -
	    IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);

	DPRINTF(("%s: Paging: allocating mem for %d paging blocks, "
	    "each block holds 8 pages, last block holds %d pages\n",
	    DEVNAME(sc), sc->num_of_paging_blk, sc->num_of_pages_in_last_blk));

	/* allocate block of 4Kbytes for paging CSS */
	error = iwm_dma_contig_alloc(sc->sc_dmat,
	    &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
	    4096);
	if (error) {
		/* free all the previous pages since we failed */
		iwm_free_fw_paging(sc);
		return ENOMEM;
	}

	sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;

	DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
	    DEVNAME(sc)));

	/*
	 * allocate blocks in dram.
	 * since that CSS allocated in fw_paging_db[0] loop start from index 1
	 */
	for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
		/* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
		/* XXX Use iwm_dma_contig_alloc for allocating */
		error = iwm_dma_contig_alloc(sc->sc_dmat,
		    &sc->fw_paging_db[blk_idx].fw_paging_block,
		    IWM_PAGING_BLOCK_SIZE, 4096);
		if (error) {
			/* free all the previous pages since we failed */
			iwm_free_fw_paging(sc);
			return ENOMEM;
		}

		sc->fw_paging_db[blk_idx].fw_paging_size =
		    IWM_PAGING_BLOCK_SIZE;

		DPRINTF(("%s: Paging: allocated 32K bytes for firmware "
		    "paging.\n", DEVNAME(sc)));
	}

	return 0;
}
   3229 
/*
 * Set up firmware paging: allocate (or reuse) the DMA blocks, then
 * copy the paging image into them.
 */
static int
iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
{
	int err;

	err = iwm_alloc_fw_paging_mem(sc, fws);
	return err != 0 ? err : iwm_fill_paging_mem(sc, fws);
}
   3241 
   3242 static bool
   3243 iwm_has_new_tx_api(struct iwm_softc *sc)
   3244 {
   3245 	/* XXX */
   3246 	return false;
   3247 }
   3248 
/*
 * Send the FW_PAGING_BLOCK_CMD to the firmware, describing the DMA
 * addresses of the CSS and paging blocks, in case CPU2 has a paging
 * image.  Blocks are synced for device reads before the command is
 * issued.
 */
static int
iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
{
	struct iwm_fw_paging_cmd fw_paging_cmd = {
		.flags = htole32(IWM_PAGING_CMD_IS_SECURED |
		                 IWM_PAGING_CMD_IS_ENABLED |
		                 (sc->num_of_pages_in_last_blk <<
		                  IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
		.block_num = htole32(sc->num_of_paging_blk),
	};
	size_t size = sizeof(fw_paging_cmd);
	int blk_idx;
	bus_dmamap_t dmap;

	/* Without the new TX API the address array is 32-bit, not 64-bit. */
	if (!iwm_has_new_tx_api(sc))
		size -= (sizeof(uint64_t) - sizeof(uint32_t)) *
		    IWM_NUM_OF_FW_PAGING_BLOCKS;

	/* loop for all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
		bus_addr_t dev_phy_addr =
		    sc->fw_paging_db[blk_idx].fw_paging_block.paddr;
		if (iwm_has_new_tx_api(sc)) {
			fw_paging_cmd.device_phy_addr.addr64[blk_idx] =
			    htole64(dev_phy_addr);
		} else {
			/* 32-bit format carries page-aligned addresses. */
			dev_phy_addr = dev_phy_addr >> IWM_PAGE_2_EXP_SIZE;
			fw_paging_cmd.device_phy_addr.addr32[blk_idx] =
			    htole32(dev_phy_addr);
		}
		dmap = sc->fw_paging_db[blk_idx].fw_paging_block.map;
		bus_dmamap_sync(sc->sc_dmat, dmap, 0,
		    blk_idx == 0 ? IWM_FW_PAGING_SIZE : IWM_PAGING_BLOCK_SIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}

	return iwm_send_cmd_pdu(sc,
	    iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD, IWM_LONG_GROUP, 0),
	    0, size, &fw_paging_cmd);
}
   3291 
/*
 * Determine the MAC address on 8000-family devices: prefer the
 * MAC_OVERRIDE NVM section, fall back to the OTP (WFMP registers) if
 * the override is reserved/invalid, and zero the address with an error
 * message if neither source yields one.
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	/* Intel's reserved placeholder address; means "use the OTP". */
	static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
	};
	const uint8_t *hw_addr;

	if (mac_override) {
		hw_addr = (const uint8_t *)(mac_override +
		    IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
		    (memcmp(etherbroadcastaddr, data->hw_addr,
		    sizeof(etherbroadcastaddr)) != 0) &&
		    (memcmp(etheranyaddr, data->hw_addr,
		    sizeof(etheranyaddr)) != 0) &&
		    !ETHER_IS_MULTICAST(data->hw_addr))
			return;
	}

	if (nvm_hw) {
		/* Read the mac address from WFMP registers. */
		uint32_t mac_addr0, mac_addr1;

		if (!iwm_nic_lock(sc))
			goto out;
		mac_addr0 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		mac_addr1 = htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
		iwm_nic_unlock(sc);

		/* Register bytes are stored in reverse order. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}
	/* Falls through here when nvm_hw is NULL. */
out:
	aprint_error_dev(sc->sc_dev, "mac address not found\n");
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
   3350 
/*
 * Parse the raw NVM sections into sc->sc_nvm: radio configuration,
 * SKU capability bits, number of hardware addresses, the MAC address,
 * and the channel map.  The set of sections consulted depends on the
 * device family (7000 vs 8000).
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
    const uint16_t *nvm_sw, const uint16_t *nvm_calib,
    const uint16_t *mac_override, const uint16_t *phy_sku,
    const uint16_t *regulatory, int n_regulatory)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint32_t sku;

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* 7000: radio config and SKU live in the SW section. */
		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

		data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
		sku = le16_to_cpup(nvm_sw + IWM_SKU);
	} else {
		/* 8000: radio config and SKU live in the PHY_SKU section. */
		uint32_t radio_cfg = le32_to_cpup(phy_sku + IWM_RADIO_CFG_8000);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);

		data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION_8000);
		sku = le32_to_cpup(phy_sku + IWM_SKU_8000);
	}

	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
	data->sku_cap_11ac_enable = sku & IWM_NVM_SKU_CAP_11AC_ENABLE;
	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;

	if (sc->sc_device_family >= IWM_DEVICE_FAMILY_8000) {
#if 0
		uint16_t lar_offset = data->nvm_version < 0xE39 ?
				       IWM_NVM_LAR_OFFSET_8000_OLD :
				       IWM_NVM_LAR_OFFSET_8000;
		uint16_t lar_config;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
				       IWM_NVM_LAR_ENABLED_8000);
#else
		/* Location-aware regulatory is not supported. */
		data->lar_enabled = 0;
#endif
		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS_8000);
	} else
		data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/* The byte order is little endian 16 bit, meaning 214365 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else
		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);

	/* The channel flags come from a family-dependent section. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (sc->nvm_type == IWM_NVM_SDP) {
			iwm_init_channel_map(sc, regulatory, iwm_nvm_channels,
			    MIN(n_regulatory, __arraycount(iwm_nvm_channels)));
		} else {
			iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
			    iwm_nvm_channels, __arraycount(iwm_nvm_channels));
		}
	} else
		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
		    iwm_nvm_channels_8000,
		    MIN(n_regulatory, __arraycount(iwm_nvm_channels_8000)));

	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
   3439 
   3440 static int
   3441 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
   3442 {
   3443 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
   3444 	const uint16_t *regulatory = NULL;
   3445 	int n_regulatory = 0;
   3446 
   3447 	/* Checking for required sections */
   3448 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
   3449 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
   3450 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
   3451 			return ENOENT;
   3452 		}
   3453 
   3454 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
   3455 
   3456 		if (sc->nvm_type == IWM_NVM_SDP) {
   3457 			if (!sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data)
   3458 				return ENOENT;
   3459 			regulatory = (const uint16_t *)
   3460 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data;
   3461 			n_regulatory =
   3462 			    sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].length
   3463 ;
   3464 		}
   3465 	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
   3466 		/* SW and REGULATORY sections are mandatory */
   3467 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
   3468 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
   3469 			return ENOENT;
   3470 		}
   3471 		/* MAC_OVERRIDE or at least HW section must exist */
   3472 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
   3473 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
   3474 			return ENOENT;
   3475 		}
   3476 
   3477 		/* PHY_SKU section is mandatory in B0 */
   3478 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
   3479 			return ENOENT;
   3480 		}
   3481 
   3482 		regulatory = (const uint16_t *)
   3483 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
   3484 		n_regulatory = sections[IWM_NVM_SECTION_TYPE_REGULATORY].length;
   3485 		hw = (const uint16_t *)
   3486 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
   3487 		mac_override =
   3488 			(const uint16_t *)
   3489 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
   3490 		phy_sku = (const uint16_t *)
   3491 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
   3492 	} else {
   3493 		panic("unknown device family %d\n", sc->sc_device_family);
   3494 	}
   3495 
   3496 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
   3497 	calib = (const uint16_t *)
   3498 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
   3499 
   3500 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
   3501 	    phy_sku, regulatory, n_regulatory);
   3502 }
   3503 
   3504 static int
   3505 iwm_nvm_init(struct iwm_softc *sc)
   3506 {
   3507 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
   3508 	int i, section, err;
   3509 	uint16_t len;
   3510 	uint8_t *buf;
   3511 	const size_t bufsz = (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) ?
   3512 	    IWM_MAX_NVM_SECTION_SIZE_8000 : IWM_MAX_NVM_SECTION_SIZE_7000;
   3513 
   3514 	/* Read From FW NVM */
   3515 	DPRINTF(("Read NVM\n"));
   3516 
   3517 	memset(nvm_sections, 0, sizeof(nvm_sections));
   3518 
   3519 	buf = kmem_alloc(bufsz, KM_SLEEP);
   3520 
   3521 	for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
   3522 		section = iwm_nvm_to_read[i];
   3523 		KASSERT(section < IWM_NVM_NUM_OF_SECTIONS);
   3524 
   3525 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
   3526 		if (err) {
   3527 			err = 0;
   3528 			continue;
   3529 		}
   3530 		nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
   3531 		memcpy(nvm_sections[section].data, buf, len);
   3532 		nvm_sections[section].length = len;
   3533 	}
   3534 	kmem_free(buf, bufsz);
   3535 	err = iwm_parse_nvm_sections(sc, nvm_sections);
   3536 
   3537 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
   3538 		if (nvm_sections[i].data != NULL)
   3539 			kmem_free(nvm_sections[i].data, nvm_sections[i].length);
   3540 	}
   3541 
   3542 	return err;
   3543 }
   3544 
   3545 static int
   3546 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
   3547     const uint8_t *section, uint32_t byte_cnt)
   3548 {
   3549 	int err = EINVAL;
   3550 	uint32_t chunk_sz, offset;
   3551 
   3552 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
   3553 
   3554 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
   3555 		uint32_t addr, len;
   3556 		const uint8_t *data;
   3557 
   3558 		addr = dst_addr + offset;
   3559 		len = MIN(chunk_sz, byte_cnt - offset);
   3560 		data = section + offset;
   3561 
   3562 		err = iwm_firmware_load_chunk(sc, addr, data, len);
   3563 		if (err)
   3564 			break;
   3565 	}
   3566 
   3567 	return err;
   3568 }
   3569 
   3570 static int
   3571 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
   3572     const uint8_t *section, uint32_t byte_cnt)
   3573 {
   3574 	struct iwm_dma_info *dma = &sc->fw_dma;
   3575 	int err;
   3576 
   3577 	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
   3578 	memcpy(dma->vaddr, section, byte_cnt);
   3579 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
   3580 	    BUS_DMASYNC_PREWRITE);
   3581 
   3582 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
   3583 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
   3584 		err = iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
   3585 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
   3586 		if (err)
   3587 			goto done;
   3588 	}
   3589 
   3590 	sc->sc_fw_chunk_done = 0;
   3591 
   3592 	if (!iwm_nic_lock(sc)) {
   3593 		err = EBUSY;
   3594 		goto done;
   3595 	}
   3596 
   3597 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
   3598 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
   3599 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
   3600 	    dst_addr);
   3601 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
   3602 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
   3603 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
   3604 	    (iwm_get_dma_hi_addr(dma->paddr)
   3605 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
   3606 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
   3607 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
   3608 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
   3609 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
   3610 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
   3611 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
   3612 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
   3613 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
   3614 
   3615 	iwm_nic_unlock(sc);
   3616 
   3617 	/* Wait for this segment to load. */
   3618 	err = 0;
   3619 	while (!sc->sc_fw_chunk_done) {
   3620 		err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(1000));
   3621 		if (err)
   3622 			break;
   3623 	}
   3624 	if (!sc->sc_fw_chunk_done) {
   3625 		DPRINTF(("%s: fw chunk addr 0x%x len %d failed to load\n",
   3626 		    DEVNAME(sc), dst_addr, byte_cnt));
   3627 	}
   3628 
   3629 	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
   3630 	    dst_addr <= IWM_FW_MEM_EXTENDED_END) {
   3631 		int err2 = iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
   3632 		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
   3633 		if (!err)
   3634 			err = err2;
   3635 	}
   3636 
   3637 done:
   3638 	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
   3639 	    BUS_DMASYNC_POSTWRITE);
   3640 
   3641 	return err;
   3642 }
   3643 
   3644 static int
   3645 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3646 {
   3647 	struct iwm_fw_sects *fws;
   3648 	int err, i;
   3649 	void *data;
   3650 	uint32_t dlen;
   3651 	uint32_t offset;
   3652 
   3653 	fws = &sc->sc_fw.fw_sects[ucode_type];
   3654 	for (i = 0; i < fws->fw_count; i++) {
   3655 		data = fws->fw_sect[i].fws_data;
   3656 		dlen = fws->fw_sect[i].fws_len;
   3657 		offset = fws->fw_sect[i].fws_devoff;
   3658 		if (dlen > sc->sc_fwdmasegsz) {
   3659 			err = EFBIG;
   3660 		} else
   3661 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
   3662 		if (err) {
   3663 			printf("%s: could not load firmware chunk %u of %u"
   3664 			    " (error %d)\n",DEVNAME(sc), i, fws->fw_count, err);
   3665 			return err;
   3666 		}
   3667 	}
   3668 
   3669 	iwm_enable_interrupts(sc);
   3670 
   3671 	/* release CPU reset */
   3672 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
   3673 
   3674 	return 0;
   3675 }
   3676 
/*
 * Upload the firmware sections belonging to one CPU of an 8000-family
 * device and acknowledge each section to the ucode through the
 * IWM_FH_UCODE_LOAD_STATUS register.  CPU 1 uses the low 16 status
 * bits (shift 0), CPU 2 the high 16 (shift 16).  *first_ucode_section
 * is the section index to resume from and is updated to where this
 * CPU's sections ended so the subsequent CPU 2 call continues there.
 */
static int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, err = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		/* Skip the separator entry that ended the CPU 1 run. */
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		/* Section must fit in the pre-allocated firmware DMA area. */
		if (dlen > sc->sc_fwdmasegsz) {
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			DPRINTF(("%s: could not load firmware chunk %d "
			    "(error %d)\n", DEVNAME(sc), i, err));
			return err;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Next section sets one more low bit: 1,3,7,... */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);
		} else {
			err = EBUSY;
			DPRINTF(("%s: could not load firmware chunk %d "
			    "(error %d)\n", DEVNAME(sc), i, err));
			return err;
		}
	}

	*first_ucode_section = last_read_idx;

	/* Tell the ucode this CPU's half of the status word is complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	} else {
		err = EBUSY;
		DPRINTF(("%s: could not finalize firmware loading (error %d)\n",
		  DEVNAME(sc), err));
		return err;
	}

	return 0;
}
   3754 
   3755 static int
   3756 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3757 {
   3758 	struct iwm_fw_sects *fws;
   3759 	int err = 0;
   3760 	int first_ucode_section;
   3761 
   3762 	fws = &sc->sc_fw.fw_sects[ucode_type];
   3763 
   3764 	/* configure the ucode to be ready to get the secured image */
   3765 	/* release CPU reset */
   3766 	if (iwm_nic_lock(sc)) {
   3767 		iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
   3768 		    IWM_RELEASE_CPU_RESET_BIT);
   3769 		iwm_nic_unlock(sc);
   3770 	}
   3771 
   3772 	/* load to FW the binary Secured sections of CPU1 */
   3773 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
   3774 	if (err)
   3775 		return err;
   3776 
   3777 	/* load to FW the binary sections of CPU2 */
   3778 	err = iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
   3779 	if (err)
   3780 		return err;
   3781 
   3782 	iwm_enable_interrupts(sc);
   3783 	return 0;
   3784 }
   3785 
/*
 * Load the requested ucode image and wait for the firmware's "alive"
 * notification (sc_uc.uc_intr, set from the interrupt path), sleeping
 * in 100ms slices for up to ~1 second.
 */
static int
iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err, w;

	sc->sc_uc.uc_intr = 0;

	/* Family 8000 uses the secured two-CPU load path. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		err = iwm_load_firmware_8000(sc, ucode_type);
	else
		err = iwm_load_firmware_7000(sc, ucode_type);
	if (err)
		return err;

	/* wait for the firmware to load */
	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
		err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
	if (err || !sc->sc_uc.uc_ok) {
		aprint_error_dev(sc->sc_dev,
		    "could not load firmware (error %d, ok %d)\n",
		    err, sc->sc_uc.uc_ok);
		/* The secure-boot CPU status registers may explain why. */
		if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
			aprint_error_dev(sc->sc_dev, "cpu1 status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
			aprint_error_dev(sc->sc_dev, "cpu2 status: 0x%x\n",
			    iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
		}
	}

	return err;
}
   3817 
/*
 * Bring the NIC to a state where firmware can be uploaded: init the
 * hardware, clear the rfkill handshake bits, enable the firmware-load
 * interrupt, then hand off to iwm_load_firmware().
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err;

	/* Ack any pending interrupts before starting. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	err = iwm_nic_init(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return err;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable firmware load interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_fwload_interrupt(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	return iwm_load_firmware(sc, ucode_type);
}
   3847 
   3848 static int
   3849 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
   3850 {
   3851 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
   3852 		.valid = htole32(valid_tx_ant),
   3853 	};
   3854 
   3855 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
   3856 	    sizeof(tx_ant_cmd), &tx_ant_cmd);
   3857 }
   3858 
   3859 static int
   3860 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
   3861 {
   3862 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
   3863 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
   3864 
   3865 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
   3866 	phy_cfg_cmd.calib_control.event_trigger =
   3867 	    sc->sc_default_calib[ucode_type].event_trigger;
   3868 	phy_cfg_cmd.calib_control.flow_trigger =
   3869 	    sc->sc_default_calib[ucode_type].flow_trigger;
   3870 
   3871 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
   3872 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
   3873 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
   3874 }
   3875 
   3876 static int
   3877 iwm_send_dqa_cmd(struct iwm_softc *sc)
   3878 {
   3879 	struct iwm_dqa_enable_cmd dqa_cmd = {
   3880 		.cmd_queue = htole32(IWM_DQA_CMD_QUEUE),
   3881 	};
   3882 	uint32_t cmd_id;
   3883 
   3884 	cmd_id = iwm_cmd_id(IWM_DQA_ENABLE_CMD, IWM_DATA_PATH_GROUP, 0);
   3885 	return iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
   3886 }
   3887 
/*
 * Read the requested ucode image from the firmware file, upload it,
 * and complete post-"alive" setup (including firmware paging for
 * images that require it).  On start failure the previously selected
 * ucode type is restored in sc_uc_current.
 */
static int
iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_sects *fws;
	enum iwm_ucode_type old_type = sc->sc_uc_current;
	int err;

	err = iwm_read_firmware(sc, ucode_type);
	if (err)
		return err;

	/* Pick the command queue id depending on DQA support. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
		sc->cmdqid = IWM_DQA_CMD_QUEUE;
	else
		sc->cmdqid = IWM_CMD_QUEUE;

	sc->sc_uc_current = ucode_type;
	err = iwm_start_fw(sc, ucode_type);
	if (err) {
		/* Roll back so callers see the ucode that is still loaded. */
		sc->sc_uc_current = old_type;
		return err;
	}

	err = iwm_post_alive(sc);
	if (err)
		return err;

	/* Set up firmware paging if this image uses it. */
	fws = &sc->sc_fw.fw_sects[ucode_type];
	if (fws->paging_mem_size) {
		err = iwm_save_fw_paging(sc, fws);
		if (err)
			return err;

		err = iwm_send_paging_cmd(sc, fws);
		if (err) {
			iwm_free_fw_paging(sc);
			return err;
		}
	}

	return 0;
}
   3930 
   3931 static int
   3932 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
   3933 {
   3934 	const int wait_flags = (IWM_INIT_COMPLETE | IWM_CALIB_COMPLETE);
   3935 	int err, s;
   3936 
   3937 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
   3938 		aprint_error_dev(sc->sc_dev,
   3939 		    "radio is disabled by hardware switch\n");
   3940 		return EPERM;
   3941 	}
   3942 
   3943 	s = splnet();
   3944 	sc->sc_init_complete = 0;
   3945 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
   3946 	if (err) {
   3947 		DPRINTF(("%s: failed to load init firmware\n", DEVNAME(sc)));
   3948 		splx(s);
   3949 		return err;
   3950 	}
   3951 
   3952 	if (sc->sc_device_family < IWM_DEVICE_FAMILY_8000) {
   3953 		err = iwm_send_bt_init_conf(sc);
   3954 		if (err) {
   3955 			splx(s);
   3956 			return err;
   3957 		}
   3958 	}
   3959 
   3960 	if (justnvm) {
   3961 		err = iwm_nvm_init(sc);
   3962 		if (err) {
   3963 			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
   3964 			splx(s);
   3965 			return err;
   3966 		}
   3967 
   3968 		if (IEEE80211_ADDR_EQ(etheranyaddr, sc->sc_ic.ic_myaddr))
   3969 			IEEE80211_ADDR_COPY(&sc->sc_ic.ic_myaddr,
   3970 			    &sc->sc_nvm.hw_addr);
   3971 		splx(s);
   3972 		return 0;
   3973 	}
   3974 
   3975 	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
   3976 	if (err) {
   3977 		splx(s);
   3978 		return err;
   3979 	}
   3980 
   3981 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
   3982 	if (err) {
   3983 		splx(s);
   3984 		return err;
   3985 	}
   3986 
   3987 	/*
   3988 	 * Send phy configurations command to init uCode
   3989 	 * to start the 16.0 uCode init image internal calibrations.
   3990 	 */
   3991 	err = iwm_send_phy_cfg_cmd(sc);
   3992 	if (err) {
   3993 		splx(s);
   3994 		return err;
   3995 	}
   3996 
   3997 	/*
   3998 	 * Nothing to do but wait for the init complete notification
   3999 	 * from the firmware
   4000 	 */
   4001 	while ((sc->sc_init_complete & wait_flags) != wait_flags) {
   4002 		err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
   4003 		if (err)
   4004 			break;
   4005 	}
   4006 
   4007 	splx(s);
   4008 	return err;
   4009 }
   4010 
#ifdef notyet
/*
 * Enable PCIe latency tolerance reporting in the firmware, but only if
 * LTR support was detected during attach (sc_ltr_enabled).
 */
static int
iwm_config_ltr(struct iwm_softc *sc)
{
	struct iwm_ltr_config_cmd cmd;

	if (!sc->sc_ltr_enabled)
		return 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE);

	return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
}
#endif
   4025 
/*
 * Allocate and DMA-map a fresh receive mbuf of the given size for RX
 * ring slot idx, and point the ring's descriptor at it.  If the slot
 * already held an mbuf, its map is torn down first; in that case a
 * subsequent load failure is fatal because the slot would be left
 * with no valid buffer (hence the panic).
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Use a cluster when it fits, otherwise external storage. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, size, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	/* The hardware takes the buffer's physical address shifted right 8. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
   4074 
   4075 #define IWM_RSSI_OFFSET 50
   4076 static int
   4077 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   4078 {
   4079 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
   4080 	uint32_t agc_a, agc_b;
   4081 	uint32_t val;
   4082 
   4083 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
   4084 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
   4085 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
   4086 
   4087 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
   4088 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
   4089 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
   4090 
   4091 	/*
   4092 	 * dBm = rssi dB - agc dB - constant.
   4093 	 * Higher AGC (higher radio gain) means lower signal.
   4094 	 */
   4095 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
   4096 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
   4097 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
   4098 
   4099 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
   4100 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
   4101 
   4102 	return max_rssi_dbm;
   4103 }
   4104 
   4105 /*
   4106  * RSSI values are reported by the FW as positive values - need to negate
   4107  * to obtain their dBM.  Account for missing antennas by replacing 0
   4108  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
   4109  */
   4110 static int
   4111 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   4112 {
   4113 	int energy_a, energy_b, energy_c, max_energy;
   4114 	uint32_t val;
   4115 
   4116 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
   4117 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
   4118 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
   4119 	energy_a = energy_a ? -energy_a : -256;
   4120 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
   4121 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
   4122 	energy_b = energy_b ? -energy_b : -256;
   4123 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
   4124 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
   4125 	energy_c = energy_c ? -energy_c : -256;
   4126 	max_energy = MAX(energy_a, energy_b);
   4127 	max_energy = MAX(max_energy, energy_c);
   4128 
   4129 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
   4130 	    energy_a, energy_b, energy_c, max_energy));
   4131 
   4132 	return max_energy;
   4133 }
   4134 
/*
 * Handle an RX PHY notification: cache the PHY info so the MPDU
 * notification that follows it can use it (see iwm_rx_rx_mpdu, which
 * reads sc_last_phy_info).
 */
static void
iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	DPRINTFN(20, ("received PHY stats\n"));
	/* Sync only the PHY-info payload that follows the packet header. */
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
   4147 
   4148 /*
   4149  * Retrieve the average noise (in dBm) among receivers.
   4150  */
   4151 static int
   4152 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
   4153 {
   4154 	int i, total, nbant, noise;
   4155 
   4156 	total = nbant = noise = 0;
   4157 	for (i = 0; i < 3; i++) {
   4158 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
   4159 		if (noise) {
   4160 			total += noise;
   4161 			nbant++;
   4162 		}
   4163 	}
   4164 
   4165 	/* There should be at least one antenna but check anyway. */
   4166 	return (nbant == 0) ? -127 : (total / nbant) - 107;
   4167 }
   4168 
/*
 * Handle an RX MPDU notification: validate the frame, compute RSSI
 * from the PHY info cached by iwm_rx_rx_phy_cmd(), replenish the RX
 * ring slot, optionally feed radiotap, and pass the frame up to the
 * 802.11 stack.
 */
static void
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;
	int s;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* PHY info arrived in the preceding RX PHY notification. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The status word sits immediately after the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
	    sizeof(*rx_res) + len));

	/* Reuse the ring mbuf directly; it is replaced via addbuf below. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports per-antenna energy instead of raw RSSI. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_calc_rssi(sc, phy_info);
	}
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(sc, m);

	/* Re-arm this RX slot; on failure the frame is dropped. */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m_set_rcvif(m, IC2IFP(ic));

	if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
		c = &ic->ic_channels[le32toh(phy_info->channel)];

	s = splnet();

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	/* Radiotap for attached BPF listeners. */
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		if (phy_info->phy_flags &
		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
			uint8_t mcs = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
			      IWM_RATE_HT_MCS_NSS_MSK));
			tap->wr_rate = (0x80 | mcs);
		} else {
			uint8_t rate = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_LEGACY_RATE_MSK));
			/* Map firmware PLCP rate codes to 500kbps units. */
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate =   0;
			}
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m, BPF_D_IN);
	}
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);

	splx(s);
}
   4287 
   4288 static void
   4289 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
   4290     struct iwm_node *in)
   4291 {
   4292 	struct ieee80211com *ic = &sc->sc_ic;
   4293 	struct ifnet *ifp = IC2IFP(ic);
   4294 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
   4295 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
   4296 	int failack = tx_resp->failure_frame;
   4297 
   4298 	KASSERT(tx_resp->frame_count == 1);
   4299 
   4300 	/* Update rate control statistics. */
   4301 	in->in_amn.amn_txcnt++;
   4302 	if (failack > 0) {
   4303 		in->in_amn.amn_retrycnt++;
   4304 	}
   4305 
   4306 	if (status != IWM_TX_STATUS_SUCCESS &&
   4307 	    status != IWM_TX_STATUS_DIRECT_DONE)
   4308 		if_statinc(ifp, if_oerrors);
   4309 	else
   4310 		if_statinc(ifp, if_opackets);
   4311 }
   4312 
/*
 * Handle a TX completion notification: update statistics, release the
 * completed frame's DMA resources, node reference and ring slot, and
 * restart transmission if the queue had filled up.
 */
static void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;
	int s;

	s = splnet();

	/* Guard against duplicate completion interrupts for this slot. */
	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		splx(s);
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* A completion arrived, so the watchdog can stand down. */
	sc->sc_tx_timer = 0;

	iwm_rx_tx_cmd_single(sc, pkt, in);

	/* Tear down the transmitted frame's DMA mapping and free it. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	/* Drop the node reference taken when the frame was queued. */
	txd->m = NULL;
	txd->in = NULL;
	ieee80211_free_node(&in->in_ni);

	/* Unblock output once the ring drains below the low watermark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			KASSERT(KERNEL_LOCKED_P());
			iwm_start(ifp);
		}
	}

	splx(s);
}
   4368 
/*
 * Add or remove the binding between the MAC context and its PHY
 * context in the firmware.  Uses the newer (CDB-capable) command
 * layout when the firmware advertises it, otherwise the v1 size.
 */
static int
iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
{
	struct iwm_binding_cmd cmd;
	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
	uint32_t mac_id = IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
	int i, err, active = (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE);
	uint32_t status;
	size_t cmdsize;

	/* Refuse redundant add/remove requests. */
	if (action == IWM_FW_CTXT_ACTION_ADD && active) {
		device_printf(sc->sc_dev, "binding already added\n");
		return EINVAL;
	}

	if (action == IWM_FW_CTXT_ACTION_REMOVE && !active) {
		device_printf(sc->sc_dev, "binding already removed\n");
		return EINVAL;
	}

	if (phyctxt == NULL)
		return EINVAL;

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
	cmd.action = htole32(action);
	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));

	/* Only the first MAC slot is used; mark the rest invalid. */
	cmd.macs[0] = htole32(mac_id);
	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);

	if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CDB_SUPPORT))
		cmd.lmac_id = htole32(IWM_LMAC_24G_INDEX);
	else
		cmd.lmac_id = htole32(IWM_LMAC_5G_INDEX);

	/* Older firmware expects the shorter v1 command. */
	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT))
		cmdsize = sizeof(cmd);
	else
		cmdsize = sizeof(struct iwm_binding_cmd_v1);

	status = 0;
	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD, cmdsize, &cmd,
	    &status);
	/* A non-zero firmware status is an error even if the send worked. */
	if (err == 0 && status != 0)
		err = EIO;

	return err;
}
   4422 
   4423 static void
   4424 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
   4425     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
   4426 {
   4427 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
   4428 
   4429 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
   4430 	    ctxt->color));
   4431 	cmd->action = htole32(action);
   4432 	cmd->apply_time = htole32(apply_time);
   4433 }
   4434 
   4435 static void
   4436 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
   4437     struct ieee80211_channel *chan, uint8_t chains_static,
   4438     uint8_t chains_dynamic)
   4439 {
   4440 	struct ieee80211com *ic = &sc->sc_ic;
   4441 	uint8_t active_cnt, idle_cnt;
   4442 
   4443 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
   4444 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
   4445 
   4446 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
   4447 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
   4448 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
   4449 
   4450 	/* Set rx the chains */
   4451 	idle_cnt = chains_static;
   4452 	active_cnt = chains_dynamic;
   4453 
   4454 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
   4455 	    IWM_PHY_RX_CHAIN_VALID_POS);
   4456 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
   4457 	cmd->rxchain_info |= htole32(active_cnt <<
   4458 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
   4459 
   4460 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
   4461 }
   4462 
   4463 static int
   4464 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt, uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
   4465     uint32_t apply_time)
   4466 {
   4467 	struct iwm_phy_context_cmd cmd;
   4468 
   4469 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
   4470 
   4471 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
   4472 	    chains_static, chains_dynamic);
   4473 
   4474 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
   4475 	    sizeof(struct iwm_phy_context_cmd), &cmd);
   4476 }
   4477 
/*
 * Send a host command to the firmware via the command queue.
 *
 * Unless IWM_CMD_ASYNC is set in hcmd->flags, sleeps until iwm_cmd_done()
 * wakes us on the descriptor address, or a 2 second timeout expires.
 * If IWM_CMD_WANT_RESP is set, a response buffer is allocated here and
 * handed back via hcmd->resp_pkt; the caller must release it with
 * iwm_free_resp().
 *
 * Returns 0 on success or an errno value.
 */
static int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int err = 0, i, paylen, off, s;
	int idx, code, async, group_id;
	size_t hdrlen, datasz;
	uint8_t *data;
	int generation = sc->sc_generation;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	idx = ring->cur;

	/* Total payload is the sum of all data segments. */
	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

        /* If this command waits for a response, allocate response buffer. */
        hcmd->resp_pkt = NULL;
        if (hcmd->flags & IWM_CMD_WANT_RESP) {
                uint8_t *resp_buf;
                KASSERT(!async);
                KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwm_rx_packet));
                KASSERT(hcmd->resp_pkt_len <= IWM_CMD_RESP_MAX);
                if (sc->sc_cmd_resp_pkt[idx] != NULL)
                        return ENOSPC;
                resp_buf = kmem_zalloc(hcmd->resp_pkt_len, KM_NOSLEEP);
                if (resp_buf == NULL)
                        return ENOMEM;
                sc->sc_cmd_resp_pkt[idx] = resp_buf;
                sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
        } else {
                sc->sc_cmd_resp_pkt[idx] = NULL;
                sc->sc_cmd_resp_len[idx] = 0;
        }

	/*
	 * Is the hardware still available?  (after e.g. above wait).
	 */
	s = splnet();

	desc = &ring->desc[idx];
	txdata = &ring->data[idx];

	/* Wide command headers carry a group id; legacy ones do not. */
	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			device_printf(sc->sc_dev,
			    "firmware command too long (%zd bytes)\n", totlen);
			err = EINVAL;
			/*
			 * NOTE(review): on this and the following error
			 * paths a response buffer allocated above stays in
			 * sc_cmd_resp_pkt[idx]; a later command on this idx
			 * gets ENOSPC.  Verify it is reclaimed elsewhere.
			 */
			goto out;
		}
		/* Stage the oversized command in an mbuf cluster instead. */
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			device_printf(sc->sc_dev,
			    "could not get fw cmd mbuf header\n");
			err = ENOMEM;
			goto out;
		}
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			device_printf(sc->sc_dev,
			    "could not get fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			device_printf(sc->sc_dev,
			    "could not load fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			goto out;
		}
		txdata->m = m;
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		/* Command fits in the ring's pre-allocated slot. */
		cmd = &ring->cmd[idx];
		paddr = txdata->cmd_paddr;
	}

	/* Fill in the wide or legacy command header. */
	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = idx;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwm_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = idx;
		data = cmd->data;
	}

	/* Concatenate all payload segments after the header. */
	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((hdrlen + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
	    code, hdrlen + paylen, async ? " (async)" : ""));

	/* Flush command and descriptor to memory before the device reads. */
	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (uint8_t *)cmd - (uint8_t *)ring->cmd_dma.vaddr,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (uint8_t *)desc - (uint8_t *)ring->desc_dma.vaddr,
	    sizeof(*desc), BUS_DMASYNC_PREWRITE);

	/*
	 * Wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on 7000 family NICs.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (ring->queued == 0 && !iwm_nic_lock(sc)) {
			err = EBUSY;
			goto out;
		}
	}

	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);

	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->queued++;
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (!async) {
		/* Wait up to 2 seconds for iwm_cmd_done() to wake us. */
		err = tsleep(desc, PCATCH, "iwmcmd", mstohz(2000));
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
				goto out;
			}

			/* Response buffer will be freed in iwm_free_resp(). */
			hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
			sc->sc_cmd_resp_pkt[idx] = NULL;
			sc->sc_cmd_resp_len[idx] = 0;
		} else if (generation == sc->sc_generation) {
			/* Timed out or interrupted: reclaim our buffer. */
			if (sc->sc_cmd_resp_pkt[idx] != NULL) {
				KASSERT(sc->sc_cmd_resp_len[idx] > 0);
				kmem_free(sc->sc_cmd_resp_pkt[idx],
				    sc->sc_cmd_resp_len[idx]);
			}
			sc->sc_cmd_resp_pkt[idx] = NULL;
			sc->sc_cmd_resp_len[idx] = 0;
		}
	}
 out:
	splx(s);

	return err;
}
   4674 
   4675 static int
   4676 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
   4677     uint16_t len, const void *data)
   4678 {
   4679 	struct iwm_host_cmd cmd = {
   4680 		.id = id,
   4681 		.len = { len, },
   4682 		.data = { data, },
   4683 		.flags = flags,
   4684 	};
   4685 
   4686 	return iwm_send_cmd(sc, &cmd);
   4687 }
   4688 
   4689 static int
   4690 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
   4691     uint32_t *status)
   4692 {
   4693 	struct iwm_rx_packet *pkt;
   4694 	struct iwm_cmd_response *resp;
   4695 	int err, resp_len;
   4696 
   4697 	KASSERT((cmd->flags & IWM_CMD_WANT_RESP) == 0);
   4698 	cmd->flags |= IWM_CMD_WANT_RESP;
   4699 	cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
   4700 
   4701 	err = iwm_send_cmd(sc, cmd);
   4702 	if (err)
   4703 		return err;
   4704 
   4705 	pkt = cmd->resp_pkt;
   4706 	if (pkt == NULL || (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
   4707 		return EIO;
   4708 
   4709 	resp_len = iwm_rx_packet_payload_len(pkt);
   4710 	if (resp_len != sizeof(*resp)) {
   4711 		iwm_free_resp(sc, cmd);
   4712 		return EIO;
   4713 	}
   4714 
   4715 	resp = (void *)pkt->data;
   4716 	*status = le32toh(resp->status);
   4717 	iwm_free_resp(sc, cmd);
   4718 	return err;
   4719 }
   4720 
   4721 static int
   4722 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
   4723     const void *data, uint32_t *status)
   4724 {
   4725 	struct iwm_host_cmd cmd = {
   4726 		.id = id,
   4727 		.len = { len, },
   4728 		.data = { data, },
   4729 	};
   4730 
   4731 	return iwm_send_cmd_status(sc, &cmd, status);
   4732 }
   4733 
   4734 static void
   4735 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
   4736 {
   4737 	KASSERT(hcmd != NULL);
   4738 	KASSERT((hcmd->flags & IWM_CMD_WANT_RESP) == IWM_CMD_WANT_RESP);
   4739 	if (hcmd->resp_pkt != NULL) {
   4740 		KASSERT(hcmd->resp_pkt_len > 0);
   4741 		kmem_free(hcmd->resp_pkt, hcmd->resp_pkt_len);
   4742 	}
   4743 	hcmd->resp_pkt = NULL;
   4744 }
   4745 
   4746 static void
   4747 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
   4748 {
   4749 	struct iwm_tx_ring *ring = &sc->txq[sc->cmdqid];
   4750 	struct iwm_tx_data *data;
   4751 
   4752 	if (qid != sc->cmdqid) {
   4753 		return;	/* Not a command ack. */
   4754 	}
   4755 
   4756 	data = &ring->data[idx];
   4757 
   4758 	if (data->m != NULL) {
   4759 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   4760 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   4761 		bus_dmamap_unload(sc->sc_dmat, data->map);
   4762 		m_freem(data->m);
   4763 		data->m = NULL;
   4764 	}
   4765 	wakeup(&ring->desc[idx]);
   4766 
   4767 	if (ring->queued == 0) {
   4768 		device_printf(sc->sc_dev, "cmd_done with empty ring\n");
   4769 	} else if (--ring->queued == 0) {
   4770                /*
   4771                  * 7000 family NICs are locked while commands are in progress.
   4772                  * All commands are now done so we may unlock the NIC again.
   4773                  */
   4774                 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
   4775                         iwm_nic_unlock(sc);
   4776 		}
   4777 	}
   4778 }
   4779 
   4780 /*
   4781  * necessary only for block ack mode
   4782  */
   4783 void
   4784 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
   4785     uint16_t len)
   4786 {
   4787 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
   4788 	uint16_t val;
   4789 
   4790 	scd_bc_tbl = sc->sched_dma.vaddr;
   4791 
   4792 	len += IWM_TX_CRC_SIZE + IWM_TX_DELIMITER_SIZE;
   4793 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
   4794 		len = roundup(len, 4) / 4;
   4795 
   4796 	val = htole16(sta_id << 12 | len);
   4797 
   4798 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
   4799 	    0, sc->sched_dma.size, BUS_DMASYNC_PREWRITE);
   4800 
   4801 	/* Update TX scheduler. */
   4802 	scd_bc_tbl[qid].tfd_offset[idx] = val;
   4803 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
   4804 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
   4805 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
   4806 	    0, sc->sched_dma.size, BUS_DMASYNC_POSTWRITE);
   4807 }
   4808 
   4809 /*
   4810  * Fill in various bit for management frames, and leave them
   4811  * unfilled for data frames (firmware takes care of that).
   4812  * Return the selected TX rate.
   4813  */
   4814 static const struct iwm_rate *
   4815 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
   4816     struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
   4817 {
   4818 	struct ieee80211com *ic = &sc->sc_ic;
   4819 	struct ieee80211_node *ni = &in->in_ni;
   4820 	const struct iwm_rate *rinfo;
   4821 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
   4822 	int ridx, rate_flags, i, ind;
   4823 	int nrates = ni->ni_rates.rs_nrates;
   4824 
   4825 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
   4826 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
   4827 
   4828 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
   4829 	    type != IEEE80211_FC0_TYPE_DATA) {
   4830 		/* for non-data, use the lowest supported rate */
   4831 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
   4832 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
   4833 		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
   4834 #ifndef IEEE80211_NO_HT
   4835 	} else if (ic->ic_fixed_mcs != -1) {
   4836 		ridx = sc->sc_fixed_ridx;
   4837 #endif
   4838 	} else if (ic->ic_fixed_rate != -1) {
   4839 		ridx = sc->sc_fixed_ridx;
   4840 	} else {
   4841 		/* for data frames, use RS table */
   4842 		tx->initial_rate_index = 0;
   4843 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
   4844 		DPRINTFN(12, ("start with txrate %d\n",
   4845 		    tx->initial_rate_index));
   4846 #ifndef IEEE80211_NO_HT
   4847 		if (ni->ni_flags & IEEE80211_NODE_HT) {
   4848 			ridx = iwm_mcs2ridx[ni->ni_txmcs];
   4849 			return &iwm_rates[ridx];
   4850 		}
   4851 #endif
   4852 		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
   4853 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
   4854 		for (i = 0; i < nrates; i++) {
   4855 			if (iwm_rates[i].rate == (ni->ni_txrate &
   4856 			    IEEE80211_RATE_VAL)) {
   4857 				ridx = i;
   4858 				break;
   4859 			}
   4860 		}
   4861 		return &iwm_rates[ridx];
   4862 	}
   4863 
   4864 	rinfo = &iwm_rates[ridx];
   4865 	for (i = 0, ind = sc->sc_mgmt_last_antenna;
   4866 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
   4867 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
   4868 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
   4869 			sc->sc_mgmt_last_antenna = ind;
   4870 			break;
   4871 		}
   4872 	}
   4873 	rate_flags = (1 << sc->sc_mgmt_last_antenna) << IWM_RATE_MCS_ANT_POS;
   4874 	if (IWM_RIDX_IS_CCK(ridx))
   4875 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
   4876 #ifndef IEEE80211_NO_HT
   4877 	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
   4878 	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
   4879 		rate_flags |= IWM_RATE_MCS_HT_MSK;
   4880 		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
   4881 	} else
   4882 #endif
   4883 		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
   4884 
   4885 	return rinfo;
   4886 }
   4887 
#define TB0_SIZE 16
/*
 * Queue frame 'm' for transmission to node 'ni' on EDCA access category
 * 'ac'.  Builds a TX command (header copied into the command, payload DMA
 * mapped from the mbuf), fills the TFD, updates the scheduler byte-count
 * table and kicks the ring.  Consumes 'm' on both success and failure.
 *
 * Returns 0 on success or an errno value.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type, subtype;
	int i, totlen, err, pad;
	int qid;
#ifndef IEEE80211_NO_HT
	int hasqos;
#endif

	wh = mtod(m, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
	hdrlen = ieee80211_anyhdrsize(wh);

#ifndef IEEE80211_NO_HT
	hasqos = ieee80211_has_qos(wh);
#endif
	if (type == IEEE80211_FC0_TYPE_DATA)
		tid = IWM_TID_NON_QOS;
	else
		tid = IWM_MAX_TID_COUNT;

	/*
	 * Map EDCA categories to Tx data queues.
	 *
	 * We use static data queue assignments even in DQA mode. We do not
	 * need to share Tx queues between stations because we only implement
	 * client mode; the firmware's station table contains only one entry
	 * which represents our access point.
	 */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
		else
			qid = IWM_AUX_QUEUE;
	} else if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
		qid = IWM_DQA_MIN_MGMT_QUEUE + ac;
	else
		qid = ac;

#ifndef IEEE80211_NO_HT
	/* If possible, put this frame on an aggregation queue. */
	if (hasqos) {
		struct ieee80211_tx_ba *ba;
		uint16_t qos = ieee80211_get_qos(wh);
		int qostid = qos & IEEE80211_QOS_TID;
		int agg_qid = IWM_FIRST_AGG_TX_QUEUE + qostid;

		ba = &ni->ni_tx_ba[qostid];
		if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
		    (sc->tx_ba_queue_mask & (1 << agg_qid)) &&
		    ba->ba_state == IEEE80211_BA_AGREED) {
			qid = agg_qid;
			tid = qostid;
			ac = ieee80211_up_to_ac(ic, qostid);
		}
	}
#endif

	ring = &sc->txq[qid];
	/*
	 * NOTE(review): 'ring' is the address of an array element and can
	 * never be NULL; this check looks dead.  Perhaps a bounds check on
	 * 'qid' was intended — confirm.
	 */
	if (ring == NULL) {
		device_printf(sc->sc_dev, "no ring data for queue %d\n", qid);
		m_freem(m);
		return EFAULT;
	}
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* TX command header goes in the ring's pre-allocated command slot. */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Radiotap capture, if a bpf listener is attached. */
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
#ifndef IEEE80211_NO_HT
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    rinfo->plcp == IWM_RATE_INVM_PLCP) {
			tap->wt_rate = (0x80 | rinfo->ht_plcp);
		} else
#endif
			tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m, BPF_D_OUT);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for large or protected unicast data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
	     (ic->ic_flags & IEEE80211_F_USEPROT)))
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = IWM_AUX_STA_ID;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		else
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
	} else {
#ifndef IEEE80211_NO_HT
		/* Block-ack requests carry the TID in the BAR control. */
		if (type == IEEE80211_FC0_TYPE_CTL &&
		    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
			struct ieee80211_frame_min *mwh;
			uint8_t *barfrm;
			uint16_t ctl;

			mwh = mtod(m, struct ieee80211_frame_min *);
			barfrm = (uint8_t *)&mwh[1];
			ctl = barfrm[1] << 8 | barfrm[0];
			tid = (ctl & IEEE80211_BA_TID_INFO_MASK) >>
			    IEEE80211_BA_TID_INFO_SHIFT;
			flags |= IWM_TX_CMD_FLG_ACK | IWM_TX_CMD_FLG_BAR;
			tx->data_retry_limit = IWM_BAR_DEFAULT_RETRY_LIMIT;
		}
#endif

		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(tx + 1, wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err) {
		if (err != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/*
		 * NOTE(review): this claims the mbuf against the RX mbuf
		 * owner (ec_rx_mowner) although it is used for TX — verify
		 * whether a TX owner was intended.
		 */
		MCLAIM(m1, &sc->sc_ec.ec_rx_mowner);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;
#ifndef IEEE80211_NO_HT
        data->txmcs = ni->ni_txmcs;
        data->txrate = ni->ni_txrate;
#ifdef notyet
        data->ampdu_txmcs = ni->ni_txmcs; /* updated upon Tx interrupt */
        data->ampdu_txnss = ni->ni_vht_ss; /* updated upon Tx interrupt */
#endif
#endif

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d type=%d "
	    "subtype=%x tx_flags=%08x init_rateidx=%08x rate_n_flags=%08x\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs, type,
	    (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 4,
	    le32toh(tx->tx_flags), le32toh(tx->initial_rate_index),
	    le32toh(tx->rate_n_flags)));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB0 and TB1 cover the TX command plus the copied 802.11 header. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
	    (TB0_SIZE << 4));
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4));

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)
		    | ((seg->ds_len) << 4));
	}

	/* Flush payload, command, and descriptor before the device reads. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, m->m_pkthdr.len,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (uint8_t *)cmd - (uint8_t *)ring->cmd, sizeof(*cmd),
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
	    BUS_DMASYNC_PREWRITE);

	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, totlen);

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
   5195 
   5196 #if 0
   5197 /* not necessary? */
/*
 * Ask the firmware to flush the TX FIFOs in 'tfd_msk'.  Currently
 * compiled out (#if 0); kept for reference.
 */
static int
iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int err;

	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (err)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    err);
	return err;
}
   5214 #endif
   5215 
/* Turn the device LED on. */
static void
iwm_led_enable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}
   5221 
/* Turn the device LED off. */
static void
iwm_led_disable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
   5227 
/* Return non-zero if the LED register reads back as "on". */
static int
iwm_led_is_enabled(struct iwm_softc *sc)
{
	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
}
   5233 
   5234 static void
   5235 iwm_led_blink_timeout(void *arg)
   5236 {
   5237 	struct iwm_softc *sc = arg;
   5238 
   5239 	if (iwm_led_is_enabled(sc))
   5240 		iwm_led_disable(sc);
   5241 	else
   5242 		iwm_led_enable(sc);
   5243 
   5244 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
   5245 }
   5246 
/* Start the 200ms LED blink callout. */
static void
iwm_led_blink_start(struct iwm_softc *sc)
{
	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
}
   5252 
/* Stop the blink callout and leave the LED off. */
static void
iwm_led_blink_stop(struct iwm_softc *sc)
{
	callout_stop(&sc->sc_led_blink_to);
	iwm_led_disable(sc);
}
   5259 
   5260 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
   5261 
   5262 static int
   5263 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
   5264     struct iwm_beacon_filter_cmd *cmd)
   5265 {
   5266 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
   5267 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
   5268 }
   5269 
/*
 * Fill connection-quality-monitoring parameters into a beacon filter
 * command.  Only beacon-abort enablement is set here; 'in' is currently
 * unused.
 */
static void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
   5276 
   5277 static int
   5278 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
   5279 {
   5280 	struct iwm_beacon_filter_cmd cmd = {
   5281 		IWM_BF_CMD_CONFIG_DEFAULTS,
   5282 		.bf_enable_beacon_filter = htole32(1),
   5283 		.ba_enable_beacon_abort = htole32(enable),
   5284 	};
   5285 
   5286 	if (!sc->sc_bf.bf_enabled)
   5287 		return 0;
   5288 
   5289 	sc->sc_bf.ba_enabled = enable;
   5290 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
   5291 	return iwm_beacon_filter_send_cmd(sc, &cmd);
   5292 }
   5293 
   5294 static void
   5295 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
   5296     struct iwm_mac_power_cmd *cmd)
   5297 {
   5298 	struct ieee80211_node *ni = &in->in_ni;
   5299 	int dtim_period, dtim_msec, keep_alive;
   5300 
   5301 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   5302 	    in->in_color));
   5303 	if (ni->ni_dtim_period)
   5304 		dtim_period = ni->ni_dtim_period;
   5305 	else
   5306 		dtim_period = 1;
   5307 
   5308 	/*
   5309 	 * Regardless of power management state the driver must set
   5310 	 * keep alive period. FW will use it for sending keep alive NDPs
   5311 	 * immediately after association. Check that keep alive period
   5312 	 * is at least 3 * DTIM.
   5313 	 */
   5314 	dtim_msec = dtim_period * ni->ni_intval;
   5315 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
   5316 	keep_alive = roundup(keep_alive, 1000) / 1000;
   5317 	cmd->keep_alive_seconds = htole16(keep_alive);
   5318 
   5319 #ifdef notyet
   5320 	cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
   5321 	cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
   5322 	cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
   5323 #endif
   5324 }
   5325 
   5326 static int
   5327 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
   5328 {
   5329 	int err;
   5330 	int ba_enable;
   5331 	struct iwm_mac_power_cmd cmd;
   5332 
   5333 	memset(&cmd, 0, sizeof(cmd));
   5334 
   5335 	iwm_power_build_cmd(sc, in, &cmd);
   5336 
   5337 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
   5338 	    sizeof(cmd), &cmd);
   5339 	if (err)
   5340 		return err;
   5341 
   5342 	ba_enable = !!(cmd.flags &
   5343 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
   5344 	return iwm_update_beacon_abort(sc, in, ba_enable);
   5345 }
   5346 
   5347 static int
   5348 iwm_power_update_device(struct iwm_softc *sc)
   5349 {
   5350 	struct iwm_device_power_cmd cmd = { };
   5351 	struct ieee80211com *ic = &sc->sc_ic;
   5352 
   5353 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
   5354 		return 0;
   5355 
   5356 	if (ic->ic_opmode != IEEE80211_M_MONITOR)
   5357 		cmd.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
   5358 
   5359 	DPRINTF(("Sending device power command with flags = 0x%X\n",
   5360 	    cmd.flags));
   5361 
   5362 	return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
   5363 }
   5364 
   5365 #ifdef notyet
/*
 * Enable firmware beacon filtering and remember the state in sc_bf.
 * Currently compiled out (#ifdef notyet).
 */
static int
iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
	};
	int err;

	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	err = iwm_beacon_filter_send_cmd(sc, &cmd);

	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
   5383 #endif
   5384 
   5385 static int
   5386 iwm_disable_beacon_filter(struct iwm_softc *sc)
   5387 {
   5388 	struct iwm_beacon_filter_cmd cmd;
   5389 	int err;
   5390 
   5391 	memset(&cmd, 0, sizeof(cmd));
   5392 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
   5393 		return 0;
   5394 
   5395 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
   5396 	if (err == 0)
   5397 		sc->sc_bf.bf_enabled = 0;
   5398 
   5399 	return err;
   5400 }
   5401 
/*
 * Add (update == 0) or update (update != 0) the firmware's station
 * table entry for this node via IWM_ADD_STA.  Also accumulates the
 * TFD TX queue mask the station may use; queue numbering depends on
 * whether the firmware supports DQA.  Returns 0 on success, EIO if
 * the firmware rejected the command, or a transport error.
 */
static int
iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
{
	struct iwm_add_sta_cmd add_sta_cmd;
	int err;
	uint32_t status;
	size_t cmdsize;
	struct ieee80211com *ic = &sc->sc_ic;

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	/* Monitor mode uses a dedicated station ID. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		add_sta_cmd.sta_id = IWM_MONITOR_STA_ID;
	else
		add_sta_cmd.sta_id = IWM_STATION_ID;
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE)) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR)
			add_sta_cmd.station_type = IWM_STA_GENERAL_PURPOSE;
		else
			add_sta_cmd.station_type = IWM_STA_LINK;
	}
	add_sta_cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		int qid;
		/*
		 * NOTE(review): when !update this address is overwritten
		 * with the broadcast address below -- confirm the
		 * etheranyaddr copy is intended for the update case.
		 */
		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, etheranyaddr);
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
		else
			qid = IWM_AUX_QUEUE;
		in->tfd_queue_msk |= (1 << qid);
	} else {
		int ac;
		/* One TX queue per WME access category. */
		for (ac = 0; ac < WME_NUM_AC; ac++) {
			int qid = ac;
			if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
				qid += IWM_DQA_MIN_MGMT_QUEUE;
			in->tfd_queue_msk |= (1 << qid);
		}
	}
	/* The station address is only filled in on the initial add. */
	if (!update) {
		if (ic->ic_opmode == IEEE80211_M_MONITOR) {
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    etherbroadcastaddr);
		} else {
			IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
			    in->in_macaddr);
		}
	}
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
	if (update) {
		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_QUEUES |
		    IWM_STA_MODIFY_TID_DISABLE_TX);
	}
	/* TX stays disabled on all TIDs; queues were assigned above. */
	add_sta_cmd.tid_disable_tx = htole16(0xffff);
	add_sta_cmd.tfd_queue_msk = htole32(in->tfd_queue_msk);

#ifndef IEEE80211_NO_HT
	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		/*
		 * Aggregation parameters: 64K max A-MPDU size, with the
		 * minimum MPDU start spacing taken from ic_ampdu_params.
		 */
		add_sta_cmd.station_flags_msk
		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);

		add_sta_cmd.station_flags
		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
		case IEEE80211_AMPDU_PARAM_SS_2:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_4:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_8:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
			break;
		case IEEE80211_AMPDU_PARAM_SS_16:
			add_sta_cmd.station_flags
			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
			break;
		default:
			break;
		}
	}
#endif

	/* Older firmware takes the shorter v7 command layout. */
	status = IWM_ADD_STA_SUCCESS;
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
		cmdsize = sizeof(add_sta_cmd);
	else
		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize,
	    &add_sta_cmd, &status);
	if (err == 0 && (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
   5504 
/*
 * Add the auxiliary station (IWM_AUX_STA_ID) used by the firmware for
 * internal traffic such as scanning.  First enables the aux TX queue
 * (DQA or legacy queue number depending on firmware capabilities),
 * then sends IWM_ADD_STA with TX disabled on all TIDs.  Returns 0 on
 * success, EIO on firmware rejection, or a transport error.
 */
static int
iwm_add_aux_sta(struct iwm_softc *sc)
{
	struct iwm_add_sta_cmd cmd;
	int err, qid;
	uint32_t status;
	size_t cmdsize;

	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
		qid = IWM_DQA_AUX_QUEUE;
		err = iwm_enable_txq(sc, IWM_AUX_STA_ID, qid,
		    IWM_TX_FIFO_MCAST, 0, IWM_MAX_TID_COUNT, 0);
	} else {
		qid = IWM_AUX_QUEUE;
		err = iwm_enable_ac_txq(sc, qid, IWM_TX_FIFO_MCAST);
	}
	if (err)
		return err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = IWM_AUX_STA_ID;
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = IWM_STA_AUX_ACTIVITY;
	cmd.mac_id_n_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
	cmd.tfd_queue_msk = htole32(1 << qid);
	cmd.tid_disable_tx = htole16(0xffff);

	/* Older firmware takes the shorter v7 command layout. */
	status = IWM_ADD_STA_SUCCESS;
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_STA_TYPE))
		cmdsize = sizeof(cmd);
	else
		cmdsize = sizeof(struct iwm_add_sta_cmd_v7);
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, cmdsize, &cmd,
	    &status);
	if (err == 0 &&
	    (status & IWM_ADD_STA_STATUS_MASK) != IWM_ADD_STA_SUCCESS)
		err = EIO;

	return err;
}
   5546 
/*
 * Scan timing constants.  Presumably expressed in firmware time units
 * (TU) as consumed by the scan commands -- TODO confirm against the
 * firmware scan API definitions.
 */
#define IWM_PLCP_QUIET_THRESH 1
#define IWM_ACTIVE_QUIET_TIME 10
#define LONG_OUT_TIME_PERIOD 600
#define SHORT_OUT_TIME_PERIOD 200
#define SUSPEND_TIME_PERIOD 100
   5552 
   5553 static uint16_t
   5554 iwm_scan_rx_chain(struct iwm_softc *sc)
   5555 {
   5556 	uint16_t rx_chain;
   5557 	uint8_t rx_ant;
   5558 
   5559 	rx_ant = iwm_fw_valid_rx_ant(sc);
   5560 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
   5561 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
   5562 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
   5563 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
   5564 	return htole16(rx_chain);
   5565 }
   5566 
   5567 static uint32_t
   5568 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
   5569 {
   5570 	uint32_t tx_ant;
   5571 	int i, ind;
   5572 
   5573 	for (i = 0, ind = sc->sc_scan_last_antenna;
   5574 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
   5575 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
   5576 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
   5577 			sc->sc_scan_last_antenna = ind;
   5578 			break;
   5579 		}
   5580 	}
   5581 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
   5582 
   5583 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
   5584 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
   5585 				   tx_ant);
   5586 	else
   5587 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
   5588 }
   5589 
/*
 * Fill the channel array of an LMAC scan request with every channel
 * configured in net80211 (table entries with ic_flags == 0 are
 * unused), up to the firmware's supported scan channel count.  Each
 * channel is scanned for one iteration; active scanning is requested
 * on non-passive channels when at least one SSID is being probed.
 * Returns the number of channel entries written.
 */
static uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
		chan->iter_count = htole16(1);
		chan->iter_interval = htole32(0);
		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
		chan->flags |= htole32(IWM_SCAN_CHANNEL_NSSIDS(n_ssids));
		if (!IEEE80211_IS_CHAN_PASSIVE(c) && n_ssids != 0)
			chan->flags |= htole32(IWM_SCAN_CHANNEL_TYPE_ACTIVE);
		chan++;
		nchan++;
	}

	return nchan;
}
   5618 
/*
 * UMAC counterpart of iwm_lmac_scan_fill_channels(): fill the UMAC
 * scan request's channel config array from net80211's channel table
 * (table entries with ic_flags == 0 are unused), up to the firmware's
 * supported scan channel count.  Returns the number of channel
 * entries written.
 */
static uint8_t
iwm_umac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		chan->iter_count = 1;
		chan->iter_interval = htole16(0);
		chan->flags = htole32(IWM_SCAN_CHANNEL_UMAC_NSSIDS(n_ssids));
		chan++;
		nchan++;
	}

	return nchan;
}
   5644 
   5645 static int
   5646 iwm_fill_probe_req_v1(struct iwm_softc *sc, struct iwm_scan_probe_req_v1 *preq1)
   5647 {
   5648 	struct iwm_scan_probe_req preq2;
   5649 	int err, i;
   5650 
   5651 	err = iwm_fill_probe_req(sc, &preq2);
   5652 	if (err)
   5653 		return err;
   5654 
   5655 	preq1->mac_header = preq2.mac_header;
   5656 	for (i=0; i<__arraycount(preq1->band_data); i++)
   5657 		preq1->band_data[i] = preq2.band_data[i];
   5658 	preq1->common_data = preq2.common_data;
   5659 	memcpy(preq1->buf, preq2.buf, sizeof(preq1->buf));
   5660 	return 0;
   5661 }
   5662 
/*
 * Construct the probe request frame template handed to the firmware
 * for active scanning: an 802.11 probe request header, the desired
 * SSID element, and per-band supported-rate elements.  The firmware
 * is told the offset and length of the MAC header, of each band's
 * IEs, and of the IEs common to both bands via the descriptors in
 * *preq.  Returns ENOBUFS if the template buffer is too small for
 * the required elements.
 */
static int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);	/* bytes left in preq->buf */
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	KASSERT(ic->ic_des_esslen < sizeof(ic->ic_des_essid));
	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame.  Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	/* Extended rates take an extra element header (2 bytes). */
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	/* Empty DS Params element; presumably patched per-channel by
	 * the firmware -- TODO confirm. */
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

#ifndef IEEE80211_NO_HT
	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
#endif

	preq->common_data.len = htole16(frm - pos);

	return 0;
}
   5757 
/*
 * Start a scan using the older LMAC scan API
 * (IWM_SCAN_OFFLOAD_REQUEST_CMD).  Builds a single-iteration scan
 * request covering all configured channels, doing an active directed
 * scan when an ESSID has been configured and a passive scan
 * otherwise.  May sleep (KM_SLEEP allocation).  Returns 0 on success
 * or an error code.
 */
static int
iwm_lmac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Request layout: header, per-channel configs, probe template. */
	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req_v1);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	/* Scan 2GHz, plus 5GHz when the NVM says the SKU supports it. */
	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0);

	/* The probe template follows the channel configs in req->data. */
	err = iwm_fill_probe_req_v1(sc,
	    (struct iwm_scan_probe_req_v1 *)(req->data +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	     sc->sc_capa_n_scan_channels)));
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
   5860 
/*
 * Send the UMAC scan configuration command (IWM_SCAN_CFG_CMD):
 * legacy rates, RX/TX chains, dwell times, our MAC address, channel
 * flags and the list of channels the firmware may scan.  May sleep
 * (KM_SLEEP allocation).  Returns 0 on success or an error code.
 */
static int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_LONG_GROUP, 0),
		.flags = 0,
	};
	/* All non-HT rates, CCK and OFDM. */
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	/* One trailing byte per supported scan channel. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
#if 1
	/* Enable EBS (energy based scan) related channel flags. */
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
#else
	scan_config->channel_flags = 0;
#endif

	/* Entries with ic_flags == 0 are unused channel table slots. */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(scan_config, cmd_size);
	return err;
}
   5936 
   5937 static int
   5938 iwm_umac_scan_size(struct iwm_softc *sc)
   5939 {
   5940 	int base_size = IWM_SCAN_REQ_UMAC_SIZE_V1;
   5941 	int tail_size;
   5942 
   5943 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
   5944 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V8;
   5945 	else if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
   5946 		base_size = IWM_SCAN_REQ_UMAC_SIZE_V7;
   5947 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
   5948 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v2);
   5949 	else
   5950 		tail_size = sizeof(struct iwm_scan_req_umac_tail_v1);
   5951 
   5952 	return base_size + sizeof(struct iwm_scan_channel_cfg_umac) *
   5953 	    sc->sc_capa_n_scan_channels + tail_size;
   5954 }
   5955 
   5956 static struct iwm_scan_umac_chan_param *
   5957 iwm_get_scan_req_umac_chan_param(struct iwm_softc *sc,
   5958     struct iwm_scan_req_umac *req)
   5959 {
   5960 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
   5961 		return &req->v8.channel;
   5962 
   5963 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
   5964 		return &req->v7.channel;
   5965 
   5966 	return &req->v1.channel;
   5967 }
   5968 
   5969 static void *
   5970 iwm_get_scan_req_umac_data(struct iwm_softc *sc, struct iwm_scan_req_umac *req)
   5971 {
   5972 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2))
   5973 		return (void *)&req->v8.data;
   5974 
   5975 	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL))
   5976 		return (void *)&req->v7.data;
   5977 
   5978 	return (void *)&req->v1.data;
   5979 
   5980 }
   5981 
/*
 * Adaptive dwell scan parameters, used when the firmware advertises
 * the IWM_UCODE_TLV_API_ADAPTIVE_DWELL API (see iwm_umac_scan()).
 */
/* adaptive dwell max budget time [TU] for full scan */
#define IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
/* adaptive dwell max budget time [TU] for directed scan */
#define IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
/* adaptive dwell default high band APs number */
#define IWM_SCAN_ADWELL_DEFAULT_HB_N_APS 8
/* adaptive dwell default low band APs number */
#define IWM_SCAN_ADWELL_DEFAULT_LB_N_APS 2
/* adaptive dwell default APs number in social channels (1, 6, 11) */
#define IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
   5992 
/*
 * Start a scan using the UMAC scan API (IWM_SCAN_REQ_UMAC).  The
 * request layout depends on the firmware's API flags (v1/v7/v8
 * headers, v1/v2 tails); adaptive dwell parameters are filled in for
 * firmware that supports them.  An active directed scan is performed
 * when an ESSID is configured, a passive scan otherwise.  May sleep
 * (KM_SLEEP allocation).  Returns 0 on success or an error code.
 */
static int
iwm_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_umac *req;
	void *cmd_data, *tail_data;
	struct iwm_scan_req_umac_tail_v2 *tail;
	struct iwm_scan_req_umac_tail_v1 *tailv1;
	struct iwm_scan_umac_chan_param *chanparam;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	req_len = iwm_umac_scan_size(sc);
	if (req_len < IWM_SCAN_REQ_UMAC_SIZE_V1 + sizeof(*tailv1) ||
	    req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ERANGE;
	req = kmem_zalloc(req_len, KM_SLEEP);

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL)) {
		req->v7.adwell_default_n_aps_social =
		    IWM_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
		req->v7.adwell_default_n_aps =
		    IWM_SCAN_ADWELL_DEFAULT_LB_N_APS;

		/* Directed scans get a smaller adaptive dwell budget. */
		if (ic->ic_des_esslen != 0)
			req->v7.adwell_max_budget =
			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
		else
			req->v7.adwell_max_budget =
			    htole16(IWM_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);

		req->v7.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
		req->v7.max_out_time[IWM_SCAN_LB_LMAC_IDX] = 0;
		req->v7.suspend_time[IWM_SCAN_LB_LMAC_IDX] = 0;

		/* The v8 layout moves the dwell times into arrays. */
		if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_ADAPTIVE_DWELL_V2)) {
			req->v8.active_dwell[IWM_SCAN_LB_LMAC_IDX] = 10;
			req->v8.passive_dwell[IWM_SCAN_LB_LMAC_IDX] = 110;
		} else {
			req->v7.active_dwell = 10;
			req->v7.passive_dwell = 110;
			req->v7.fragmented_dwell = 44;
		}
	} else {
		/* These timings correspond to iwlwifi's UNASSOC scan. */
		req->v1.active_dwell = 10;
		req->v1.passive_dwell = 110;
		req->v1.fragmented_dwell = 44;
		req->v1.extended_dwell = 90;

		req->v1.scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	}

	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	/* Channel configs live in the layout-dependent data area. */
	cmd_data = iwm_get_scan_req_umac_data(sc, req);
	chanparam = iwm_get_scan_req_umac_chan_param(sc, req);
	chanparam->count = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)cmd_data,
	    ic->ic_des_esslen != 0);
	chanparam->flags = 0;

	tail_data = (char *)cmd_data
	    + sizeof(struct iwm_scan_channel_cfg_umac) * sc->sc_capa_n_scan_channels;

	/*
	 * The v2 tail extends v1, so the v1 view may be used for the
	 * fields common to both layouts (direct_scan, schedule).
	 */
	tail = (struct iwm_scan_req_umac_tail_v2 *)tail_data;
	tailv1 = (struct iwm_scan_req_umac_tail_v1 *)tail_data;

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		tailv1->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		tailv1->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(tailv1->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_SCAN_EXT_CHAN_VER))
		err = iwm_fill_probe_req(sc, &tail->preq);
	else
		err = iwm_fill_probe_req_v1(sc, &tailv1->preq);
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tailv1->schedule[0].interval = 0;
	tailv1->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
   6109 
   6110 void
   6111 iwm_mcc_update(struct iwm_softc *sc, struct iwm_mcc_chub_notif *notif)
   6112 {
   6113 	struct ieee80211com *ic = &sc->sc_ic;
   6114 	struct ifnet *ifp = IC2IFP(ic);
   6115 	char alpha2[3];
   6116 
   6117 	snprintf(alpha2, sizeof(alpha2), "%c%c",
   6118 	    (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
   6119 
   6120 	if (ifp->if_flags & IFF_DEBUG) {
   6121 		printf("%s: firmware has detected regulatory domain '%s' "
   6122 		    "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
   6123 	}
   6124 
   6125 	/* TODO: Schedule a task to send MCC_UPDATE_CMD? */
   6126 }
   6127 
   6128 static uint8_t
   6129 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
   6130 {
   6131 	int i;
   6132 	uint8_t rval;
   6133 
   6134 	for (i = 0; i < rs->rs_nrates; i++) {
   6135 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
   6136 		if (rval == iwm_rates[ridx].rate)
   6137 			return rs->rs_rates[i];
   6138 	}
   6139 	return 0;
   6140 }
   6141 
/*
 * Compute the CCK and OFDM ACK rate bitmaps for the firmware MAC
 * context command from the node's basic rate set, augmented with the
 * mandatory lower rates required by 802.11-2007 9.6 (see the long
 * comment below).  The results are stored in *cck_rates and
 * *ofdm_rates as host-order bitmaps indexed from the first rate of
 * each modulation class.
 */
static void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = -1;
	int lowest_present_cck = -1;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only exist on 2GHz channels. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck == -1 || lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
   6227 
static void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	/*
	 * Fill the fields of a MAC context command that are common to the
	 * ADD/MODIFY/REMOVE actions: context id/color, MAC type, addresses,
	 * ACK rate bitmaps, slot/preamble flags and per-AC EDCA parameters.
	 */
	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	/* Only monitor (listener) and STA modes are supported. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_LISTENER);
	else if (ic->ic_opmode == IEEE80211_M_STA)
		cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	else
		panic("unsupported operating mode %d", ic->ic_opmode);
	cmd->tsf_id = htole32(IWM_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* In monitor mode there is no BSS; use the broadcast
		 * address and skip the STA-specific setup below. */
		IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
		return;
	}

	IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
	/* Tell the firmware which rates to use for ACK/control responses. */
	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* EDCA parameters, indexed by the firmware TX FIFO for each AC. */
	for (i = 0; i < WME_NUM_AC; i++) {
		struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
		int txf = iwm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
		cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
		cmd->ac[txf].aifsn = wmep->wmep_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

#ifndef IEEE80211_NO_HT
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
			/*
			 * NOTE(review): there is no 'break' here, so these
			 * cases fall through and also set
			 * IWM_MAC_PROT_FLG_FAT_PROT.  Confirm against
			 * upstream whether this fall-through is intentional.
			 * (Currently compiled out via IEEE80211_NO_HT.)
			 */
		case IEEE80211_HTPROT_20MHZ:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
			    IWM_MAC_PROT_FLG_FAT_PROT);
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
#endif

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	/* Accept multicast frames addressed to our group. */
	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}
   6311 
   6312 static void
   6313 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
   6314     struct iwm_mac_data_sta *sta, int assoc)
   6315 {
   6316 	struct ieee80211_node *ni = &in->in_ni;
   6317 	uint32_t dtim_off;
   6318 	uint64_t tsf;
   6319 
   6320 	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
   6321 	tsf = le64toh(ni->ni_tstamp.tsf);
   6322 
   6323 	sta->is_assoc = htole32(assoc);
   6324 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
   6325 	sta->dtim_tsf = htole64(tsf + dtim_off);
   6326 	sta->bi = htole32(ni->ni_intval);
   6327 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
   6328 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
   6329 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
   6330 	sta->listen_interval = htole32(10);
   6331 	sta->assoc_id = htole32(ni->ni_associd);
   6332 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
   6333 }
   6334 
   6335 static int
   6336 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
   6337     int assoc)
   6338 {
   6339 	struct ieee80211_node *ni = &in->in_ni;
   6340 	struct iwm_mac_ctx_cmd cmd;
   6341 
   6342 	memset(&cmd, 0, sizeof(cmd));
   6343 
   6344 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
   6345 
   6346 	/* Allow beacons to pass through as long as we are not associated or we
   6347 	 * do not have dtim period information */
   6348 	if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
   6349 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
   6350 	else
   6351 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
   6352 
   6353 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
   6354 }
   6355 
   6356 #define IWM_MISSED_BEACONS_THRESHOLD 8
   6357 
   6358 static void
   6359 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
   6360     struct iwm_missed_beacons_notif *mb)
   6361 {
   6362 	int s;
   6363 
   6364 	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
   6365 	    le32toh(mb->mac_id),
   6366 	    le32toh(mb->consec_missed_beacons),
   6367 	    le32toh(mb->consec_missed_beacons_since_last_rx),
   6368 	    le32toh(mb->num_recvd_beacons),
   6369 	    le32toh(mb->num_expected_beacons)));
   6370 
   6371 	/*
   6372 	 * TODO: the threshold should be adjusted based on latency conditions,
   6373 	 * and/or in case of a CS flow on one of the other AP vifs.
   6374 	 */
   6375 	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
   6376 	    IWM_MISSED_BEACONS_THRESHOLD) {
   6377 		s = splnet();
   6378 		ieee80211_beacon_miss(&sc->sc_ic);
   6379 		splx(s);
   6380 	}
   6381 }
   6382 
   6383 static int
   6384 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
   6385 {
   6386 	struct iwm_time_quota_cmd cmd;
   6387 	int i, idx, num_active_macs, quota, quota_rem;
   6388 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
   6389 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
   6390 	uint16_t id;
   6391 
   6392 	memset(&cmd, 0, sizeof(cmd));
   6393 
   6394 	/* currently, PHY ID == binding ID */
   6395 	if (in) {
   6396 		id = in->in_phyctxt->id;
   6397 		KASSERT(id < IWM_MAX_BINDINGS);
   6398 		colors[id] = in->in_phyctxt->color;
   6399 
   6400 		if (1)
   6401 			n_ifs[id] = 1;
   6402 	}
   6403 
   6404 	/*
   6405 	 * The FW's scheduling session consists of
   6406 	 * IWM_MAX_QUOTA fragments. Divide these fragments
   6407 	 * equally between all the bindings that require quota
   6408 	 */
   6409 	num_active_macs = 0;
   6410 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
   6411 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
   6412 		num_active_macs += n_ifs[i];
   6413 	}
   6414 
   6415 	quota = 0;
   6416 	quota_rem = 0;
   6417 	if (num_active_macs) {
   6418 		quota = IWM_MAX_QUOTA / num_active_macs;
   6419 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
   6420 	}
   6421 
   6422 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
   6423 		if (colors[i] < 0)
   6424 			continue;
   6425 
   6426 		cmd.quotas[idx].id_and_color =
   6427 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
   6428 
   6429 		if (n_ifs[i] <= 0) {
   6430 			cmd.quotas[idx].quota = htole32(0);
   6431 			cmd.quotas[idx].max_duration = htole32(0);
   6432 		} else {
   6433 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
   6434 			cmd.quotas[idx].max_duration = htole32(0);
   6435 		}
   6436 		idx++;
   6437 	}
   6438 
   6439 	/* Give the remainder of the session to the first binding */
   6440 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
   6441 
   6442 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
   6443 }
   6444 
   6445 static int
   6446 iwm_scan(struct iwm_softc *sc)
   6447 {
   6448 	struct ieee80211com *ic = &sc->sc_ic;
   6449 	int err;
   6450 
   6451 #ifdef notyet
   6452 	if (sc->sc_flags & IWM_FLAG_BGSCAN) {
   6453 		err = iwm_scan_abort(sc);
   6454 		if (err) {
   6455 			device_printf(sc->sc_dev,
   6456 			    "could not abort background scan\n");
   6457 			return err;
   6458 		}
   6459 	}
   6460 #endif
   6461 
   6462 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
   6463 		err = iwm_umac_scan(sc);
   6464 	else
   6465 		err = iwm_lmac_scan(sc);
   6466 	if (err) {
   6467 		device_printf(sc->sc_dev, "could not initiate scan (error = %d)\n", err);
   6468 		return err;
   6469 	}
   6470 
   6471 	/*
   6472 	 * The current mode might have been fixed during association.
   6473 	 * Ensure all channels get scanned.
   6474 	 */
   6475 	if (IFM_SUBTYPE(ic->ic_media.ifm_cur->ifm_media) == IFM_AUTO)
   6476 		ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
   6477 
   6478 	return 0;
   6479 }
   6480 
   6481 static int
   6482 iwm_phy_ctxt_update(struct iwm_softc *sc, struct iwm_phy_ctxt *phyctxt,
   6483     struct ieee80211_channel *chan, uint8_t chains_static,
   6484     uint8_t chains_dynamic, uint32_t apply_time)
   6485 {
   6486 	uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
   6487 	int err;
   6488 
   6489 	if (isset(sc->sc_enabled_capa,
   6490 	    IWM_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
   6491 	    (phyctxt->channel->ic_flags & band_flags) !=
   6492 	    (chan->ic_flags & band_flags)) {
   6493 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
   6494 		    chains_dynamic, IWM_FW_CTXT_ACTION_REMOVE, apply_time);
   6495 		if (err) {
   6496 			device_printf(sc->sc_dev,
   6497 			    "could not remove PHY context (error %d)\n", err);
   6498 			return err;
   6499 		}
   6500 		phyctxt->channel = chan;
   6501 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
   6502 		    chains_dynamic, IWM_FW_CTXT_ACTION_ADD, apply_time);
   6503 		if (err) {
   6504 			device_printf(sc->sc_dev,
   6505 			    "could not add PHY context (error %d)\n", err);
   6506 			return err;
   6507 		}
   6508 	} else {
   6509 		phyctxt->channel = chan;
   6510 		err = iwm_phy_ctxt_cmd(sc, phyctxt, chains_static,
   6511 		    chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, apply_time);
   6512 		if (err) {
   6513 			device_printf(sc->sc_dev,
   6514 			    "could not update PHY context (error %d)\n", err);
   6515 			return err;
   6516 		}
   6517 	}
   6518 
   6519 	return 0;
   6520 }
   6521 
static int
iwm_auth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	uint32_t duration;
	int generation = sc->sc_generation, err;

	/*
	 * Prepare the firmware for authentication: point PHY context 0 at
	 * the target channel, then add the MAC context, the binding and
	 * the station, in that order.  On failure, the already-created
	 * contexts are torn down in reverse via the goto labels below.
	 */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    ic->ic_ibss_chan, 1, 1, 0);
		if (err)
			return err;
	} else {
		err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
		    in->in_ni.ni_chan, 1, 1, 0);
		if (err)
			return err;
	}

	in->in_phyctxt = &sc->sc_phyctxt[0];
	IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
	iwm_setrates(in);

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add MAC context (error %d)\n", err);
		return err;
	}
	sc->sc_flags |= IWM_FLAG_MAC_ACTIVE;

	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add binding (error %d)\n", err);
		goto rm_mac_ctxt;
	}
	sc->sc_flags |= IWM_FLAG_BINDING_ACTIVE;

	/* No Tx aggregation yet; disable AMPDU on all TIDs. */
	in->tid_disable_ampdu = 0xffff;
	err = iwm_add_sta_cmd(sc, in, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add sta (error %d)\n", err);
		goto rm_binding;
	}
	sc->sc_flags |= IWM_FLAG_STA_ACTIVE;

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		return 0;

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 5;
	else
		duration = IEEE80211_DUR_TU;
	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);

	return 0;

rm_binding:
	/* Only unwind if the device was not reset in the meantime. */
	if (generation == sc->sc_generation) {
		iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
	}
rm_mac_ctxt:
	if (generation == sc->sc_generation) {
		iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
	}
	return err;
}
   6598 
#ifdef notyet
static int
iwm_deauth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
	int err;

	splassert(IPL_NET);

	/*
	 * Undo iwm_auth(): drop session protection, then remove the
	 * station, the binding and the MAC context, in reverse order of
	 * creation.  Finally park the PHY context on a default channel.
	 */
	iwm_unprotect_session(sc, in);

	if (sc->sc_flags & IWM_FLAG_STA_ACTIVE) {
		err = iwm_flush_sta(sc, in);
		if (err)
			return err;
		err = iwm_rm_sta_cmd(sc, in);
		if (err) {
			printf("%s: could not remove STA (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		/* Reset all block-ack (BA) session bookkeeping. */
		in->tid_disable_ampdu = 0xffff;
		sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
		sc->sc_rx_ba_sessions = 0;
		sc->ba_rx.start_tidmask = 0;
		sc->ba_rx.stop_tidmask = 0;
		sc->tx_ba_queue_mask = 0;
		sc->ba_tx.start_tidmask = 0;
		sc->ba_tx.stop_tidmask = 0;
	}

	if (sc->sc_flags & IWM_FLAG_BINDING_ACTIVE) {
		err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE);
		if (err) {
			printf("%s: could not remove binding (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
	}

	if (sc->sc_flags & IWM_FLAG_MAC_ACTIVE) {
		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_REMOVE, 0);
		if (err) {
			printf("%s: could not remove MAC context (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
		sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
	}

	/* Move unused PHY context to a default channel. */
	err = iwm_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
	    &ic->ic_channels[1], 1, 1, 0);
	if (err)
		return err;

	return 0;
}
#endif
   6660 
static int
iwm_run(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
#ifndef IEEE80211_NO_HT
	struct ieee80211_node *ni = &in->in_ni;
#endif
	int err;

	/*
	 * Transition the firmware to RUN state: update PHY/MAC/STA
	 * contexts for the associated (or monitored) channel, configure
	 * power and multicast, and start rate adaptation.
	 */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		/* Add a MAC context and a sniffing STA. */
		err = iwm_auth(sc);
		if (err)
			return err;
	}

	/* Configure Rx chains for MIMO and configure 40 MHz channel. */
	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
		    in->in_phyctxt->channel, chains, chains, 0);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}
#ifndef IEEE80211_NO_HT
	else if (ni->ni_flags & IEEE80211_NODE_HT) {
		uint8_t chains = iwm_mimo_enabled(sc) ? 2 : 1;
		uint8_t sco, vht_chan_width;
		/*
		 * NOTE(review): sco and vht_chan_width are computed but
		 * never passed to iwm_phy_ctxt_update() below; presumably
		 * a wider-channel-aware PHY update is missing here — TODO
		 * confirm against upstream.  (Compiled out via
		 * IEEE80211_NO_HT at present.)
		 */
		if (IEEE80211_CHAN_40MHZ_ALLOWED(in->in_ni.ni_chan) &&
		    ieee80211_node_supports_ht_chan40(ni))
			sco = (ni->ni_htop0 & IEEE80211_HTOP0_SCO_MASK);
		else
			sco = IEEE80211_HTOP0_SCO_SCN;
		if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
		    IEEE80211_CHAN_80MHZ_ALLOWED(in->in_ni.ni_chan) &&
		    ieee80211_node_supports_vht_chan80(ni))
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
		else
			vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
		    in->in_phyctxt->channel, chains, chains, 0);
	       if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}
#endif

	/* Update STA again to apply HT and VHT settings. */
	err = iwm_add_sta_cmd(sc, in, 1);
	if (err) {
		printf("%s: could not update STA (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* We have now been assigned an associd by the AP. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

	/* XXX */
	iwm_unprotect_session(sc, in);

	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err) {
		printf("%s: could not set sf full on (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwm_allow_mcast(sc);
	if (err) {
		printf("%s: could not allow mcast (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	err = iwm_power_update_device(sc);
	if (err) {
		printf("%s: could not send power command (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}
#ifdef notyet
	/*
	 * Disabled for now. Default beacon filter settings
	 * prevent net80211 from getting ERP and HT protection
	 * updates from beacons.
	 */
	err = iwm_enable_beacon_filter(sc, in);
	if (err) {
		printf("%s: could not enable beacon filter\n",
		    DEVNAME(sc));
		return err;
	}
#endif
	err = iwm_power_mac_update_mode(sc, in);
	if (err) {
		printf("%s: could not update MAC power (error %d)\n",
		    DEVNAME(sc), err);
		return err;
	}

	/* Older firmware needs explicit time-quota programming. */
	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
		err = iwm_update_quotas(sc, in);
		if (err) {
			printf("%s: could not update quotas (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	/* Initialise rate-adaptation state for this node. */
	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
#ifndef IEEE80211_NO_HT
	ieee80211_ra_node_init(&in->in_rn);
	ieee80211_ra_vht_node_init(&in->in_rn_vht);
#endif

	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
		iwm_led_blink_start(sc);
		return 0;
	}

	/* Start at lowest available bit-rate, AMRR will raise. */
	in->in_ni.ni_txrate = 0;
#ifndef IEEE80211_NO_HT
	in->in_ni.ni_txmcs = 0;
	in->in_ni.ni_vht_ss = 1;
#endif
	iwm_setrates(in);

	/* Periodic calibration/rate-adaptation timer (500 ms). */
	callout_schedule(&sc->sc_calib_to, mstohz(500));
	iwm_led_enable(sc);

	return 0;
}
   6803 
#ifdef notyet
static int
iwm_run_stop(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ic->ic_bss;
#ifndef IEEE80211_NO_HT
	struct ieee80211_node *ni = &in->in_ni;
	int i, tid;
#endif
	int err;

	/*
	 * Stop Tx/Rx BA sessions now. We cannot rely on the BA task
	 * for this when moving out of RUN state since it runs in a
	 * separate thread.
	 * Note that in->in_ni (struct ieee80211_node) already represents
	 * our new access point in case we are roaming between APs.
	 * This means we cannot rely on struct ieee802111_node to tell
	 * us which BA sessions exist.
	 */
#ifndef IEEE80211_NO_HT
	for (i = 0; i < __arraycount(sc->sc_rxba_data); i++) {
		struct iwm_rxba_data *rxba = &sc->sc_rxba_data[i];
		if (rxba->baid == IWM_RX_REORDER_DATA_INVALID_BAID)
			continue;
		err = iwm_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
		if (err)
			return err;
		iwm_clear_reorder_buffer(sc, rxba);
		if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	}
	/* Tear down active Tx aggregation queues. */
	for (tid = 0; tid < IWM_MAX_TID_COUNT; tid++) {
		int qid = IWM_FIRST_AGG_TX_QUEUE + tid;
		if ((sc->tx_ba_queue_mask & (1 << qid)) == 0)
			continue;
		err = iwm_sta_tx_agg(sc, ni, tid, 0, 0, 0);
		if (err)
			return err;
		err = iwm_disable_txq(sc, IWM_STATION_ID, qid, tid);
		if (err)
			return err;
		in->tfd_queue_msk &= ~(1 << qid);
	}
	ieee80211_ba_del(ni);
#endif

	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		iwm_led_blink_stop(sc);

	/* Disable smart-FIFO and beacon filtering before leaving RUN. */
	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	iwm_disable_beacon_filter(sc);

	if (!isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
		err = iwm_update_quotas(sc, in);
		if (err) {
			printf("%s: could not update quotas (error %d)\n",
			    DEVNAME(sc), err);
			return err;
		}
	}

	/* Mark station as disassociated. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		printf("%s: failed to update MAC\n", DEVNAME(sc));
		return err;
	}

#ifndef IEEE80211_NO_HT
	/* Reset Tx chains in case MIMO or 40 MHz channels were enabled. */
	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		err = iwm_phy_ctxt_update(sc, in->in_phyctxt,
		    in->in_phyctxt->channel, 1, 1, 0);
		if (err) {
			printf("%s: failed to update PHY\n", DEVNAME(sc));
			return err;
		}
	}
#endif

	return 0;
}
#endif
   6892 
static int
iwm_assoc(struct iwm_softc *sc)
{
	/*
	 * Nothing to do on association at present; the STA entry is
	 * updated as associated from iwm_run() instead.  The disabled
	 * code below would perform that update here.
	 */
#if 0
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	int err;

	err = iwm_add_sta_cmd(sc, in, 1);
	if (err)
		return err;
#endif

	return 0;
}
   6908 
static struct ieee80211_node *
iwm_node_alloc(struct ieee80211_node_table *nt)
{
	/* Allocate a zeroed driver-private node; iwm_node embeds
	 * struct ieee80211_node as its first member. */
	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
}
   6914 
static void
iwm_calib_timeout(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	struct ieee80211_node *ni = &in->in_ni;
	int otxrate;
	int s;

	/*
	 * Periodic callout (500 ms): run AMRR rate adaptation and, if it
	 * picked a new TX rate, schedule a firmware LQ table update from
	 * process context.
	 */
	s = splnet();
	/*
	 * NOTE(review): with HT enabled this condition uses '||', so AMRR
	 * runs when either the legacy rate or the MCS is unfixed — confirm
	 * whether '&&' was intended.  (With IEEE80211_NO_HT defined the
	 * MCS term is compiled out and only ic_fixed_rate matters.)
	 */
	if ((ic->ic_fixed_rate == -1
#ifndef IEEE80211_NO_HT
	    || ic->ic_fixed_mcs == -1
#endif
	    ) &&
	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT)
			otxrate = ni->ni_txmcs;
		else
#endif
			otxrate = ni->ni_txrate;
		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);

#ifndef IEEE80211_NO_HT
		/*
		 * If AMRR has chosen a new TX rate we must update
		 * the firwmare's LQ rate table from process context.
		 */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    otxrate != ni->ni_txmcs)
			iwm_setrates_task(sc);
		else
#endif
		if (otxrate != ni->ni_txrate)
			iwm_setrates_task(sc);
	}
	splx(s);

	/* Re-arm for the next calibration interval. */
	callout_schedule(&sc->sc_calib_to, mstohz(500));
}
   6957 
   6958 static int
   6959 iwm_setrates_task(struct iwm_softc *sc)
   6960 {
   6961 	struct work *wk;
   6962 
   6963 	wk = kmem_intr_alloc(sizeof(*wk), KM_NOSLEEP);
   6964 	if (!wk) {
   6965 		DPRINTF(("%s: allocating setrates cb mem failed\n", DEVNAME(sc)));
   6966 		return ENOMEM;
   6967 	}
   6968 
   6969 	workqueue_enqueue(sc->sc_setratewq, wk, NULL);
   6970 
   6971 	return 0;
   6972 }
   6973 
   6974 static void
   6975 iwm_setrates_cb(struct work *wk, void *arg)
   6976 {
   6977 	struct iwm_softc *sc = arg;
   6978 	struct ieee80211com *ic = &sc->sc_ic;
   6979 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
   6980 	int s;
   6981 
   6982 	kmem_intr_free(wk, sizeof(*wk));
   6983 
   6984 	s = splnet();
   6985 
   6986 	/* Update rates table based on new TX rate determined by AMRR. */
   6987 	iwm_setrates(in);
   6988 
   6989 	splx(s);
   6990 }
   6991 
static int
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int i, j, ridx, ridx_min, tab = 0;
#ifndef IEEE80211_NO_HT
	int sgi_ok;
#endif
	struct iwm_host_cmd cmd = {
		.id = IWM_LQ_CMD,
		.len = { sizeof(in->in_lq), },
	};

	/*
	 * Build the firmware link-quality (LQ) rate table for this node
	 * and send it with IWM_LQ_CMD.  The table holds fallback rates in
	 * descending order starting from the node's current TX rate.
	 */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

#ifndef IEEE80211_NO_HT
	/* Short guard interval usable only if the peer advertises SGI20. */
	sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
#endif


	/*
	 * Fill the LQ rate selection table with legacy and/or HT rates
	 * in descending order, i.e. with the node's current TX rate first.
	 * In cases where throughput of an HT rate corresponds to a legacy
	 * rate it makes no sense to add both. We rely on the fact that
	 * iwm_rates is laid out such that equivalent HT/legacy rates share
	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
	 */
	j = 0;
	/* CCK rates do not exist in 5 GHz; start at OFDM there. */
	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
		if (j >= __arraycount(lq->rs_table))
			break;
		tab = 0;
#ifndef IEEE80211_NO_HT
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
			/* Pick the highest peer-supported MCS at this index. */
			for (i = ni->ni_txmcs; i >= 0; i--) {
				if (isclr(ni->ni_rxmcs, i))
					continue;
				if (ridx == iwm_mcs2ridx[i]) {
					tab = iwm_rates[ridx].ht_plcp;
					tab |= IWM_RATE_MCS_HT_MSK;
					if (sgi_ok)
						tab |= IWM_RATE_MCS_SGI_MSK;
					break;
				}
			}
		}
#endif
		/* Fall back to a legacy rate if no HT entry was selected. */
		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
			for (i = ni->ni_txrate; i >= 0; i--) {
				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
				    IEEE80211_RATE_VAL)) {
					tab = iwm_rates[ridx].plcp;
					break;
				}
			}
		}

		if (tab == 0)
			continue;

		/* Transmit on antenna A; mark CCK entries as such. */
		tab |= 1 << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[j++] = htole32(tab);
	}

	/* Fill the rest with the lowest possible rate */
	i = j > 0 ? j - 1 : 0;
	while (j < __arraycount(lq->rs_table))
		lq->rs_table[j++] = lq->rs_table[i];

	lq->single_stream_ant_msk = IWM_ANT_A;
	lq->dual_stream_ant_msk = IWM_ANT_AB;

	lq->agg_time_limit = htole16(4000);	/* 4ms */
	lq->agg_disable_start_th = 3;
#ifdef notyet
	lq->agg_frame_cnt_limit = 0x3f;
#else
	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
#endif

	cmd.data[0] = &in->in_lq;
	return iwm_send_cmd(sc, &cmd);
}
   7092 
static int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	/*
	 * ifmedia change handler: record any user-fixed rate/MCS as a HW
	 * rate index and restart the interface to apply the new settings.
	 */
	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

#ifndef IEEE80211_NO_HT
	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else
#endif
	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		/*
		 * NOTE(review): if the rate is not found the loop ends with
		 * ridx == IWM_RIDX_MAX + 1 and that value is stored —
		 * presumably net80211 only offers rates present in
		 * iwm_rates; confirm.
		 */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		sc->sc_fixed_ridx = ridx;
	}

	/* Restart the interface so the fixed rate takes effect. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		err = iwm_init(ifp);
	}
	return err;
}
   7127 
static int
iwm_do_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_softc *sc = ifp->if_softc;
	enum ieee80211_state ostate = ic->ic_state;
	int err;

	/*
	 * net80211 state-machine handler (runs in process context via
	 * iwm_newstate_cb).  Performs the firmware-side work for each
	 * transition, then chains to the stock net80211 handler.
	 */
	DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate]));

	if (ostate == IEEE80211_S_SCAN && nstate != ostate)
		iwm_led_blink_stop(sc);

	if (ostate == IEEE80211_S_RUN && nstate != ostate)
		iwm_disable_beacon_filter(sc);

	/* Reset the device if moving out of AUTH, ASSOC, or RUN. */
	/* XXX Is there a way to switch states without a full reset? */
	if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
		/*
		 * Upon receiving a deauth frame from AP the net80211 stack
		 * puts the driver into AUTH state. This will fail with this
		 * driver so bring the FSM from RUN to SCAN in this case.
		 */
		if (nstate != IEEE80211_S_INIT) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			/* Always pass arg as -1 since we can't Tx right now. */
			sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
			iwm_stop(ifp, 1);
			iwm_init(ifp);
			return 0;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		if (!ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
			err = iwm_scan(sc);
			if (err)
				return err;
			SET(sc->sc_flags, IWM_FLAG_SCANNING);
		}
		iwm_led_blink_start(sc);
		/* Scan completion advances the state; don't chain here. */
		ic->ic_state = nstate;
		return 0;

	case IEEE80211_S_AUTH:
		err = iwm_auth(sc);
		if (err) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), err));
			return err;
		}
		break;

	case IEEE80211_S_ASSOC:
		err = iwm_assoc(sc);
		if (err) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    err));
			return err;
		}
		break;

	case IEEE80211_S_RUN:
		err = iwm_run(sc);
		if (err) {
			DPRINTF(("%s: failed to run: %d\n", DEVNAME(sc),
			    err));
			return err;
		}
		break;

	default:
		break;
	}

	/* Let net80211 finish the transition. */
	return sc->sc_newstate(ic, nstate, arg);
}
   7211 
   7212 static void
   7213 iwm_newstate_cb(struct work *wk, void *v)
   7214 {
   7215 	struct iwm_softc *sc = v;
   7216 	struct ieee80211com *ic = &sc->sc_ic;
   7217 	struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
   7218 	enum ieee80211_state nstate = iwmns->ns_nstate;
   7219 	int generation = iwmns->ns_generation;
   7220 	int arg = iwmns->ns_arg;
   7221 
   7222 	kmem_intr_free(iwmns, sizeof(*iwmns));
   7223 
   7224 	if (ISSET(sc->sc_flags, IWM_FLAG_STOPPED))
   7225 		return;
   7226 
   7227 	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
   7228 	if (sc->sc_generation != generation) {
   7229 		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
   7230 		if (nstate == IEEE80211_S_INIT) {
   7231 			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: "
   7232 			    "calling sc_newstate()\n"));
   7233 			(void) sc->sc_newstate(ic, nstate, arg);
   7234 		}
   7235 	} else
   7236 		(void) iwm_do_newstate(ic, nstate, arg);
   7237 }
   7238 
   7239 static int
   7240 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
   7241 {
   7242 	struct iwm_newstate_state *iwmns;
   7243 	struct ifnet *ifp = IC2IFP(ic);
   7244 	struct iwm_softc *sc = ifp->if_softc;
   7245 
   7246 	callout_stop(&sc->sc_calib_to);
   7247 
   7248 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
   7249 	if (!iwmns) {
   7250 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
   7251 		return ENOMEM;
   7252 	}
   7253 
   7254 	iwmns->ns_nstate = nstate;
   7255 	iwmns->ns_arg = arg;
   7256 	iwmns->ns_generation = sc->sc_generation;
   7257 
   7258 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
   7259 
   7260 	return 0;
   7261 }
   7262 
   7263 static void
   7264 iwm_endscan(struct iwm_softc *sc)
   7265 {
   7266 	struct ieee80211com *ic = &sc->sc_ic;
   7267 	int s;
   7268 
   7269 	DPRINTF(("%s: scan ended\n", DEVNAME(sc)));
   7270 
   7271 	s = splnet();
   7272 	if (ic->ic_state == IEEE80211_S_SCAN)
   7273 		ieee80211_end_scan(ic);
   7274 	CLR(sc->sc_flags, IWM_FLAG_SCANNING);
   7275 	splx(s);
   7276 }
   7277 
   7278 /*
   7279  * Aging and idle timeouts for the different possible scenarios
   7280  * in default configuration
   7281  */
/* Values are pre-converted to little-endian firmware byte order. */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast: { aging timer, idle timer } */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* BA (block-ack) */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* Tx re(transmit) */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
   7305 
   7306 /*
   7307  * Aging and idle timeouts for the different possible scenarios
   7308  * in single BSS MAC configuration.
   7309  */
/* Values are pre-converted to little-endian firmware byte order. */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast: { aging timer, idle timer } */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{	/* BA (block-ack) */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{	/* Tx re(transmit) */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
   7333 
/*
 * Populate a smart-fifo configuration command.  With a non-NULL 'ni'
 * (association flow) the watermark and timeouts are chosen from the
 * AP's capabilities; with NULL, unassociated defaults are used.
 * NB: IEEE80211_NO_HT is defined at the top of this file, so the HT
 * branch below is currently compiled out.
 */
static void
iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else
#endif
			watermark = IWM_SF_W_MARK_LEGACY;
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts are the same for every scenario. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts: per-BSS table when associated, defaults otherwise. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
   7381 
   7382 static int
   7383 iwm_sf_config(struct iwm_softc *sc, int new_state)
   7384 {
   7385 	struct ieee80211com *ic = &sc->sc_ic;
   7386 	struct iwm_sf_cfg_cmd sf_cmd = {
   7387 		.state = htole32(IWM_SF_FULL_ON),
   7388 	};
   7389 
   7390 #if 0
   7391 	/* only used for models with sdio interface, in iwlwifi */
   7392 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   7393 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
   7394 #endif
   7395 
   7396 	switch (new_state) {
   7397 	case IWM_SF_UNINIT:
   7398 	case IWM_SF_INIT_OFF:
   7399 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
   7400 		break;
   7401 	case IWM_SF_FULL_ON:
   7402 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
   7403 		break;
   7404 	default:
   7405 		return EINVAL;
   7406 	}
   7407 
   7408 	return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
   7409 	    sizeof(sf_cmd), &sf_cmd);
   7410 }
   7411 
   7412 static int
   7413 iwm_send_bt_init_conf(struct iwm_softc *sc)
   7414 {
   7415 	struct iwm_bt_coex_cmd bt_cmd;
   7416 
   7417 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
   7418 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
   7419 
   7420 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
   7421 	    &bt_cmd);
   7422 }
   7423 
   7424 static bool
   7425 iwm_is_lar_supported(struct iwm_softc *sc)
   7426 {
   7427 	bool nvm_lar = sc->sc_nvm.lar_enabled;
   7428 	bool tlv_lar = isset(sc->sc_enabled_capa,
   7429 	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
   7430 
   7431 	if (iwm_lar_disable)
   7432 		return false;
   7433 
   7434 	/*
   7435 	 * Enable LAR only if it is supported by the FW (TLV) &&
   7436 	 * enabled in the NVM
   7437 	 */
   7438 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   7439 		return nvm_lar && tlv_lar;
   7440 	else
   7441 		return tlv_lar;
   7442 }
   7443 
/*
 * Send the SoC configuration command (latency/flags) to the firmware.
 * Only the discrete-device VER_1 form is currently implemented; the
 * integrated-device VER_2 path is under '#ifdef notyet'.  Returns 0
 * or the command error.
 */
static int
iwm_send_soc_conf(struct iwm_softc *sc)
{
	struct iwm_soc_configuration_cmd cmd;
	int err;
	uint32_t cmd_id, flags = 0;

	memset(&cmd, 0, sizeof(cmd));

	/*
	 * In VER_1 of this command, the discrete value is considered
	 * an integer; In VER_2, it's a bitmask.  Since we have only 2
	 * values in VER_1, this is backwards-compatible with VER_2,
	 * as long as we don't set any other flag bits.
	 */
#ifdef notyet
	if (!sc->sc_integrated) { /* VER_1 */
		flags = IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE;
	} else { /* VER_2 */
		uint8_t scan_cmd_ver;
		if (sc->sc_ltr_delay != IWM_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
			flags |= (sc->sc_ltr_delay &
			    IWM_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
		scan_cmd_ver = iwm_lookup_cmd_ver(sc, IWM_LONG_GROUP,
		    IWM_SCAN_REQ_UMAC);
		if (scan_cmd_ver != IWM_FW_CMD_VER_UNKNOWN &&
		    scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
			flags |= IWM_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
	}
	cmd.latency = htole32(sc->sc_xtal_latency);
#else
	/* Treat all supported devices as discrete for now. */
	flags = IWM_SOC_CONFIG_CMD_FLAGS_DISCRETE;
#endif
	cmd.flags = htole32(flags);

	cmd_id = iwm_cmd_id(IWM_SOC_CONFIGURATION_CMD, IWM_SYSTEM_GROUP, 0);
	err = iwm_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
	if (err)
		printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
	return err;
}
   7485 
/*
 * Send an MCC (mobile country code) update for the two-letter country
 * 'alpha2' and validate the firmware's response.  A no-op (returning
 * 0) when LAR is unsupported.  The response is only size-checked here;
 * its channel list is not applied.  Returns 0 or EIO/command errno.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = IWM_CMD_WANT_RESP,
		.resp_pkt_len = IWM_CMD_RESP_MAX,
		.data = { &mcc_cmd },
	};
	struct iwm_rx_packet *pkt;
	size_t resp_len;
	int err;
	/* Firmware capability decides which response layout we expect. */
	int resp_v3 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V3);

	if (!iwm_is_lar_supported(sc)) {
		DPRINTF(("%s: no LAR support\n", __func__));
		return 0;
	}

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	/* Country code is packed big-endian style into a 16-bit field. */
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	if (resp_v3)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	err = iwm_send_cmd(sc, &hcmd);
	if (err)
		return err;

	pkt = hcmd.resp_pkt;
	if (!pkt || (pkt->hdr.flags & IWM_CMD_FAILED_MSK)) {
		err = EIO;
		goto out;
	}

	if (resp_v3) {
		struct iwm_mcc_update_resp_v3 *resp;
		resp_len = iwm_rx_packet_payload_len(pkt);
		if (resp_len < sizeof(*resp)) {
			err = EIO;
			goto out;
		}

		/* Payload must be exactly header + n_channels entries. */
		resp = (void *)pkt->data;
		if (resp_len != sizeof(*resp) +
		    resp->n_channels * sizeof(resp->channels[0])) {
			err = EIO;
			goto out;
		}
	} else {
		struct iwm_mcc_update_resp_v1 *resp_v1;
		resp_len = iwm_rx_packet_payload_len(pkt);
		if (resp_len < sizeof(*resp_v1)) {
			err = EIO;
			goto out;
		}

		/* Payload must be exactly header + n_channels entries. */
		resp_v1 = (void *)pkt->data;
		if (resp_len != sizeof(*resp_v1) +
		    resp_v1->n_channels * sizeof(resp_v1->channels[0])) {
			err = EIO;
			goto out;
		}
	}
out:
	/* Release the response buffer obtained via IWM_CMD_WANT_RESP. */
	iwm_free_resp(sc, &hcmd);
	return err;
}
   7563 
   7564 static int
   7565 iwm_send_temp_report_ths_cmd(struct iwm_softc *sc)
   7566 {
   7567 	struct iwm_temp_report_ths_cmd cmd;
   7568 	int err;
   7569 
   7570 	/*
   7571 	 * In order to give responsibility for critical-temperature-kill
   7572 	 * and TX backoff to FW we need to send an empty temperature
   7573 	 * reporting command at init time.
   7574 	 */
   7575 	memset(&cmd, 0, sizeof(cmd));
   7576 
   7577 	err = iwm_send_cmd_pdu(sc,
   7578 	    IWM_WIDE_ID(IWM_PHY_OPS_GROUP, IWM_TEMP_REPORTING_THRESHOLDS_CMD),
   7579 	    0, sizeof(cmd), &cmd);
   7580 	if (err)
   7581 		printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
   7582 		    DEVNAME(sc), err);
   7583 
   7584 	return err;
   7585 }
   7586 
   7587 static void
   7588 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
   7589 {
   7590 	struct iwm_host_cmd cmd = {
   7591 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
   7592 		.len = { sizeof(uint32_t), },
   7593 		.data = { &backoff, },
   7594 	};
   7595 
   7596 	iwm_send_cmd(sc, &cmd);
   7597 }
   7598 
   7599 static int
   7600 iwm_init_hw(struct iwm_softc *sc)
   7601 {
   7602 	struct ieee80211com *ic = &sc->sc_ic;
   7603 	int err, i, ac, qid, s;
   7604 
   7605 	err = iwm_run_init_mvm_ucode(sc, 0);
   7606 	if (err)
   7607 		return err;
   7608 
   7609 	/* Should stop and start HW since INIT image just loaded. */
   7610 	iwm_stop_device(sc);
   7611 	err = iwm_start_hw(sc);
   7612 	if (err) {
   7613 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   7614 		return err;
   7615 	}
   7616 
   7617 	/* Restart, this time with the regular firmware */
   7618 	s = splnet();
   7619 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
   7620 	if (err) {
   7621 		aprint_error_dev(sc->sc_dev,
   7622 		    "could not load firmware (error %d)\n", err);
   7623 		splx(s);
   7624 		return err;
   7625 	}
   7626 
   7627 	if (!iwm_nic_lock(sc)) {
   7628 		splx(s);
   7629 		return EBUSY;
   7630 	}
   7631 
   7632 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
   7633 	if (err) {
   7634 		aprint_error_dev(sc->sc_dev,
   7635 		    "could not init tx ant config (error %d)\n", err);
   7636 		goto err;
   7637 	}
   7638 
   7639 	/* Send phy db control command and then phy db calibration*/
   7640 	err = iwm_send_phy_db_data(sc);
   7641 	if (err) {
   7642 		aprint_error_dev(sc->sc_dev,
   7643 		    "could not init phy db (error %d)\n", err);
   7644 		goto err;
   7645 	}
   7646 
   7647 	err = iwm_send_phy_cfg_cmd(sc);
   7648 	if (err) {
   7649 		aprint_error_dev(sc->sc_dev,
   7650 		    "could not send phy config (error %d)\n", err);
   7651 		goto err;
   7652 	}
   7653 
   7654 	err = iwm_send_bt_init_conf(sc);
   7655 	if (err) {
   7656 		aprint_error_dev(sc->sc_dev,
   7657 		    "could not init bt coex (error %d)\n", err);
   7658 		goto err;
   7659 	}
   7660 
   7661 	if (isset(sc->sc_enabled_capa,
   7662 	    IWM_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
   7663 		err = iwm_send_soc_conf(sc);
   7664 		if (err)
   7665 			goto err;
   7666 	}
   7667 
   7668 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT)) {
   7669 		err = iwm_send_dqa_cmd(sc);
   7670 		if (err)
   7671 			goto err;
   7672 	}
   7673 
   7674 	/* Add auxiliary station for scanning */
   7675 	err = iwm_add_aux_sta(sc);
   7676 	if (err) {
   7677 		aprint_error_dev(sc->sc_dev,
   7678 		    "could not add aux station (error %d)\n", err);
   7679 		goto err;
   7680 	}
   7681 
   7682 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
   7683 		/*
   7684 		 * The channel used here isn't relevant as it's
   7685 		 * going to be overwritten in the other flows.
   7686 		 * For now use the first channel we have.
   7687 		 */
   7688 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
   7689 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
   7690 		    IWM_FW_CTXT_ACTION_ADD, 0);
   7691 		if (err) {
   7692 			aprint_error_dev(sc->sc_dev,
   7693 			    "could not add phy context %d (error %d)\n",
   7694 			    i, err);
   7695 			goto err;
   7696 		}
   7697 	}
   7698 
   7699 	/* Initialize tx backoffs to the minimum. */
   7700 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
   7701 		iwm_tt_tx_backoff(sc, 0);
   7702 
   7703 #ifdef notyet
   7704 	err = iwm_config_ltr(sc);
   7705 	if (err) {
   7706 		aprint_error_dev(sc->sc_dev,
   7707 			"PCIe LTR confguration failed (error %d)\n", err);
   7708 	}
   7709 #endif
   7710 
   7711 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
   7712 		err = iwm_send_temp_report_ths_cmd(sc);
   7713 		if (err)
   7714 			goto err;
   7715 	}
   7716 
   7717 	err = iwm_power_update_device(sc);
   7718 	if (err) {
   7719 		aprint_error_dev(sc->sc_dev,
   7720 		    "could send power command (error %d)\n", err);
   7721 		goto err;
   7722 	}
   7723 
   7724 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
   7725 		err = iwm_send_update_mcc_cmd(sc, iwm_default_mcc);
   7726 		if (err) {
   7727 			aprint_error_dev(sc->sc_dev,
   7728 			    "could not init LAR (error %d)\n", err);
   7729 			goto err;
   7730 		}
   7731 	}
   7732 
   7733 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
   7734 		err = iwm_config_umac_scan(sc);
   7735 		if (err) {
   7736 			aprint_error_dev(sc->sc_dev,
   7737 			    "could not configure scan (error %d)\n", err);
   7738 			goto err;
   7739 		}
   7740 	}
   7741 
   7742 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
   7743 		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
   7744 			qid = IWM_DQA_INJECT_MONITOR_QUEUE;
   7745 		else
   7746 			qid = IWM_AUX_QUEUE;
   7747 		err = iwm_enable_txq(sc, IWM_MONITOR_STA_ID, qid,
   7748 		    iwm_ac_to_tx_fifo[WME_AC_BE], 0, IWM_MAX_TID_COUNT, 0);
   7749 		if (err) {
   7750 			aprint_error_dev(sc->sc_dev,
   7751 			    "could not enable monitor inject Tx queue "
   7752 			    "(error %d)\n", err);
   7753 			goto err;
   7754 		}
   7755 	} else {
   7756 		for (ac = 0; ac < WME_NUM_AC; ac++) {
   7757 			if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_DQA_SUPPORT))
   7758 				qid = ac + IWM_DQA_MIN_MGMT_QUEUE;
   7759 			else
   7760 				qid = ac;
   7761 			err = iwm_enable_txq(sc, IWM_STATION_ID, qid,
   7762 			    iwm_ac_to_tx_fifo[ac], 0, IWM_TID_NON_QOS, 0);
   7763 			if (err) {
   7764 				aprint_error_dev(sc->sc_dev,
   7765 				    "could not enable Tx queue %d (error %d)\n",
   7766 				    ac, err);
   7767 				goto err;
   7768 			}
   7769 		}
   7770 	}
   7771 
   7772 	err = iwm_disable_beacon_filter(sc);
   7773 	if (err) {
   7774 		aprint_error_dev(sc->sc_dev,
   7775 		    "could not disable beacon filter (error %d)\n", err);
   7776 		goto err;
   7777 	}
   7778 
   7779  err:
   7780 	iwm_nic_unlock(sc);
   7781 	splx(s);
   7782 	return err;
   7783 }
   7784 
   7785 /* Allow multicast from our BSSID. */
   7786 static int
   7787 iwm_allow_mcast(struct iwm_softc *sc)
   7788 {
   7789 	struct ieee80211com *ic = &sc->sc_ic;
   7790 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
   7791 	struct iwm_mcast_filter_cmd *cmd;
   7792 	size_t size;
   7793 	int err;
   7794 
   7795 	size = roundup(sizeof(*cmd), 4);
   7796 	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
   7797 	if (cmd == NULL)
   7798 		return ENOMEM;
   7799 	cmd->filter_own = 1;
   7800 	cmd->port_id = 0;
   7801 	cmd->count = 0;
   7802 	cmd->pass_all = 1;
   7803 	IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
   7804 
   7805 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
   7806 	    0, size, cmd);
   7807 	kmem_intr_free(cmd, size);
   7808 	return err;
   7809 }
   7810 
   7811 static int
   7812 iwm_init(struct ifnet *ifp)
   7813 {
   7814 	struct iwm_softc *sc = ifp->if_softc;
   7815 	struct ieee80211com *ic = &sc->sc_ic;
   7816 	int err, generation;
   7817 
   7818 	generation = ++sc->sc_generation;
   7819 
   7820 	err = iwm_preinit(sc, false);
   7821 	if (err)
   7822 		return err;
   7823 
   7824 	err = iwm_start_hw(sc);
   7825 	if (err) {
   7826 		device_printf(sc->sc_dev, "could not initialize hardware\n");
   7827 		return err;
   7828 	}
   7829 
   7830 	CLR(sc->sc_flags, IWM_FLAG_STOPPED);
   7831 
   7832 	err = iwm_init_hw(sc);
   7833 	if (err) {
   7834 		if (generation == sc->sc_generation)
   7835 			iwm_stop_device(sc);
   7836 		return err;
   7837 	}
   7838 
   7839 	ifp->if_flags &= ~IFF_OACTIVE;
   7840 	ifp->if_flags |= IFF_RUNNING;
   7841 
   7842 	if (ic->ic_opmode == IEEE80211_M_MONITOR) {
   7843 		ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
   7844 		return 0;
   7845 	}
   7846 
   7847 	ieee80211_begin_scan(&sc->sc_ic, 0);
   7848 
   7849 	return 0;
   7850 }
   7851 
/*
 * ifnet if_start handler: drain the management queue and (in RUN
 * state) the data send queue, handing each frame to iwm_tx().  Sets
 * IFF_OACTIVE and stops when any hardware Tx ring is full.
 */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* Destination node is stashed in the mbuf context. */
			ni = M_GETCTX(m, struct ieee80211_node *);
			M_CLEARCTX(m);
			ac = WME_AC_BE;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Need a contiguous Ethernet header to classify. */
		if (m->m_len < sizeof (*eh) &&
		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
			if_statinc(ifp, if_oerrors);
			continue;
		}

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			if_statinc(ifp, if_oerrors);
			continue;
		}

		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			if_statinc(ifp, if_oerrors);
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		bpf_mtap(ifp, m, BPF_D_OUT);

		/* Wrap the Ethernet frame in an 802.11 header. */
		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			if_statinc(ifp, if_oerrors);
			continue;
		}

 sendit:
		bpf_mtap3(ic->ic_rawbpf, m, BPF_D_OUT);

		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			if_statinc(ifp, if_oerrors);
			continue;
		}

		/* Arm the Tx watchdog (see iwm_watchdog). */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}
}
   7937 
/*
 * ifnet if_stop handler: halt the hardware and reset driver state.
 * The 'disable' argument is part of the if_stop signature but is not
 * examined by this implementation.
 */
static void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	int i, s;

	s = splnet();

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	/* Makes iwm_newstate_cb drop any queued transitions. */
	sc->sc_flags |= IWM_FLAG_STOPPED;

	iwm_stop_device(sc);

	/* Invalidate deferred work and free buffered command responses. */
	sc->sc_generation++;
	for (i = 0; i < __arraycount(sc->sc_cmd_resp_pkt); i++) {
		if (sc->sc_cmd_resp_pkt[i] != NULL) {
			KASSERT(sc->sc_cmd_resp_len[i] > 0);
			kmem_free(sc->sc_cmd_resp_pkt[i],
			    sc->sc_cmd_resp_len[i]);
		}
		sc->sc_cmd_resp_pkt[i] = NULL;
		sc->sc_cmd_resp_len[i] = 0;
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Reset the BSS node's firmware-related state. */
	if (in) {
		in->in_phyctxt = NULL;
		in->tid_disable_ampdu = 0xffff;
		in->tfd_queue_msk = 0;
		IEEE80211_ADDR_COPY(in->in_macaddr, etheranyaddr);
	}

	sc->sc_flags &= ~(IWM_FLAG_SCANNING | IWM_FLAG_BGSCAN);
	sc->sc_flags &= ~IWM_FLAG_MAC_ACTIVE;
	sc->sc_flags &= ~IWM_FLAG_BINDING_ACTIVE;
	sc->sc_flags &= ~IWM_FLAG_STA_ACTIVE;
	sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
#ifdef notyet
	sc->sc_flags &= ~IWM_FLAG_HW_ERR;
	sc->sc_flags &= ~IWM_FLAG_SHUTDOWN;
#endif
	sc->sc_flags &= ~IWM_FLAG_TXFLUSH;

	if (ic->ic_state != IEEE80211_S_INIT)
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	iwm_led_blink_stop(sc);
	/* Disarm the Tx watchdog. */
	ifp->if_timer = sc->sc_tx_timer = 0;
	splx(s);
}
   7991 
   7992 static void
   7993 iwm_watchdog(struct ifnet *ifp)
   7994 {
   7995 	struct iwm_softc *sc = ifp->if_softc;
   7996 
   7997 	ifp->if_timer = 0;
   7998 	if (sc->sc_tx_timer > 0) {
   7999 		if (--sc->sc_tx_timer == 0) {
   8000 			aprint_error_dev(sc->sc_dev, "device timeout\n");
   8001 #ifdef IWM_DEBUG
   8002 			iwm_nic_error(sc);
   8003 			iwm_dump_driver_status(sc);
   8004 #endif
   8005 			ifp->if_flags &= ~IFF_UP;
   8006 			iwm_stop(ifp, 1);
   8007 			if_statinc(ifp, if_oerrors);
   8008 			return;
   8009 		}
   8010 		ifp->if_timer = 1;
   8011 	}
   8012 
   8013 	ieee80211_watchdog(&sc->sc_ic);
   8014 }
   8015 
   8016 static int
   8017 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   8018 {
   8019 	struct iwm_softc *sc = ifp->if_softc;
   8020 	struct ieee80211com *ic = &sc->sc_ic;
   8021 	const struct sockaddr *sa;
   8022 	int s, err = 0;
   8023 
   8024 	s = splnet();
   8025 
   8026 	switch (cmd) {
   8027 	case SIOCSIFADDR:
   8028 		ifp->if_flags |= IFF_UP;
   8029 		/* FALLTHROUGH */
   8030 	case SIOCSIFFLAGS:
   8031 		err = ifioctl_common(ifp, cmd, data);
   8032 		if (err)
   8033 			break;
   8034 		if (ifp->if_flags & IFF_UP) {
   8035 			if (!(ifp->if_flags & IFF_RUNNING)) {
   8036 				/* Force reload of firmware image from disk. */
   8037 				sc->sc_fw.fw_status = IWM_FW_STATUS_NONE;
   8038 				err = iwm_init(ifp);
   8039 			}
   8040 		} else {
   8041 			if (ifp->if_flags & IFF_RUNNING) {
   8042 				iwm_stop(ifp, 1);
   8043 			}
   8044 		}
   8045 		break;
   8046 
   8047 	case SIOCADDMULTI:
   8048 	case SIOCDELMULTI:
   8049 		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
   8050 		err = (cmd == SIOCADDMULTI) ?
   8051 		    ether_addmulti(sa, &sc->sc_ec) :
   8052 		    ether_delmulti(sa, &sc->sc_ec);
   8053 		if (err == ENETRESET)
   8054 			err = 0;
   8055 		break;
   8056 
   8057 	default:
   8058 		err = ieee80211_ioctl(ic, cmd, data);
   8059 		break;
   8060 	}
   8061 
   8062 	if (err == ENETRESET) {
   8063 		err = 0;
   8064 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
   8065 		    (IFF_UP | IFF_RUNNING)) {
   8066 			iwm_stop(ifp, 0);
   8067 			err = iwm_init(ifp);
   8068 		}
   8069 	}
   8070 
   8071 	splx(s);
   8072 	return err;
   8073 }
   8074 
   8075 /*
   8076  * Note: This structure is read from the device with IO accesses,
   8077  * and the reading already does the endian conversion. As it is
   8078  * read with uint32_t-sized accesses, any members with a different size
   8079  * need to be ordered correctly though!
   8080  */
/* Layout must match the firmware's error log exactly (see trailing tag). */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
   8128 
   8129 /*
   8130  * UMAC error struct - relevant starting from family 8000 chip.
   8131  * Note: This structure is read from the device with IO accesses,
   8132  * and the reading already does the endian conversion. As it is
   8133  * read with u32-sized accesses, any members with a different size
   8134  * need to be ordered correctly though!
   8135  */
/* Layout must match the UMAC firmware error log; read via 32-bit IO. */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC version major */
	uint32_t umac_minor;	/* UMAC version minor */
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
   8153 
/* Offsets (in bytes) presumably used when walking the firmware error
 * log — confirm against iwm_nic_error(). */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))

/* High nibble of an assert code; masked off before table lookup below. */
#define IWM_FW_SYSASSERT_CPU_MASK 0xf0000000
/*
 * Known firmware assert codes and their symbolic names, searched
 * linearly by iwm_desc_lookup().  The final ADVANCED_SYSASSERT entry
 * is the catch-all returned when no code matches.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "BAD_COMMAND", 0x39 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
	{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
	{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
   8183 
   8184 static const char *
   8185 iwm_desc_lookup(uint32_t num)
   8186 {
   8187 	int i;
   8188 
   8189 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
   8190 		if (advanced_lookup[i].num ==
   8191 		    (num & ~IWM_FW_SYSASSERT_CPU_MASK))
   8192 			return advanced_lookup[i].name;
   8193 
   8194 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
   8195 	return advanced_lookup[i].name;
   8196 }
   8197 
   8198 /*
   8199  * Support for dumping the error log seemed like a good idea ...
   8200  * but it's mostly hex junk and the only sensible thing is the
   8201  * hw/ucode revision (which we know anyway).  Since it's here,
   8202  * I'll just leave it in, just in case e.g. the Intel guys want to
   8203  * help us decipher some "ADVANCED_SYSASSERT" later.
   8204  */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table t;
	uint32_t base;

	device_printf(sc->sc_dev, "dumping device error log\n");
	/* The table address was reported by the firmware's ALIVE response. */
	base = sc->sc_uc.uc_error_event_table;
	if (base < 0x800000) {
		device_printf(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() transfers dwords, hence the count in uint32_t units. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		device_printf(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!t.valid) {
		device_printf(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		device_printf(sc->sc_dev, "Start Error Log Dump:\n");
		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	device_printf(sc->sc_dev, "%08X | %-28s\n", t.error_id,
	    iwm_desc_lookup(t.error_id));
	device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
	    t.trm_hw_status0);
	device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
	    t.trm_hw_status1);
	device_printf(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
	device_printf(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
	device_printf(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
	device_printf(sc->sc_dev, "%08X | data1\n", t.data1);
	device_printf(sc->sc_dev, "%08X | data2\n", t.data2);
	device_printf(sc->sc_dev, "%08X | data3\n", t.data3);
	device_printf(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
	device_printf(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
	device_printf(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
	device_printf(sc->sc_dev, "%08X | time gp1\n", t.gp1);
	device_printf(sc->sc_dev, "%08X | time gp2\n", t.gp2);
	device_printf(sc->sc_dev, "%08X | uCode revision type\n",
	    t.fw_rev_type);
	device_printf(sc->sc_dev, "%08X | uCode version major\n",
	    t.major);
	device_printf(sc->sc_dev, "%08X | uCode version minor\n",
	    t.minor);
	device_printf(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
	device_printf(sc->sc_dev, "%08X | board version\n", t.brd_ver);
	device_printf(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
	device_printf(sc->sc_dev, "%08X | isr0\n", t.isr0);
	device_printf(sc->sc_dev, "%08X | isr1\n", t.isr1);
	device_printf(sc->sc_dev, "%08X | isr2\n", t.isr2);
	device_printf(sc->sc_dev, "%08X | isr3\n", t.isr3);
	device_printf(sc->sc_dev, "%08X | isr4\n", t.isr4);
	device_printf(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
	device_printf(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
	device_printf(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
	device_printf(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
	device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
	device_printf(sc->sc_dev, "%08X | l2p_addr_match\n",
	    t.l2p_addr_match);
	device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
	device_printf(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
	device_printf(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);

	/* Family 8000+ firmware also keeps a separate UMAC error table. */
	if (sc->sc_uc.uc_umac_error_event_table)
		iwm_nic_umac_error(sc);
}
   8280 
   8281 static void
   8282 iwm_nic_umac_error(struct iwm_softc *sc)
   8283 {
   8284 	struct iwm_umac_error_event_table t;
   8285 	uint32_t base;
   8286 
   8287 	base = sc->sc_uc.uc_umac_error_event_table;
   8288 
   8289 	if (base < 0x800000) {
   8290 		device_printf(sc->sc_dev,
   8291 		    "Invalid error log pointer 0x%08x\n", base);
   8292 		return;
   8293 	}
   8294 
   8295 	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
   8296 		device_printf(sc->sc_dev, "reading errlog failed\n");
   8297 		return;
   8298 	}
   8299 
   8300 	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
   8301 		device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
   8302 		device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
   8303 		    sc->sc_flags, t.valid);
   8304 	}
   8305 
   8306 	device_printf(sc->sc_dev, "0x%08X | %s\n", t.error_id,
   8307 		iwm_desc_lookup(t.error_id));
   8308 	device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
   8309 	device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
   8310 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
   8311 	    t.ilink1);
   8312 	device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
   8313 	    t.ilink2);
   8314 	device_printf(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
   8315 	device_printf(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
   8316 	device_printf(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
   8317 	device_printf(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
   8318 	device_printf(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
   8319 	device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
   8320 	    t.frame_pointer);
   8321 	device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
   8322 	    t.stack_pointer);
   8323 	device_printf(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
   8324 	device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
   8325 	    t.nic_isr_pref);
   8326 }
   8327 
   8328 static void
   8329 iwm_dump_driver_status(struct iwm_softc *sc)
   8330 {
   8331 	int i;
   8332 
   8333 	printf("driver status:\n");
   8334 	for (i = 0; i < IWM_MAX_QUEUES; i++) {
   8335 		struct iwm_tx_ring *ring = &sc->txq[i];
   8336 		printf("  tx ring %2d: qid=%-2d cur=%-3d "
   8337 		    "queued=%-3d\n",
   8338 		    i, ring->qid, ring->cur, ring->queued);
   8339 	}
   8340 	printf("  rx ring: cur=%d\n", sc->rxq.cur);
   8341 	printf("  802.11 state %s\n",
   8342 	    ieee80211_state_name[sc->sc_ic.ic_state]);
   8343 }
   8344 
/*
 * Sync the part of the RX buffer holding a response structure of type
 * *_var_ (located immediately after the packet header _pkt_) for CPU
 * reads, then point _var_ at it.  Relies on a local "data" (struct
 * iwm_rx_data *) being in scope.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Like SYNC_RESP_STRUCT, but for a variable-length payload of _len_ bytes. */
#define SYNC_RESP(_len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);			\
} while (/*CONSTCOND*/0)
   8357 
   8358 
/*
 * Process one received packet/notification from the RX ring and
 * dispatch it to the appropriate handler based on its (group, command)
 * wide id.  Responses to driver-issued commands (bit 0x80 of qid clear)
 * are completed via iwm_cmd_done() at the end.
 */
static void
iwm_rx_pkt(struct iwm_softc *sc, struct iwm_rx_data *data)
{
	struct iwm_rx_packet *pkt;
	int qid, idx, code;
	int handled = 1;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);
	pkt = mtod(data->m, struct iwm_rx_packet *);

	qid = pkt->hdr.qid;
	idx = pkt->hdr.idx;
	code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

	/* Ignore all-zero headers and frames the DMA engine marked invalid. */
	if (((qid & ~0x80) == 0 && idx == 0 && code == 0) ||
	    pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID))
		return;

	switch (code) {
	case IWM_REPLY_RX_PHY_CMD:
		iwm_rx_rx_phy_cmd(sc, pkt, data);
		break;

	case IWM_REPLY_RX_MPDU_CMD:
		iwm_rx_rx_mpdu(sc, pkt, data);
		break;

	case IWM_TX_CMD:
		iwm_rx_tx_cmd(sc, pkt, data);
		break;

#ifdef notyet
	case IWM_BA_NOTIF:
		iwm_rx_compressed_ba(sc, pkt);
		break;
#endif

	case IWM_MISSED_BEACONS_NOTIFICATION: {
		struct iwm_missed_beacons_notif *mb;

		SYNC_RESP_STRUCT(mb, pkt);
		iwm_rx_missed_beacons_notif(sc, mb);
		break;
	}

	case IWM_MFUART_LOAD_NOTIFICATION:
		break;

	case IWM_ALIVE: {
		/*
		 * The payload size distinguishes the ALIVE response
		 * version; each version reports the error/log table
		 * addresses and scheduler base used elsewhere.
		 */
		struct iwm_alive_resp_v1 *resp1;
		struct iwm_alive_resp_v2 *resp2;
		struct iwm_alive_resp_v3 *resp3;

		if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
			SYNC_RESP_STRUCT(resp1, pkt);
			sc->sc_uc.uc_error_event_table
			    = le32toh(resp1->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp1->log_event_table_ptr);
			sc->sched_base = le32toh(resp1->scd_base_ptr);
			if (resp1->status == IWM_ALIVE_STATUS_OK)
				sc->sc_uc.uc_ok = 1;
			else
				sc->sc_uc.uc_ok = 0;
		}


		if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
			SYNC_RESP_STRUCT(resp2, pkt);
			sc->sc_uc.uc_error_event_table
			    = le32toh(resp2->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp2->log_event_table_ptr);
			sc->sched_base = le32toh(resp2->scd_base_ptr);
			sc->sc_uc.uc_umac_error_event_table
			    = le32toh(resp2->error_info_addr);
			if (resp2->status == IWM_ALIVE_STATUS_OK)
				sc->sc_uc.uc_ok = 1;
			else
				sc->sc_uc.uc_ok = 0;
		}

		if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
			SYNC_RESP_STRUCT(resp3, pkt);
			sc->sc_uc.uc_error_event_table
			    = le32toh(resp3->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp3->log_event_table_ptr);
			sc->sched_base = le32toh(resp3->scd_base_ptr);
			sc->sc_uc.uc_umac_error_event_table
			    = le32toh(resp3->error_info_addr);
			if (resp3->status == IWM_ALIVE_STATUS_OK)
				sc->sc_uc.uc_ok = 1;
			else
				sc->sc_uc.uc_ok = 0;
		}

		/* Wake anybody sleeping on the firmware coming alive. */
		sc->sc_uc.uc_intr = 1;
		wakeup(&sc->sc_uc);
		break;
	}

	case IWM_CALIB_RES_NOTIF_PHY_DB: {
		struct iwm_calib_res_notif_phy_db *phy_db_notif;
		SYNC_RESP_STRUCT(phy_db_notif, pkt);
		uint16_t size = le16toh(phy_db_notif->length);
		/* Also sync the variable-length calibration payload. */
		bus_dmamap_sync(sc->sc_dmat, data->map,
		    sizeof(*pkt) + sizeof(*phy_db_notif),
		    size, BUS_DMASYNC_POSTREAD);
		iwm_phy_db_set_section(sc, phy_db_notif, size);
		sc->sc_init_complete |= IWM_CALIB_COMPLETE;
		wakeup(&sc->sc_init_complete);
		break;
	}

	case IWM_STATISTICS_NOTIFICATION: {
		struct iwm_notif_statistics *stats;
		SYNC_RESP_STRUCT(stats, pkt);
		memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
		sc->sc_noise = iwm_get_noise(&stats->rx.general);
		break;
	}

	case IWM_MCC_CHUB_UPDATE_CMD: {
		struct iwm_mcc_chub_notif *notif;
		SYNC_RESP_STRUCT(notif, pkt);
		iwm_mcc_update(sc, notif);
		break;
	}

	case IWM_DTS_MEASUREMENT_NOTIFICATION:
	case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
			 IWM_DTS_MEASUREMENT_NOTIF_WIDE):
	case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
			 IWM_TEMP_REPORTING_THRESHOLDS_CMD):
		break;

#if notyet
	case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
	    IWM_CT_KILL_NOTIFICATION): {
		struct iwm_ct_kill_notif *notif;
		SYNC_RESP_STRUCT(notif, pkt);
		printf("%s: device at critical temperature (%u degC), "
		    "stopping device\n",
		    DEVNAME(sc), le16toh(notif->temperature));
		sc->sc_flags |= IWM_FLAG_HW_ERR;
		task_add(systq, &sc->init_task);
		break;
	}
#endif

	/*
	 * Synchronous command responses: a waiter registered a response
	 * buffer in sc_cmd_resp_pkt[idx]; validate the packet and copy
	 * it out for them.
	 */
	case IWM_ADD_STA_KEY:
	case IWM_PHY_CONFIGURATION_CMD:
	case IWM_TX_ANT_CONFIGURATION_CMD:
	case IWM_ADD_STA:
	case IWM_MAC_CONTEXT_CMD:
	case IWM_REPLY_SF_CFG_CMD:
	case IWM_POWER_TABLE_CMD:
	case IWM_LTR_CONFIG:
	case IWM_PHY_CONTEXT_CMD:
	case IWM_BINDING_CONTEXT_CMD:
	case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_CFG_CMD):
	case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_REQ_UMAC):
	case IWM_WIDE_ID(IWM_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
	case IWM_SCAN_OFFLOAD_REQUEST_CMD:
	case IWM_SCAN_OFFLOAD_ABORT_CMD:
	case IWM_REPLY_BEACON_FILTERING_CMD:
	case IWM_MAC_PM_POWER_TABLE:
	case IWM_TIME_QUOTA_CMD:
	case IWM_REMOVE_STA:
	case IWM_TXPATH_FLUSH:
	case IWM_LQ_CMD:
	case IWM_WIDE_ID(IWM_LONG_GROUP,
			 IWM_FW_PAGING_BLOCK_CMD):
	case IWM_BT_CONFIG:
	case IWM_REPLY_THERMAL_MNG_BACKOFF:
	case IWM_NVM_ACCESS_CMD:
	case IWM_MCC_UPDATE_CMD:
	case IWM_TIME_EVENT_CMD: {
		size_t pkt_len;

		if (sc->sc_cmd_resp_pkt[idx] == NULL)
			break;

		pkt_len = sizeof(pkt->len_n_flags) +
		    iwm_rx_packet_len(pkt);

		/* On failure or bad length, drop the waiter's buffer. */
		if ((pkt->hdr.flags & IWM_CMD_FAILED_MSK) ||
		    pkt_len < sizeof(*pkt) ||
		    pkt_len > sc->sc_cmd_resp_len[idx]) {
			kmem_free(sc->sc_cmd_resp_pkt[idx],
			    sc->sc_cmd_resp_len[idx]);
			sc->sc_cmd_resp_pkt[idx] = NULL;
			sc->sc_cmd_resp_len[idx] = 0;
			break;
		}

		SYNC_RESP(pkt_len - sizeof(*pkt), pkt);
		memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
		break;
	}

	/* ignore */
	case IWM_PHY_DB_CMD:
		break;

	case IWM_INIT_COMPLETE_NOTIF:
		sc->sc_init_complete |= IWM_INIT_COMPLETE;
		wakeup(&sc->sc_init_complete);
		break;

	case IWM_SCAN_OFFLOAD_COMPLETE: {
		struct iwm_periodic_scan_complete *notif;
		SYNC_RESP_STRUCT(notif, pkt);
		break;
	}

	case IWM_SCAN_ITERATION_COMPLETE: {
		struct iwm_lmac_scan_complete_notif *notif;
		SYNC_RESP_STRUCT(notif, pkt);
		if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
			iwm_endscan(sc);
		break;
	}

	case IWM_SCAN_COMPLETE_UMAC: {
		struct iwm_umac_scan_complete *notif;
		SYNC_RESP_STRUCT(notif, pkt);
		if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
			iwm_endscan(sc);
		break;
	}

	case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
		struct iwm_umac_scan_iter_complete_notif *notif;
		SYNC_RESP_STRUCT(notif, pkt);
		if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
			iwm_endscan(sc);
		break;
	}

	case IWM_REPLY_ERROR: {
		struct iwm_error_resp *resp;
		SYNC_RESP_STRUCT(resp, pkt);
		device_printf(sc->sc_dev, "firmware error 0x%x, cmd 0x%x\n",
		    le32toh(resp->error_type), resp->cmd_id);
		break;
	}

	case IWM_TIME_EVENT_NOTIFICATION: {
		struct iwm_time_event_notif *notif;
		uint32_t action;
		SYNC_RESP_STRUCT(notif, pkt);

		/* Only the time event we started is of interest. */
		if (sc->sc_time_event_uid != le32toh(notif->unique_id))
			break;
		action = le32toh(notif->action);
		if (action & IWM_TE_V2_NOTIF_HOST_EVENT_END)
			CLR(sc->sc_flags, IWM_FLAG_TE_ACTIVE);
		break;
	}

	case IWM_WIDE_ID(IWM_SYSTEM_GROUP,
	    IWM_FSEQ_VER_MISMATCH_NOTIFICATION):
		break;

	/*
	 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
	 * messages. Just ignore them for now.
	 */
	case IWM_DEBUG_LOG_MSG:
		break;

	case IWM_MCAST_FILTER_CMD:
		break;

	case IWM_SCD_QUEUE_CFG: {
		struct iwm_scd_txq_cfg_rsp *rsp;
		SYNC_RESP_STRUCT(rsp, pkt);

		break;
	}

	case IWM_WIDE_ID(IWM_DATA_PATH_GROUP, IWM_DQA_ENABLE_CMD):
		break;

	case IWM_WIDE_ID(IWM_SYSTEM_GROUP, IWM_SOC_CONFIGURATION_CMD):
		break;

	default:
		handled = 0;
		device_printf(sc->sc_dev,
		    "unhandled firmware response 0x%x/0x%x rx ring %d[%d]\n",
		    code, pkt->len_n_flags, (qid & ~0x80), idx);
		break;
	}

	/*
	 * uCode sets bit 0x80 when it originates the notification,
	 * i.e. when the notification is not a direct response to a
	 * command sent by the driver.
	 * For example, uCode issues IWM_REPLY_RX when it sends a
	 * received frame to the driver.
	 */
	if (handled && !(qid & (1 << 7))) {
		iwm_cmd_done(sc, qid, idx);
	}
}
   8668 
   8669 static void
   8670 iwm_notif_intr(struct iwm_softc *sc)
   8671 {
   8672 	uint32_t wreg;
   8673 	uint16_t hw;
   8674 	int count;
   8675 
   8676 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
   8677 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
   8678 
   8679 #if notyet
   8680         if (sc->sc_mqrx_supported) {
   8681                 count = IWM_RX_MQ_RING_COUNT;
   8682                 wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
   8683         } else
   8684 #endif
   8685 	{
   8686                 count = IWM_RX_RING_COUNT;
   8687                 wreg = IWM_FH_RSCSR_CHNL0_WPTR;
   8688         }
   8689 
   8690         hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
   8691         hw &= (count - 1);
   8692         while (sc->rxq.cur != hw) {
   8693                 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
   8694                 iwm_rx_pkt(sc, data);
   8695                 sc->rxq.cur = (sc->rxq.cur + 1) % count;
   8696         }
   8697 
   8698         /*
   8699          * Tell the firmware what we have processed.
   8700          * Seems like the hardware gets upset unless we align the write by 8??
   8701          */
   8702 	hw = (hw == 0) ? count - 1 : hw - 1;
   8703 	IWM_WRITE(sc, wreg, hw & ~7);
   8704 }
   8705 
   8706 static int
   8707 iwm_intr(void *arg)
   8708 {
   8709 	struct iwm_softc *sc = arg;
   8710 
   8711 	/* Disable interrupts */
   8712 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
   8713 
   8714 	softint_schedule(sc->sc_soft_ih);
   8715 	return 1;
   8716 }
   8717 
/*
 * Soft interrupt handler: collect the pending interrupt cause bits
 * (from the ICT table or the CSR registers), acknowledge them, and
 * act on RF-kill, firmware/hardware errors, firmware-load and RX
 * completions.  Interrupts stay masked on the fatal paths.
 */
static void
iwm_softintr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	uint32_t r1, r2;
	int s;

	if (__predict_true(sc->sc_flags & IWM_FLAG_USE_ICT)) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
		    0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
		/*
		 * NOTE(review): htole32() on a value read from the ICT
		 * table looks like it should be le32toh(); identical on
		 * little-endian hosts — confirm on big-endian.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (tmp == 0)
			goto out_ena;	/* Interrupt not for us. */

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* Acknowledge. */
			sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
		    0, sc->ict_dma.size, BUS_DMASYNC_PREWRITE);

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/*
		 * Workaround for hardware bug where bits are falsely cleared
		 * when using interrupt coalescing.  Bit 15 should be set if
		 * bits 18 and 19 are set.
		 */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/* Expand the compressed ICT bit layout to CSR_INT format. */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* Interrupt not for us. */
	}
	if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
		return;	/* Hardware gone! */

	/* Acknowledge interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		/* Treat RF-kill on a running interface like a fatal stop. */
		if (iwm_check_rfkill(sc) && ifp != NULL &&
		    (ifp->if_flags & IFF_UP))
			goto fatal;
	}

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {

		iwm_nic_error(sc);
		iwm_dump_driver_status(sc);

		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
 fatal:
		if (ifp != NULL) {
			s = splnet();
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
			splx(s);
		}
		/* Don't restore interrupt mask */
		return;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		goto fatal;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX |
	    IWM_CSR_INT_BIT_RX_PERIODIC)) {
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) {
			IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
		}

		if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
			IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		}

		/* Disable periodic interrupt; we use it as just a one-shot. */
		IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);

                /*
                 * Enable periodic interrupt in 8 msec only if we received
                 * real RX interrupt (instead of just periodic int), to catch
                 * any dangling Rx interrupt.  If it was just the periodic
                 * interrupt, there was no dangling Rx activity, and no need
                 * to extend the periodic interrupt; one-shot is enough.
                 */
                if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX))
                        IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
                            IWM_CSR_INT_PERIODIC_ENA);

		iwm_notif_intr(sc);
	}

out_ena:
	iwm_restore_interrupts(sc);
}
   8842 
   8843 /*
   8844  * Autoconf glue-sniffing
   8845  */
   8846 
/* PCI product ids this driver attaches to (all Intel; vendor checked
 * separately in iwm_match()). */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3168,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_4165_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_4165_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_8265,
};
   8863 
   8864 static int
   8865 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
   8866 {
   8867 	struct pci_attach_args *pa = aux;
   8868 
   8869 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
   8870 		return 0;
   8871 
   8872 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
   8873 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
   8874 			return 1;
   8875 
   8876 	return 0;
   8877 }
   8878 
   8879 static int
   8880 iwm_preinit(struct iwm_softc *sc, bool printme)
   8881 {
   8882 	int err;
   8883 
   8884 	err = iwm_prepare_card_hw(sc);
   8885 	if (err) {
   8886 		aprint_error_dev(sc->sc_dev, "could not prepare hardware\n");
   8887 		return err;
   8888 	}
   8889 
   8890 	if (sc->attached) {
   8891 		return 0;
   8892 	}
   8893 
   8894 	err = iwm_start_hw(sc);
   8895 	if (err) {
   8896 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   8897 		return err;
   8898 	}
   8899 
   8900 	err = iwm_run_init_mvm_ucode(sc, 1);
   8901 	iwm_stop_device(sc);
   8902 	if (err)
   8903 		return err;
   8904 
   8905 	sc->attached = 1;
   8906 
   8907 	if (printme)
   8908 		aprint_normal_dev(sc->sc_dev,
   8909 		    "hw rev 0x%x, fw ver %s, address %s\n",
   8910 		    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
   8911 		    ether_sprintf(sc->sc_nvm.hw_addr));
   8912 
   8913 	return 0;
   8914 }
   8915 
   8916 static void
   8917 iwm_attach_hook(device_t dev)
   8918 {
   8919 	struct iwm_softc *sc = device_private(dev);
   8920 
   8921 	iwm_config_complete(sc);
   8922 }
   8923 
   8924 static void
   8925 iwm_attach(device_t parent, device_t self, void *aux)
   8926 {
   8927 	struct iwm_softc *sc = device_private(self);
   8928 	struct pci_attach_args *pa = aux;
   8929 	pcireg_t reg, memtype;
   8930 	char intrbuf[PCI_INTRSTR_LEN];
   8931 	const char *intrstr;
   8932 	int err;
   8933 	int txq_i;
   8934 	const struct sysctlnode *node;
   8935 
   8936 	sc->sc_dev = self;
   8937 	sc->sc_pct = pa->pa_pc;
   8938 	sc->sc_pcitag = pa->pa_tag;
   8939 	sc->sc_dmat = pa->pa_dmat;
   8940 	sc->sc_pciid = pa->pa_id;
   8941 
   8942 	pci_aprint_devinfo(pa, NULL);
   8943 
   8944 	mutex_init(&sc->sc_nic_mtx, MUTEX_DEFAULT, IPL_NET);
   8945 
   8946 	if (workqueue_create(&sc->sc_nswq, "iwmns",
   8947 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
   8948 		panic("%s: could not create workqueue: newstate",
   8949 		    device_xname(self));
   8950 	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
   8951 	if (sc->sc_soft_ih == NULL)
   8952 		panic("%s: could not establish softint", device_xname(self));
   8953 
   8954 	/*
   8955 	 * Get the offset of the PCI Express Capability Structure in PCI
   8956 	 * Configuration Space.
   8957 	 */
   8958 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
   8959 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
   8960 	if (err == 0) {
   8961 		aprint_error_dev(self,
   8962 		    "PCIe capability structure not found!\n");
   8963 		return;
   8964 	}
   8965 
   8966 	/* Clear device-specific "PCI retry timeout" register (41h). */
   8967 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
   8968 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
   8969 
   8970 	/* Enable bus-mastering */
   8971 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   8972 	reg |= PCI_COMMAND_MASTER_ENABLE;
   8973 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
   8974 
   8975 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
   8976 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
   8977 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
   8978 	if (err) {
   8979 		aprint_error_dev(self, "can't map mem space\n");
   8980 		return;
   8981 	}
   8982 
   8983 	/* Install interrupt handler. */
   8984 	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
   8985 	if (err) {
   8986 		aprint_error_dev(self, "can't allocate interrupt\n");
   8987 		return;
   8988 	}
   8989 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   8990 	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX)
   8991 		CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
   8992 	else
   8993 		SET(reg, PCI_COMMAND_INTERRUPT_DISABLE);
   8994 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
   8995 	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
   8996 	    sizeof(intrbuf));
   8997 	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
   8998 	    IPL_NET, iwm_intr, sc, device_xname(self));
   8999 	if (sc->sc_ih == NULL) {
   9000 		aprint_error_dev(self, "can't establish interrupt");
   9001 		if (intrstr != NULL)
   9002 			aprint_error(" at %s", intrstr);
   9003 		aprint_error("\n");
   9004 		return;
   9005 	}
   9006 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
   9007 
   9008 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
   9009 	switch (PCI_PRODUCT(sc->sc_pciid)) {
   9010 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
   9011 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
   9012 		sc->sc_fwname = "iwlwifi-3160-17.ucode";
   9013 		sc->host_interrupt_operation_mode = 1;
   9014 		sc->apmg_wake_up_wa = 1;
   9015 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   9016 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   9017 		sc->nvm_type = IWM_NVM;
   9018 		break;
   9019 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
   9020 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
   9021 #if 0
   9022 /* 7265D-29 */
   9023 		sc->sc_fwname = "iwlwifi-7265D-29.ucode";
   9024 #else
   9025 /* 7265D-22 */
   9026 		sc->sc_fwname = "iwlwifi-7265D-22.ucode";
   9027 #endif
   9028 		sc->host_interrupt_operation_mode = 0;
   9029 		sc->apmg_wake_up_wa = 1;
   9030 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   9031 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   9032 		sc->nvm_type = IWM_NVM;
   9033 		break;
   9034 	case PCI_PRODUCT_INTEL_WIFI_LINK_3168:
   9035 		sc->sc_fwname = "iwlwifi-3168-29.ucode";
   9036 		sc->host_interrupt_operation_mode = 0;
   9037 		sc->apmg_wake_up_wa = 1;
   9038 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   9039 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   9040 		sc->nvm_type = IWM_NVM_SDP;
   9041 		break;
   9042 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
   9043 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
   9044 		sc->sc_fwname = "iwlwifi-7260-17.ucode";
   9045 		sc->host_interrupt_operation_mode = 1;
   9046 		sc->apmg_wake_up_wa = 1;
   9047 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   9048 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   9049 		sc->nvm_type = IWM_NVM;
   9050 		break;
   9051 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
   9052 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
   9053 		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
   9054 		    IWM_CSR_HW_REV_TYPE_7265D ?
   9055 #if 0
   9056 /* 7265D-29 */
   9057 		    "iwlwifi-7265D-29.ucode": "iwlwifi-7265-17.ucode";
   9058 #else
   9059 /* 7265D-22 */
   9060 		    "iwlwifi-7265D-22.ucode": "iwlwifi-7265-17.ucode";
   9061 #endif
   9062 		sc->host_interrupt_operation_mode = 0;
   9063 		sc->apmg_wake_up_wa = 1;
   9064 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   9065 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   9066 		sc->nvm_type = IWM_NVM;
   9067 		break;
   9068 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
   9069 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
   9070 	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_1:
   9071 	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_2:
   9072 #if 0
   9073 /* 8000C-36 */
   9074 		sc->sc_fwname = "iwlwifi-8000C-36.ucode";
   9075 #else
   9076 /* 8000C-22 */
   9077 		sc->sc_fwname = "iwlwifi-8000C-22.ucode";
   9078 #endif
   9079 		sc->host_interrupt_operation_mode = 0;
   9080 		sc->apmg_wake_up_wa = 0;
   9081 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
   9082 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
   9083 		sc->nvm_type = IWM_NVM_EXT;
   9084 		break;
   9085 	case PCI_PRODUCT_INTEL_WIFI_LINK_8265:
   9086 #if 0
   9087 /* 8265-36 */
   9088 		sc->sc_fwname = "iwlwifi-8265-36.ucode";
   9089 #else
   9090 /* 8265-22 */
   9091 		sc->sc_fwname = "iwlwifi-8265-22.ucode";
   9092 #endif
   9093 		sc->host_interrupt_operation_mode = 0;
   9094 		sc->apmg_wake_up_wa = 0;
   9095 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
   9096 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
   9097 		sc->nvm_type = IWM_NVM_EXT;
   9098 		break;
   9099 /* INTEL_WL_9560 -> 9000-46 */
   9100 	default:
   9101 		aprint_error_dev(self, "unknown product %#x",
   9102 		    PCI_PRODUCT(sc->sc_pciid));
   9103 		return;
   9104 	}
   9105 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
   9106 
   9107 	/*
   9108 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
   9109 	 * changed, and now the revision step also includes bit 0-1 (no more
   9110 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
   9111 	 * in the old format.
   9112 	 */
   9113 
   9114 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   9115 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
   9116 		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
   9117 
   9118 	if (iwm_prepare_card_hw(sc) != 0) {
   9119 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   9120 		return;
   9121 	}
   9122 
   9123 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
   9124 		uint32_t hw_step;
   9125 
   9126 		/*
   9127 		 * In order to recognize C step the driver should read the
   9128 		 * chip version id located at the AUX bus MISC address.
   9129 		 */
   9130 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
   9131 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
   9132 		DELAY(2);
   9133 
   9134 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   9135 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   9136 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   9137 				   25000);
   9138 		if (!err) {
   9139 			aprint_error_dev(sc->sc_dev,
   9140 			    "failed to wake up the nic\n");
   9141 			return;
   9142 		}
   9143 
   9144 		if (iwm_nic_lock(sc)) {
   9145 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
   9146 			hw_step |= IWM_ENABLE_WFPM;
   9147 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
   9148 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
   9149 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
   9150 			if (hw_step == 0x3)
   9151 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
   9152 				    (IWM_SILICON_C_STEP << 2);
   9153 			iwm_nic_unlock(sc);
   9154 		} else {
   9155 			aprint_error_dev(sc->sc_dev,
   9156 			    "failed to lock the nic\n");
   9157 			return;
   9158 		}
   9159 	}
   9160 
   9161 	/*
   9162 	 * Allocate DMA memory for firmware transfers.
   9163 	 * Must be aligned on a 16-byte boundary.
   9164 	 */
   9165 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
   9166 	    16);
   9167 	if (err) {
   9168 		aprint_error_dev(sc->sc_dev,
   9169 		    "could not allocate memory for firmware\n");
   9170 		return;
   9171 	}
   9172 
   9173 	/* Allocate "Keep Warm" page, used internally by the card. */
   9174 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
   9175 	if (err) {
   9176 		aprint_error_dev(sc->sc_dev,
   9177 		    "could not allocate keep warm page\n");
   9178 		goto fail1;
   9179 	}
   9180 
   9181 	/* Allocate interrupt cause table (ICT).*/
   9182 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
   9183 	    1 << IWM_ICT_PADDR_SHIFT);
   9184 	if (err) {
   9185 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
   9186 		goto fail2;
   9187 	}
   9188 
   9189 	/* TX scheduler rings must be aligned on a 1KB boundary. */
   9190 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
   9191 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
   9192 	if (err) {
   9193 		aprint_error_dev(sc->sc_dev,
   9194 		    "could not allocate TX scheduler rings\n");
   9195 		goto fail3;
   9196 	}
   9197 
   9198 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
   9199 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
   9200 		if (err) {
   9201 			aprint_error_dev(sc->sc_dev,
   9202 			    "could not allocate TX ring %d\n", txq_i);
   9203 			goto fail4;
   9204 		}
   9205 	}
   9206 
   9207 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
   9208 	if (err) {
   9209 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
   9210 		goto fail5;
   9211 	}
   9212 
   9213 	/* Clear pending interrupts. */
   9214 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
   9215 
   9216 	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
   9217 	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
   9218 	    SYSCTL_DESCR("iwm per-controller controls"),
   9219 	    NULL, 0, NULL, 0,
   9220 	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
   9221 	    CTL_EOL)) != 0) {
   9222 		aprint_normal_dev(sc->sc_dev,
   9223 		    "couldn't create iwm per-controller sysctl node\n");
   9224 	}
   9225 	if (err == 0) {
   9226 		int iwm_nodenum = node->sysctl_num;
   9227 
   9228 		/* Reload firmware sysctl node */
   9229 		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
   9230 		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
   9231 		    SYSCTL_DESCR("Reload firmware"),
   9232 		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
   9233 		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
   9234 		    CTL_EOL)) != 0) {
   9235 			aprint_normal_dev(sc->sc_dev,
   9236 			    "couldn't create load_fw sysctl node\n");
   9237 		}
   9238 	}
   9239 
   9240 	callout_init(&sc->sc_calib_to, 0);
   9241 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
   9242 	callout_init(&sc->sc_led_blink_to, 0);
   9243 	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
   9244 
   9245 	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
   9246 	    iwm_setrates_cb, sc, PRI_NONE, IPL_NET, 0))
   9247 		panic("%s: could not create workqueue: setrates",
   9248 		    device_xname(self));
   9249 
   9250 #ifndef IEEE80211_NO_HT
   9251 	if (workqueue_create(&sc->sc_bawq, "iwmba",
   9252 	    iwm_ba_cb, sc, PRI_NONE, IPL_NET, 0))
   9253 		panic("%s: could not create workqueue: blockack",
   9254 		    device_xname(self));
   9255 	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
   9256 	    iwm_htprot_cb, sc, PRI_NONE, IPL_NET, 0))
   9257 		panic("%s: could not create workqueue: htprot",
   9258 		    device_xname(self));
   9259 #endif
   9260 
   9261 	/*
   9262 	 * We can't do normal attach before the file system is mounted
   9263 	 * because we cannot read the MAC address without loading the
   9264 	 * firmware from disk.  So we postpone until mountroot is done.
   9265 	 * Notably, this will require a full driver unload/load cycle
   9266 	 * (or reboot) in case the firmware is not present when the
   9267 	 * hook runs.
   9268 	 */
   9269 	config_mountroot(self, iwm_attach_hook);
   9270 
   9271 	return;
   9272 
   9273 fail5:	while (--txq_i >= 0)
   9274 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
   9275 fail4:	iwm_dma_contig_free(&sc->sched_dma);
   9276 fail3:	if (sc->ict_dma.vaddr != NULL)
   9277 		iwm_dma_contig_free(&sc->ict_dma);
   9278 fail2:	iwm_dma_contig_free(&sc->kw_dma);
   9279 fail1:	iwm_dma_contig_free(&sc->fw_dma);
   9280 }
   9281 
/*
 * Deferred second stage of device attachment.  Runs iwm_preinit()
 * (which needs firmware from disk, hence the mountroot deferral noted
 * in attach), then wires up the 802.11 layer and network interface.
 * Returns 0 on success or the error from iwm_preinit().
 * Must only be called once (asserted below).
 */
static int
iwm_config_complete(struct iwm_softc *sc)
{
	device_t self = sc->sc_dev;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int err;

	KASSERT(!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED));

	/* Firmware-dependent preinit; presumably loads firmware and
	 * reads the NVM (MAC address etc.) — see comment in attach. */
	err = iwm_preinit(sc, true);
	if (err)
		return err;

	/*
	 * Attach interface
	 */
	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_WPA |		/* 802.11i */
#ifdef notyet
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
#endif
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

#ifndef IEEE80211_NO_HT
	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
	ic->ic_htxcaps = 0;
	ic->ic_txbfcaps = 0;
	ic->ic_aselcaps = 0;
	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
#endif

	/* all hardware can do 2.4GHz band */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	/* not all hardware can do 5GHz band */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;

#ifndef IEEE80211_NO_HT
	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);
#endif

	/* Give each PHY context its index as an identifier. */
	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	/* AMRR rate-adaptation success thresholds. */
	sc->sc_amrr.amrr_min_success_threshold =  1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

#if 0
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
#endif

	/* Fill in the ifnet: driver entry points and flags. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_stop = iwm_stop;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	/* NB: order matters — initialize, 802.11 attach, then register. */
	if_initialize(ifp);
	ieee80211_ifattach(ic);
	/* Use common softint-based if_input */
	ifp->if_percpuq = if_percpuq_create(ifp);
	if_register(ifp);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;

	/* XXX media locking needs revisiting */
	mutex_init(&sc->sc_media_mtx, MUTEX_DEFAULT, IPL_SOFTNET);
	ieee80211_media_init_with_lock(ic,
	    iwm_media_change, ieee80211_media_status, &sc->sc_media_mtx);

	ieee80211_announce(ic);

	/* Attach the bpf tap for radiotap monitoring. */
	iwm_radiotap_attach(sc);

	/* Register with pmf(9); no suspend/resume handlers yet. */
	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	return 0;
}
   9390 
   9391 void
   9392 iwm_radiotap_attach(struct iwm_softc *sc)
   9393 {
   9394 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
   9395 
   9396 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
   9397 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
   9398 	    &sc->sc_drvbpf);
   9399 
   9400 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
   9401 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
   9402 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
   9403 
   9404 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
   9405 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
   9406 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
   9407 }
   9408 
   9409 #if 0
/*
 * Stop the interface and, if it is still administratively up,
 * reinitialize it.  Serialized against concurrent ioctls via
 * ioctl_rwl and splnet.  (Currently compiled out via #if 0.)
 */
static void
iwm_init_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int s;

	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	iwm_stop(ifp, 0);
	/* After iwm_stop(), RUNNING is expected clear; re-init if UP. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}
   9427 
/*
 * Wake the device: clear the PCI retry timeout register, then
 * restart the interface via iwm_init_task().  Presumably intended
 * as a resume hook — currently compiled out via #if 0.
 */
static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}
   9439 
/*
 * autoconf(9) activation hook: on DVACT_DEACTIVATE, stop the
 * interface if it is running; all other actions are unsupported.
 * (Currently compiled out via #if 0.)
 */
static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 1);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
   9455 #endif
   9456 
/*
 * Autoconfiguration glue: match/attach only; no detach or activate
 * handlers are registered (the candidates above are compiled out).
 */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, NULL);
   9459 
   9460 static int
   9461 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
   9462 {
   9463 	struct sysctlnode node;
   9464 	struct iwm_softc *sc;
   9465 	int err, t;
   9466 
   9467 	node = *rnode;
   9468 	sc = node.sysctl_data;
   9469 	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
   9470 	node.sysctl_data = &t;
   9471 	err = sysctl_lookup(SYSCTLFN_CALL(&node));
   9472 	if (err || newp == NULL)
   9473 		return err;
   9474 
   9475 	if (t == 0)
   9476 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
   9477 	return 0;
   9478 }
   9479 
   9480 SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
   9481 {
   9482 	const struct sysctlnode *rnode;
   9483 #ifdef IWM_DEBUG
   9484 	const struct sysctlnode *cnode;
   9485 #endif /* IWM_DEBUG */
   9486 	int rc;
   9487 
   9488 	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
   9489 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
   9490 	    SYSCTL_DESCR("iwm global controls"),
   9491 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   9492 		goto err;
   9493 
   9494 	iwm_sysctl_root_num = rnode->sysctl_num;
   9495 
   9496 #ifdef IWM_DEBUG
   9497 	/* control debugging printfs */
   9498 	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
   9499 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
   9500 	    "debug", SYSCTL_DESCR("Enable debugging output"),
   9501 	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
   9502 		goto err;
   9503 #endif /* IWM_DEBUG */
   9504 
   9505 	return;
   9506 
   9507  err:
   9508 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
   9509 }
   9510