Home | History | Annotate | Line # | Download | only in pci
if_iwm.c revision 1.53
      1 /*	$NetBSD: if_iwm.c,v 1.53 2017/01/09 08:10:25 nonaka Exp $	*/
      2 /*	OpenBSD: if_iwm.c,v 1.147 2016/11/17 14:12:33 stsp Exp	*/
      3 #define IEEE80211_NO_HT
      4 /*
      5  * Copyright (c) 2014, 2016 genua gmbh <info (at) genua.de>
      6  *   Author: Stefan Sperling <stsp (at) openbsd.org>
      7  * Copyright (c) 2014 Fixup Software Ltd.
      8  *
      9  * Permission to use, copy, modify, and distribute this software for any
     10  * purpose with or without fee is hereby granted, provided that the above
     11  * copyright notice and this permission notice appear in all copies.
     12  *
     13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     20  */
     21 
     22 /*-
     23  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
     24  * which were used as the reference documentation for this implementation.
     25  *
     26  ***********************************************************************
     27  *
     28  * This file is provided under a dual BSD/GPLv2 license.  When using or
     29  * redistributing this file, you may do so under either license.
     30  *
     31  * GPL LICENSE SUMMARY
     32  *
     33  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
     34  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
     35  * Copyright(c) 2016 Intel Deutschland GmbH
     36  *
     37  * This program is free software; you can redistribute it and/or modify
     38  * it under the terms of version 2 of the GNU General Public License as
     39  * published by the Free Software Foundation.
     40  *
     41  * This program is distributed in the hope that it will be useful, but
     42  * WITHOUT ANY WARRANTY; without even the implied warranty of
     43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     44  * General Public License for more details.
     45  *
     46  * You should have received a copy of the GNU General Public License
     47  * along with this program; if not, write to the Free Software
     48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
     49  * USA
     50  *
     51  * The full GNU General Public License is included in this distribution
     52  * in the file called COPYING.
     53  *
     54  * Contact Information:
     55  *  Intel Linux Wireless <ilw (at) linux.intel.com>
     56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
     57  *
     58  *
     59  * BSD LICENSE
     60  *
     61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
     62  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
     63  * Copyright(c) 2016 Intel Deutschland GmbH
     64  * All rights reserved.
     65  *
     66  * Redistribution and use in source and binary forms, with or without
     67  * modification, are permitted provided that the following conditions
     68  * are met:
     69  *
     70  *  * Redistributions of source code must retain the above copyright
     71  *    notice, this list of conditions and the following disclaimer.
     72  *  * Redistributions in binary form must reproduce the above copyright
     73  *    notice, this list of conditions and the following disclaimer in
     74  *    the documentation and/or other materials provided with the
     75  *    distribution.
     76  *  * Neither the name Intel Corporation nor the names of its
     77  *    contributors may be used to endorse or promote products derived
     78  *    from this software without specific prior written permission.
     79  *
     80  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     81  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     82  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     83  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     84  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     85  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     86  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     87  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     88  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     89  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     90  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     91  */
     92 
     93 /*-
     94  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
     95  *
     96  * Permission to use, copy, modify, and distribute this software for any
     97  * purpose with or without fee is hereby granted, provided that the above
     98  * copyright notice and this permission notice appear in all copies.
     99  *
    100  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    101  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    102  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    103  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    104  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    105  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    106  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    107  */
    108 
    109 #include <sys/cdefs.h>
    110 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.53 2017/01/09 08:10:25 nonaka Exp $");
    111 
    112 #include <sys/param.h>
    113 #include <sys/conf.h>
    114 #include <sys/kernel.h>
    115 #include <sys/kmem.h>
    116 #include <sys/mbuf.h>
    117 #include <sys/mutex.h>
    118 #include <sys/proc.h>
    119 #include <sys/socket.h>
    120 #include <sys/sockio.h>
    121 #include <sys/sysctl.h>
    122 #include <sys/systm.h>
    123 
    124 #include <sys/cpu.h>
    125 #include <sys/bus.h>
    126 #include <sys/workqueue.h>
    127 #include <machine/endian.h>
    128 #include <machine/intr.h>
    129 
    130 #include <dev/pci/pcireg.h>
    131 #include <dev/pci/pcivar.h>
    132 #include <dev/pci/pcidevs.h>
    133 #include <dev/firmload.h>
    134 
    135 #include <net/bpf.h>
    136 #include <net/if.h>
    137 #include <net/if_dl.h>
    138 #include <net/if_media.h>
    139 #include <net/if_ether.h>
    140 
    141 #include <netinet/in.h>
    142 #include <netinet/ip.h>
    143 
    144 #include <net80211/ieee80211_var.h>
    145 #include <net80211/ieee80211_amrr.h>
    146 #include <net80211/ieee80211_radiotap.h>
    147 
    148 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
    149 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
    150 
    151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
    152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
    153 
    154 #ifdef IWM_DEBUG
    155 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
    156 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
    157 int iwm_debug = 0;
    158 #else
    159 #define DPRINTF(x)	do { ; } while (0)
    160 #define DPRINTFN(n, x)	do { ; } while (0)
    161 #endif
    162 
    163 #include <dev/pci/if_iwmreg.h>
    164 #include <dev/pci/if_iwmvar.h>
    165 
    166 static const uint8_t iwm_nvm_channels[] = {
    167 	/* 2.4 GHz */
    168 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    169 	/* 5 GHz */
    170 	36, 40, 44, 48, 52, 56, 60, 64,
    171 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
    172 	149, 153, 157, 161, 165
    173 };
    174 
    175 static const uint8_t iwm_nvm_channels_8000[] = {
    176 	/* 2.4 GHz */
    177 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    178 	/* 5 GHz */
    179 	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
    180 	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
    181 	149, 153, 157, 161, 165, 169, 173, 177, 181
    182 };
    183 
    184 #define IWM_NUM_2GHZ_CHANNELS	14
    185 
    186 static const struct iwm_rate {
    187 	uint8_t rate;
    188 	uint8_t plcp;
    189 	uint8_t ht_plcp;
    190 } iwm_rates[] = {
    191 		/* Legacy */		/* HT */
    192 	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
    193 	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
    194 	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
    195 	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
    196 	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
    197 	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
    198 	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
    199 	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
    200 	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
    201 	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
    202 	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
    203 	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
    204 	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
    205 };
    206 #define IWM_RIDX_CCK	0
    207 #define IWM_RIDX_OFDM	4
    208 #define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
    209 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
    210 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
    211 
    212 #ifndef IEEE80211_NO_HT
    213 /* Convert an MCS index into an iwm_rates[] index. */
    214 static const int iwm_mcs2ridx[] = {
    215 	IWM_RATE_MCS_0_INDEX,
    216 	IWM_RATE_MCS_1_INDEX,
    217 	IWM_RATE_MCS_2_INDEX,
    218 	IWM_RATE_MCS_3_INDEX,
    219 	IWM_RATE_MCS_4_INDEX,
    220 	IWM_RATE_MCS_5_INDEX,
    221 	IWM_RATE_MCS_6_INDEX,
    222 	IWM_RATE_MCS_7_INDEX,
    223 };
    224 #endif
    225 
    226 struct iwm_nvm_section {
    227 	uint16_t length;
    228 	uint8_t *data;
    229 };
    230 
    231 struct iwm_newstate_state {
    232 	struct work ns_wk;
    233 	enum ieee80211_state ns_nstate;
    234 	int ns_arg;
    235 	int ns_generation;
    236 };
    237 
    238 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
    239 static int	iwm_firmware_store_section(struct iwm_softc *,
    240 		    enum iwm_ucode_type, uint8_t *, size_t);
    241 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
    242 static int	iwm_read_firmware(struct iwm_softc *);
    243 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
    244 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
    245 #ifdef IWM_DEBUG
    246 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
    247 #endif
    248 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
    249 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
    250 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
    251 static int	iwm_nic_lock(struct iwm_softc *);
    252 static void	iwm_nic_unlock(struct iwm_softc *);
    253 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
    254 		    uint32_t);
    255 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    256 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    257 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
    258 		    bus_size_t, bus_size_t);
    259 static void	iwm_dma_contig_free(struct iwm_dma_info *);
    260 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    261 static void	iwm_disable_rx_dma(struct iwm_softc *);
    262 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    263 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    264 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
    265 		    int);
    266 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    267 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    268 static void	iwm_enable_rfkill_int(struct iwm_softc *);
    269 static int	iwm_check_rfkill(struct iwm_softc *);
    270 static void	iwm_enable_interrupts(struct iwm_softc *);
    271 static void	iwm_restore_interrupts(struct iwm_softc *);
    272 static void	iwm_disable_interrupts(struct iwm_softc *);
    273 static void	iwm_ict_reset(struct iwm_softc *);
    274 static int	iwm_set_hw_ready(struct iwm_softc *);
    275 static int	iwm_prepare_card_hw(struct iwm_softc *);
    276 static void	iwm_apm_config(struct iwm_softc *);
    277 static int	iwm_apm_init(struct iwm_softc *);
    278 static void	iwm_apm_stop(struct iwm_softc *);
    279 static int	iwm_allow_mcast(struct iwm_softc *);
    280 static int	iwm_start_hw(struct iwm_softc *);
    281 static void	iwm_stop_device(struct iwm_softc *);
    282 static void	iwm_nic_config(struct iwm_softc *);
    283 static int	iwm_nic_rx_init(struct iwm_softc *);
    284 static int	iwm_nic_tx_init(struct iwm_softc *);
    285 static int	iwm_nic_init(struct iwm_softc *);
    286 static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
    287 static int	iwm_post_alive(struct iwm_softc *);
    288 static struct iwm_phy_db_entry *
    289 		iwm_phy_db_get_section(struct iwm_softc *,
    290 		    enum iwm_phy_db_section_type, uint16_t);
    291 static int	iwm_phy_db_set_section(struct iwm_softc *,
    292 		    struct iwm_calib_res_notif_phy_db *, uint16_t);
    293 static int	iwm_is_valid_channel(uint16_t);
    294 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
    295 static uint16_t iwm_channel_id_to_papd(uint16_t);
    296 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
    297 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
    298 		    uint8_t **, uint16_t *, uint16_t);
    299 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
    300 		    void *);
    301 static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
    302 		    enum iwm_phy_db_section_type, uint8_t);
    303 static int	iwm_send_phy_db_data(struct iwm_softc *);
    304 static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
    305 		    struct iwm_time_event_cmd_v1 *);
    306 static int	iwm_send_time_event_cmd(struct iwm_softc *,
    307 		    const struct iwm_time_event_cmd_v2 *);
    308 static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
    309 		    uint32_t, uint32_t);
    310 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
    311 		    uint16_t, uint8_t *, uint16_t *);
    312 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
    313 		    uint16_t *, size_t);
    314 static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
    315 		    const uint8_t *, size_t);
    316 #ifndef IEEE80211_NO_HT
    317 static void	iwm_setup_ht_rates(struct iwm_softc *);
    318 static void	iwm_htprot_task(void *);
    319 static void	iwm_update_htprot(struct ieee80211com *,
    320 		    struct ieee80211_node *);
    321 static int	iwm_ampdu_rx_start(struct ieee80211com *,
    322 		    struct ieee80211_node *, uint8_t);
    323 static void	iwm_ampdu_rx_stop(struct ieee80211com *,
    324 		    struct ieee80211_node *, uint8_t);
    325 static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
    326 		    uint8_t, uint16_t, int);
    327 #ifdef notyet
    328 static int	iwm_ampdu_tx_start(struct ieee80211com *,
    329 		    struct ieee80211_node *, uint8_t);
    330 static void	iwm_ampdu_tx_stop(struct ieee80211com *,
    331 		    struct ieee80211_node *, uint8_t);
    332 #endif
    333 static void	iwm_ba_task(void *);
    334 #endif
    335 
    336 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
    337 		    const uint16_t *, const uint16_t *, const uint16_t *,
    338 		    const uint16_t *, const uint16_t *);
    339 static void	iwm_set_hw_address_8000(struct iwm_softc *,
    340 		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
    341 static int	iwm_parse_nvm_sections(struct iwm_softc *,
    342 		    struct iwm_nvm_section *);
    343 static int	iwm_nvm_init(struct iwm_softc *);
    344 static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
    345 		    const uint8_t *, uint32_t);
    346 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
    347 		    const uint8_t *, uint32_t);
    348 static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
    349 static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
    350 		    struct iwm_fw_sects *, int , int *);
    351 static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
    352 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
    353 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
    354 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
    355 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
    356 static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
    357 		    enum iwm_ucode_type);
    358 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
    359 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
    360 static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
    361 static int	iwm_get_signal_strength(struct iwm_softc *,
    362 		    struct iwm_rx_phy_info *);
    363 static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
    364 		    struct iwm_rx_packet *, struct iwm_rx_data *);
    365 static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
    366 static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
    367 		    struct iwm_rx_data *);
    368 static void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,		    struct iwm_node *);
    369 static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
    370 		    struct iwm_rx_data *);
    371 static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
    372 		    uint32_t);
    373 #if 0
    374 static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
    375 static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
    376 #endif
    377 static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
    378 		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
    379 static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
    380 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
    381 		    uint8_t, uint8_t);
    382 static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
    383 		    uint8_t, uint8_t, uint32_t, uint32_t);
    384 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
    385 static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
    386 		    uint16_t, const void *);
    387 static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
    388 		    uint32_t *);
    389 static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
    390 		    const void *, uint32_t *);
    391 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
    392 static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
    393 #if 0
    394 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
    395 		    uint16_t);
    396 #endif
    397 static const struct iwm_rate *
    398 		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
    399 		    struct ieee80211_frame *, struct iwm_tx_cmd *);
    400 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
    401 		    struct ieee80211_node *, int);
    402 static void	iwm_led_enable(struct iwm_softc *);
    403 static void	iwm_led_disable(struct iwm_softc *);
    404 static int	iwm_led_is_enabled(struct iwm_softc *);
    405 static void	iwm_led_blink_timeout(void *);
    406 static void	iwm_led_blink_start(struct iwm_softc *);
    407 static void	iwm_led_blink_stop(struct iwm_softc *);
    408 static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
    409 		    struct iwm_beacon_filter_cmd *);
    410 static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
    411 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
    412 static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
    413 		    int);
    414 static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
    415 		    struct iwm_mac_power_cmd *);
    416 static int	iwm_power_mac_update_mode(struct iwm_softc *,
    417 		    struct iwm_node *);
    418 static int	iwm_power_update_device(struct iwm_softc *);
    419 #ifdef notyet
    420 static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
    421 #endif
    422 static int	iwm_disable_beacon_filter(struct iwm_softc *);
    423 static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
    424 static int	iwm_add_aux_sta(struct iwm_softc *);
    425 static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
    426 static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
    427 #ifdef notyet
    428 static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
    429 static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
    430 #endif
    431 static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
    432 		    struct iwm_scan_channel_cfg_lmac *, int);
    433 static int	iwm_fill_probe_req(struct iwm_softc *,
    434 		    struct iwm_scan_probe_req *);
    435 static int	iwm_lmac_scan(struct iwm_softc *);
    436 static int	iwm_config_umac_scan(struct iwm_softc *);
    437 static int	iwm_umac_scan(struct iwm_softc *);
    438 static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
    439 static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
    440 		    int *);
    441 static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
    442 		    struct iwm_mac_ctx_cmd *, uint32_t, int);
    443 static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
    444 		    struct iwm_mac_data_sta *, int);
    445 static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
    446 		    uint32_t, int);
    447 static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
    448 static int	iwm_auth(struct iwm_softc *);
    449 static int	iwm_assoc(struct iwm_softc *);
    450 static void	iwm_calib_timeout(void *);
    451 #ifndef IEEE80211_NO_HT
    452 static void	iwm_setrates_task(void *);
    453 static int	iwm_setrates(struct iwm_node *);
    454 #endif
    455 static int	iwm_media_change(struct ifnet *);
    456 static void	iwm_newstate_cb(struct work *, void *);
    457 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
    458 static void	iwm_endscan(struct iwm_softc *);
    459 static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
    460 		    struct ieee80211_node *);
    461 static int	iwm_sf_config(struct iwm_softc *, int);
    462 static int	iwm_send_bt_init_conf(struct iwm_softc *);
    463 static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
    464 static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
    465 static int	iwm_init_hw(struct iwm_softc *);
    466 static int	iwm_init(struct ifnet *);
    467 static void	iwm_start(struct ifnet *);
    468 static void	iwm_stop(struct ifnet *, int);
    469 static void	iwm_watchdog(struct ifnet *);
    470 static int	iwm_ioctl(struct ifnet *, u_long, void *);
    471 #ifdef IWM_DEBUG
    472 static const char *iwm_desc_lookup(uint32_t);
    473 static void	iwm_nic_error(struct iwm_softc *);
    474 static void	iwm_nic_umac_error(struct iwm_softc *);
    475 #endif
    476 static void	iwm_notif_intr(struct iwm_softc *);
    477 static void	iwm_softintr(void *);
    478 static int	iwm_intr(void *);
    479 static int	iwm_preinit(struct iwm_softc *);
    480 static void	iwm_attach_hook(device_t);
    481 static void	iwm_attach(device_t, device_t, void *);
    482 #if 0
    483 static void	iwm_init_task(void *);
    484 static int	iwm_activate(device_t, enum devact);
    485 static void	iwm_wakeup(struct iwm_softc *);
    486 #endif
    487 static void	iwm_radiotap_attach(struct iwm_softc *);
    488 static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);
    489 
    490 static int iwm_sysctl_root_num;
    491 
    492 static int
    493 iwm_firmload(struct iwm_softc *sc)
    494 {
    495 	struct iwm_fw_info *fw = &sc->sc_fw;
    496 	firmware_handle_t fwh;
    497 	int err;
    498 
    499 	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
    500 		return 0;
    501 
    502 	/* Open firmware image. */
    503 	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
    504 	if (err) {
    505 		aprint_error_dev(sc->sc_dev,
    506 		    "could not get firmware handle %s\n", sc->sc_fwname);
    507 		return err;
    508 	}
    509 
    510 	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
    511 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
    512 		fw->fw_rawdata = NULL;
    513 	}
    514 
    515 	fw->fw_rawsize = firmware_get_size(fwh);
    516 	/*
    517 	 * Well, this is how the Linux driver checks it ....
    518 	 */
    519 	if (fw->fw_rawsize < sizeof(uint32_t)) {
    520 		aprint_error_dev(sc->sc_dev,
    521 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
    522 		err = EINVAL;
    523 		goto out;
    524 	}
    525 
    526 	/* some sanity */
    527 	if (fw->fw_rawsize > IWM_FWMAXSIZE) {
    528 		aprint_error_dev(sc->sc_dev,
    529 		    "firmware size is ridiculous: %zd bytes\n", fw->fw_rawsize);
    530 		err = EINVAL;
    531 		goto out;
    532 	}
    533 
    534 	/* Read the firmware. */
    535 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
    536 	if (fw->fw_rawdata == NULL) {
    537 		aprint_error_dev(sc->sc_dev,
    538 		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
    539 		err = ENOMEM;
    540 		goto out;
    541 	}
    542 	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
    543 	if (err) {
    544 		aprint_error_dev(sc->sc_dev,
    545 		    "could not read firmware %s\n", sc->sc_fwname);
    546 		goto out;
    547 	}
    548 
    549 	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
    550  out:
    551 	/* caller will release memory, if necessary */
    552 
    553 	firmware_close(fwh);
    554 	return err;
    555 }
    556 
    557 /*
    558  * just maintaining status quo.
    559  */
    560 static void
    561 iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
    562 {
    563 	struct ieee80211com *ic = &sc->sc_ic;
    564 	struct ieee80211_frame *wh;
    565 	uint8_t subtype;
    566 
    567 	wh = mtod(m, struct ieee80211_frame *);
    568 
    569 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
    570 		return;
    571 
    572 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
    573 
    574 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
    575 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
    576 		return;
    577 
    578 	int chan = le32toh(sc->sc_last_phy_info.channel);
    579 	if (chan < __arraycount(ic->ic_channels))
    580 		ic->ic_curchan = &ic->ic_channels[chan];
    581 }
    582 
    583 static int
    584 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
    585 {
    586 	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;
    587 
    588 	if (dlen < sizeof(*l) ||
    589 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
    590 		return EINVAL;
    591 
    592 	/* we don't actually store anything for now, always use s/w crypto */
    593 
    594 	return 0;
    595 }
    596 
    597 static int
    598 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    599     uint8_t *data, size_t dlen)
    600 {
    601 	struct iwm_fw_sects *fws;
    602 	struct iwm_fw_onesect *fwone;
    603 
    604 	if (type >= IWM_UCODE_TYPE_MAX)
    605 		return EINVAL;
    606 	if (dlen < sizeof(uint32_t))
    607 		return EINVAL;
    608 
    609 	fws = &sc->sc_fw.fw_sects[type];
    610 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
    611 		return EINVAL;
    612 
    613 	fwone = &fws->fw_sect[fws->fw_count];
    614 
    615 	/* first 32bit are device load offset */
    616 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
    617 
    618 	/* rest is data */
    619 	fwone->fws_data = data + sizeof(uint32_t);
    620 	fwone->fws_len = dlen - sizeof(uint32_t);
    621 
    622 	/* for freeing the buffer during driver unload */
    623 	fwone->fws_alloc = data;
    624 	fwone->fws_allocsize = dlen;
    625 
    626 	fws->fw_count++;
    627 	fws->fw_totlen += fwone->fws_len;
    628 
    629 	return 0;
    630 }
    631 
/*
 * Wire layout of an IWM_UCODE_TLV_DEF_CALIB section: the ucode image
 * type it applies to, followed by its default calibration triggers.
 * Packed because it is overlaid directly on firmware file data.
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* little-endian enum iwm_ucode_type */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
    636 
    637 static int
    638 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
    639 {
    640 	const struct iwm_tlv_calib_data *def_calib = data;
    641 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
    642 
    643 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
    644 		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
    645 		    DEVNAME(sc), ucode_type));
    646 		return EINVAL;
    647 	}
    648 
    649 	sc->sc_default_calib[ucode_type].flow_trigger =
    650 	    def_calib->calib.flow_trigger;
    651 	sc->sc_default_calib[ucode_type].event_trigger =
    652 	    def_calib->calib.event_trigger;
    653 
    654 	return 0;
    655 }
    656 
    657 static int
    658 iwm_read_firmware(struct iwm_softc *sc)
    659 {
    660 	struct iwm_fw_info *fw = &sc->sc_fw;
    661 	struct iwm_tlv_ucode_header *uhdr;
    662 	struct iwm_ucode_tlv tlv;
    663 	enum iwm_ucode_tlv_type tlv_type;
    664 	uint8_t *data;
    665 	int err, status;
    666 	size_t len;
    667 
    668 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
    669 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
    670 	} else {
    671 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
    672 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
    673 	}
    674 	status = fw->fw_status;
    675 
    676 	if (status == IWM_FW_STATUS_DONE)
    677 		return 0;
    678 
    679 	err = iwm_firmload(sc);
    680 	if (err) {
    681 		aprint_error_dev(sc->sc_dev,
    682 		    "could not read firmware %s (error %d)\n",
    683 		    sc->sc_fwname, err);
    684 		goto out;
    685 	}
    686 
    687 	sc->sc_capaflags = 0;
    688 	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
    689 	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
    690 	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
    691 
    692 	uhdr = (void *)fw->fw_rawdata;
    693 	if (*(uint32_t *)fw->fw_rawdata != 0
    694 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
    695 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
    696 		    sc->sc_fwname);
    697 		err = EINVAL;
    698 		goto out;
    699 	}
    700 
    701 	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
    702 	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
    703 	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
    704 	    IWM_UCODE_API(le32toh(uhdr->ver)));
    705 	data = uhdr->data;
    706 	len = fw->fw_rawsize - sizeof(*uhdr);
    707 
    708 	while (len >= sizeof(tlv)) {
    709 		size_t tlv_len;
    710 		void *tlv_data;
    711 
    712 		memcpy(&tlv, data, sizeof(tlv));
    713 		tlv_len = le32toh(tlv.length);
    714 		tlv_type = le32toh(tlv.type);
    715 
    716 		len -= sizeof(tlv);
    717 		data += sizeof(tlv);
    718 		tlv_data = data;
    719 
    720 		if (len < tlv_len) {
    721 			aprint_error_dev(sc->sc_dev,
    722 			    "firmware too short: %zu bytes\n", len);
    723 			err = EINVAL;
    724 			goto parse_out;
    725 		}
    726 
    727 		switch (tlv_type) {
    728 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
    729 			if (tlv_len < sizeof(uint32_t)) {
    730 				err = EINVAL;
    731 				goto parse_out;
    732 			}
    733 			sc->sc_capa_max_probe_len
    734 			    = le32toh(*(uint32_t *)tlv_data);
    735 			/* limit it to something sensible */
    736 			if (sc->sc_capa_max_probe_len >
    737 			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
    738 				err = EINVAL;
    739 				goto parse_out;
    740 			}
    741 			break;
    742 		case IWM_UCODE_TLV_PAN:
    743 			if (tlv_len) {
    744 				err = EINVAL;
    745 				goto parse_out;
    746 			}
    747 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
    748 			break;
    749 		case IWM_UCODE_TLV_FLAGS:
    750 			if (tlv_len < sizeof(uint32_t)) {
    751 				err = EINVAL;
    752 				goto parse_out;
    753 			}
    754 			/*
    755 			 * Apparently there can be many flags, but Linux driver
    756 			 * parses only the first one, and so do we.
    757 			 *
    758 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
    759 			 * Intentional or a bug?  Observations from
    760 			 * current firmware file:
    761 			 *  1) TLV_PAN is parsed first
    762 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
    763 			 * ==> this resets TLV_PAN to itself... hnnnk
    764 			 */
    765 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
    766 			break;
    767 		case IWM_UCODE_TLV_CSCHEME:
    768 			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
    769 			if (err)
    770 				goto parse_out;
    771 			break;
    772 		case IWM_UCODE_TLV_NUM_OF_CPU: {
    773 			uint32_t num_cpu;
    774 			if (tlv_len != sizeof(uint32_t)) {
    775 				err = EINVAL;
    776 				goto parse_out;
    777 			}
    778 			num_cpu = le32toh(*(uint32_t *)tlv_data);
    779 			if (num_cpu < 1 || num_cpu > 2) {
    780 				err = EINVAL;
    781 				goto parse_out;
    782 			}
    783 			break;
    784 		}
    785 		case IWM_UCODE_TLV_SEC_RT:
    786 			err = iwm_firmware_store_section(sc,
    787 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
    788 			if (err)
    789 				goto parse_out;
    790 			break;
    791 		case IWM_UCODE_TLV_SEC_INIT:
    792 			err = iwm_firmware_store_section(sc,
    793 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
    794 			if (err)
    795 				goto parse_out;
    796 			break;
    797 		case IWM_UCODE_TLV_SEC_WOWLAN:
    798 			err = iwm_firmware_store_section(sc,
    799 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
    800 			if (err)
    801 				goto parse_out;
    802 			break;
    803 		case IWM_UCODE_TLV_DEF_CALIB:
    804 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
    805 				err = EINVAL;
    806 				goto parse_out;
    807 			}
    808 			err = iwm_set_default_calib(sc, tlv_data);
    809 			if (err)
    810 				goto parse_out;
    811 			break;
    812 		case IWM_UCODE_TLV_PHY_SKU:
    813 			if (tlv_len != sizeof(uint32_t)) {
    814 				err = EINVAL;
    815 				goto parse_out;
    816 			}
    817 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
    818 			break;
    819 
    820 		case IWM_UCODE_TLV_API_CHANGES_SET: {
    821 			struct iwm_ucode_api *api;
    822 			if (tlv_len != sizeof(*api)) {
    823 				err = EINVAL;
    824 				goto parse_out;
    825 			}
    826 			api = (struct iwm_ucode_api *)tlv_data;
    827 			/* Flags may exceed 32 bits in future firmware. */
    828 			if (le32toh(api->api_index) > 0) {
    829 				goto parse_out;
    830 			}
    831 			sc->sc_ucode_api = le32toh(api->api_flags);
    832 			break;
    833 		}
    834 
    835 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
    836 			struct iwm_ucode_capa *capa;
    837 			int idx, i;
    838 			if (tlv_len != sizeof(*capa)) {
    839 				err = EINVAL;
    840 				goto parse_out;
    841 			}
    842 			capa = (struct iwm_ucode_capa *)tlv_data;
    843 			idx = le32toh(capa->api_index);
    844 			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
    845 				goto parse_out;
    846 			}
    847 			for (i = 0; i < 32; i++) {
    848 				if (!ISSET(le32toh(capa->api_capa), __BIT(i)))
    849 					continue;
    850 				setbit(sc->sc_enabled_capa, i + (32 * idx));
    851 			}
    852 			break;
    853 		}
    854 
    855 		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
    856 		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
    857 		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
    858 			/* ignore, not used by current driver */
    859 			break;
    860 
    861 		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
    862 			err = iwm_firmware_store_section(sc,
    863 			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
    864 			    tlv_len);
    865 			if (err)
    866 				goto parse_out;
    867 			break;
    868 
    869 		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
    870 			if (tlv_len != sizeof(uint32_t)) {
    871 				err = EINVAL;
    872 				goto parse_out;
    873 			}
    874 			sc->sc_capa_n_scan_channels =
    875 			  le32toh(*(uint32_t *)tlv_data);
    876 			break;
    877 
    878 		case IWM_UCODE_TLV_FW_VERSION:
    879 			if (tlv_len != sizeof(uint32_t) * 3) {
    880 				err = EINVAL;
    881 				goto parse_out;
    882 			}
    883 			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
    884 			    "%d.%d.%d",
    885 			    le32toh(((uint32_t *)tlv_data)[0]),
    886 			    le32toh(((uint32_t *)tlv_data)[1]),
    887 			    le32toh(((uint32_t *)tlv_data)[2]));
    888 			break;
    889 
    890 		default:
    891 			DPRINTF(("%s: unknown firmware section %d, abort\n",
    892 			    DEVNAME(sc), tlv_type));
    893 			err = EINVAL;
    894 			goto parse_out;
    895 		}
    896 
    897 		len -= roundup(tlv_len, 4);
    898 		data += roundup(tlv_len, 4);
    899 	}
    900 
    901 	KASSERT(err == 0);
    902 
    903  parse_out:
    904 	if (err) {
    905 		aprint_error_dev(sc->sc_dev,
    906 		    "firmware parse error, section type %d\n", tlv_type);
    907 	}
    908 
    909 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
    910 		aprint_error_dev(sc->sc_dev,
    911 		    "device uses unsupported power ops\n");
    912 		err = ENOTSUP;
    913 	}
    914 
    915  out:
    916 	if (err)
    917 		fw->fw_status = IWM_FW_STATUS_NONE;
    918 	else
    919 		fw->fw_status = IWM_FW_STATUS_DONE;
    920 	wakeup(&sc->sc_fw);
    921 
    922 	if (err && fw->fw_rawdata != NULL) {
    923 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
    924 		fw->fw_rawdata = NULL;
    925 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
    926 		/* don't touch fw->fw_status */
    927 		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
    928 	}
    929 	return err;
    930 }
    931 
    932 static uint32_t
    933 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
    934 {
    935 	IWM_WRITE(sc,
    936 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
    937 	IWM_BARRIER_READ_WRITE(sc);
    938 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
    939 }
    940 
    941 static void
    942 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    943 {
    944 	IWM_WRITE(sc,
    945 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
    946 	IWM_BARRIER_WRITE(sc);
    947 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
    948 }
    949 
    950 #ifdef IWM_DEBUG
    951 static int
    952 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
    953 {
    954 	int offs;
    955 	uint32_t *vals = buf;
    956 
    957 	if (iwm_nic_lock(sc)) {
    958 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
    959 		for (offs = 0; offs < dwords; offs++)
    960 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
    961 		iwm_nic_unlock(sc);
    962 		return 0;
    963 	}
    964 	return EBUSY;
    965 }
    966 #endif
    967 
    968 static int
    969 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
    970 {
    971 	int offs;
    972 	const uint32_t *vals = buf;
    973 
    974 	if (iwm_nic_lock(sc)) {
    975 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
    976 		/* WADDR auto-increments */
    977 		for (offs = 0; offs < dwords; offs++) {
    978 			uint32_t val = vals ? vals[offs] : 0;
    979 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
    980 		}
    981 		iwm_nic_unlock(sc);
    982 		return 0;
    983 	}
    984 	return EBUSY;
    985 }
    986 
    987 static int
    988 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    989 {
    990 	return iwm_write_mem(sc, addr, &val, 1);
    991 }
    992 
    993 static int
    994 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    995     int timo)
    996 {
    997 	for (;;) {
    998 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
    999 			return 1;
   1000 		}
   1001 		if (timo < 10) {
   1002 			return 0;
   1003 		}
   1004 		timo -= 10;
   1005 		DELAY(10);
   1006 	}
   1007 }
   1008 
   1009 static int
   1010 iwm_nic_lock(struct iwm_softc *sc)
   1011 {
   1012 	int rv = 0;
   1013 
   1014 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
   1015 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
   1016 
   1017 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   1018 		DELAY(2);
   1019 
   1020 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   1021 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
   1022 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
   1023 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
   1024 		rv = 1;
   1025 	} else {
   1026 		aprint_error_dev(sc->sc_dev, "device timeout\n");
   1027 		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
   1028 	}
   1029 
   1030 	return rv;
   1031 }
   1032 
/*
 * Drop the MAC access request taken by iwm_nic_lock(), allowing the
 * device to go back to sleep.
 */
static void
iwm_nic_unlock(struct iwm_softc *sc)
{
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
   1039 
   1040 static void
   1041 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
   1042     uint32_t mask)
   1043 {
   1044 	uint32_t val;
   1045 
   1046 	/* XXX: no error path? */
   1047 	if (iwm_nic_lock(sc)) {
   1048 		val = iwm_read_prph(sc, reg) & mask;
   1049 		val |= bits;
   1050 		iwm_write_prph(sc, reg, val);
   1051 		iwm_nic_unlock(sc);
   1052 	}
   1053 }
   1054 
/*
 * Set bits in a periphery register, preserving all others.
 */
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
   1060 
/*
 * Clear bits in a periphery register, preserving all others.
 */
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
   1066 
/*
 * Allocate a physically contiguous, `alignment'-aligned DMA area of
 * `size' bytes, map it into kernel virtual address space and load it
 * into a DMA map.  On success the kva, paddr, map and segment are
 * recorded in `dma', the memory is zeroed and pre-synced for device
 * reads.  Returns 0 or a bus_dma(9) error; on failure all partially
 * created state is released via iwm_dma_contig_free().
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	/* One segment only: the area must be physically contiguous. */
	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	/* Hand the device a zeroed area. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}
   1106 
/*
 * Tear down a DMA area created by iwm_dma_contig_alloc(), in reverse
 * order of setup: sync, unload, unmap, free, destroy.  Safe to call
 * on a partially initialized `dma' (each stage is guarded), which is
 * how the alloc failure path uses it.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
   1123 
/*
 * Allocate the RX ring: the descriptor array, the RX status area and
 * one DMA map plus mbuf per ring slot (buffers are attached via
 * iwm_rx_addbuf()).  Returns 0 or an errno; on failure everything
 * already allocated is released via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* Create a DMA map and attach an RX buffer for each slot. */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}
   1174 
   1175 static void
   1176 iwm_disable_rx_dma(struct iwm_softc *sc)
   1177 {
   1178 	int ntries;
   1179 
   1180 	if (iwm_nic_lock(sc)) {
   1181 		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
   1182 		for (ntries = 0; ntries < 1000; ntries++) {
   1183 			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
   1184 			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
   1185 				break;
   1186 			DELAY(10);
   1187 		}
   1188 		iwm_nic_unlock(sc);
   1189 	}
   1190 }
   1191 
/*
 * Reset the RX ring to its post-allocation state: rewind the ring
 * pointer and clear the status area, syncing it for the device.
 * The per-slot buffers are left in place.
 */
void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
   1200 
/*
 * Release everything allocated by iwm_alloc_rx_ring(): the descriptor
 * and status DMA areas, and each slot's mbuf and DMA map.  Safe on a
 * partially-initialized ring (used by the alloc failure path).
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}
   1225 
/*
 * Allocate TX ring `qid': the TFD descriptor array for every ring,
 * and -- for the EDCA and command rings only -- the device-command
 * array plus one DMA map per slot.  Each slot records the bus address
 * of its command and of the scratch field inside it.  Returns 0 or an
 * errno; on failure everything is released via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Walk the command array, recording each slot's bus addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* The walk above must have covered the whole command array. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}
   1293 
/*
 * Reset a TX ring to empty: free any in-flight mbufs, clear the
 * descriptor array (synced for the device), rewind the ring pointers
 * and drop the ring's bit from the queue-full mask.  DMA maps and
 * command memory remain allocated.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
   1318 
   1319 static void
   1320 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
   1321 {
   1322 	int i;
   1323 
   1324 	iwm_dma_contig_free(&ring->desc_dma);
   1325 	iwm_dma_contig_free(&ring->cmd_dma);
   1326 
   1327 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
   1328 		struct iwm_tx_data *data = &ring->data[i];
   1329 
   1330 		if (data->m != NULL) {
   1331 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1332 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1333 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1334 			m_freem(data->m);
   1335 		}
   1336 		if (data->map != NULL) {
   1337 			bus_dmamap_destroy(sc->sc_dmat, data->map);
   1338 			data->map = NULL;
   1339 		}
   1340 	}
   1341 }
   1342 
/*
 * Mask all interrupts except the RF-kill switch interrupt.
 */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1349 
   1350 static int
   1351 iwm_check_rfkill(struct iwm_softc *sc)
   1352 {
   1353 	uint32_t v;
   1354 	int s;
   1355 	int rv;
   1356 
   1357 	s = splnet();
   1358 
   1359 	/*
   1360 	 * "documentation" is not really helpful here:
   1361 	 *  27:	HW_RF_KILL_SW
   1362 	 *	Indicates state of (platform's) hardware RF-Kill switch
   1363 	 *
   1364 	 * But apparently when it's off, it's on ...
   1365 	 */
   1366 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
   1367 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
   1368 	if (rv) {
   1369 		sc->sc_flags |= IWM_FLAG_RFKILL;
   1370 	} else {
   1371 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
   1372 	}
   1373 
   1374 	splx(s);
   1375 	return rv;
   1376 }
   1377 
/*
 * Enable the driver's default set of interrupts.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1384 
/*
 * Re-apply the previously saved interrupt mask (sc_intmask).
 */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1390 
/*
 * Mask all device interrupts and acknowledge (clear) any that are
 * already pending, both the main and the FH interrupt status.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
   1404 
/*
 * (Re)initialize the interrupt cause table (ICT): zero it, point the
 * device at its physical address and switch the driver over to ICT
 * interrupt mode, then re-enable interrupts.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Start with an empty table, synced for the device. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
   1428 
   1429 #define IWM_HW_READY_TIMEOUT 50
   1430 static int
   1431 iwm_set_hw_ready(struct iwm_softc *sc)
   1432 {
   1433 	int ready;
   1434 
   1435 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1436 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
   1437 
   1438 	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1439 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
   1440 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
   1441 	    IWM_HW_READY_TIMEOUT);
   1442 	if (ready)
   1443 		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
   1444 		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);
   1445 
   1446 	return ready;
   1447 }
   1448 #undef IWM_HW_READY_TIMEOUT
   1449 
   1450 static int
   1451 iwm_prepare_card_hw(struct iwm_softc *sc)
   1452 {
   1453 	int t = 0;
   1454 
   1455 	if (iwm_set_hw_ready(sc))
   1456 		return 0;
   1457 
   1458 	DELAY(100);
   1459 
   1460 	/* If HW is not ready, prepare the conditions to check again */
   1461 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1462 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
   1463 
   1464 	do {
   1465 		if (iwm_set_hw_ready(sc))
   1466 			return 0;
   1467 		DELAY(200);
   1468 		t += 200;
   1469 	} while (t < 150000);
   1470 
   1471 	return ETIMEDOUT;
   1472 }
   1473 
   1474 static void
   1475 iwm_apm_config(struct iwm_softc *sc)
   1476 {
   1477 	pcireg_t reg;
   1478 
   1479 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
   1480 	    sc->sc_cap_off + PCIE_LCSR);
   1481 	if (reg & PCIE_LCSR_ASPM_L1) {
   1482 		/* Um the Linux driver prints "Disabling L0S for this one ... */
   1483 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
   1484 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
   1485 	} else {
   1486 		/* ... and "Enabling" here */
   1487 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
   1488 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
   1489 	}
   1490 }
   1491 
   1492 /*
   1493  * Start up NIC's basic functionality after it has been reset
   1494  * e.g. after platform boot or shutdown.
   1495  * NOTE:  This does not load uCode nor start the embedded processor
   1496  */
static int
iwm_apm_init(struct iwm_softc *sc)
{
	int err = 0;

	/* Disable L0S exit timer (platform NMI workaround) */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwm_apm_config(sc);

#if 0 /* not for 7k/8k */
	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
		    trans->cfg->base_params->pll_cfg_val);
#endif

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwm_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		aprint_error_dev(sc->sc_dev,
		    "timeout waiting for clock stabilization\n");
		err = ETIMEDOUT;
		goto out;
	}

	if (sc->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(20);

		/* Disable L1-Active */
		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
		    IWM_APMG_RTC_INT_STT_RFKILL);
	}
 out:
	if (err)
		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
	/* Returns 0 on success or ETIMEDOUT if clocks never stabilized. */
	return err;
}
   1600 
/*
 * Stop the device's busmaster DMA and wait (up to 100us) for the
 * master-disabled acknowledgement; a timeout is only logged.
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
   1613 
   1614 static int
   1615 iwm_start_hw(struct iwm_softc *sc)
   1616 {
   1617 	int err;
   1618 
   1619 	err = iwm_prepare_card_hw(sc);
   1620 	if (err)
   1621 		return err;
   1622 
   1623 	/* Reset the entire device */
   1624 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
   1625 	DELAY(10);
   1626 
   1627 	err = iwm_apm_init(sc);
   1628 	if (err)
   1629 		return err;
   1630 
   1631 	iwm_enable_rfkill_int(sc);
   1632 	iwm_check_rfkill(sc);
   1633 
   1634 	return 0;
   1635 }
   1636 
/*
 * Full device shutdown: disable interrupts, stop the TX scheduler and
 * all TX/RX DMA channels, reset the rings, power down the DMA clocks,
 * stop the APM and reset the on-board processor.  The RF-kill
 * interrupt is re-armed at the end so switch changes are still seen.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Poll (up to ~4ms) for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
   1699 
/*
 * Program the HW_IF_CONFIG register from the hardware revision and
 * the radio configuration extracted from the firmware's PHY_SKU TLV
 * (sc_fw_phy_config), then apply the family-7000 power-off reset
 * workaround.
 */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Unpack the radio type/step/dash fields from the firmware config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
   1738 
/*
 * Initialize the RX DMA engine: clear the shared RX status area,
 * program the descriptor ring and status page addresses, and enable
 * the RX FIFO channel 0.  Returns EBUSY if the NIC cannot be locked.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Zero the device-written RX status area before enabling DMA. */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Stop RX DMA and reset all channel 0 ring pointers. */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK	|
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
   1790 
/*
 * Initialize the TX DMA state: park the scheduler, program the
 * "keep warm" page and each queue's descriptor ring base address,
 * and let the scheduler auto-activate DMA channels.
 * Returns EBUSY if the NIC cannot be locked.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}

	/* Let the scheduler enable DMA channels on its own. */
	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
   1821 
/*
 * Bring the NIC up after reset: APM init, power source selection on
 * the 7000 family, interface/radio configuration, then RX and TX DMA
 * setup.  Returns the first error encountered.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int err;

	iwm_apm_init(sc);
	/* Run the device from VMAIN power (7000 family only). */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
		    ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);

	iwm_nic_config(sc);

	err = iwm_nic_rx_init(sc);
	if (err)
		return err;

	err = iwm_nic_tx_init(sc);
	if (err)
		return err;

	DPRINTF(("shadow registers enabled\n"));
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
   1848 
/*
 * TX FIFO assignment table, indexed by access category.  The name
 * suggests WME AC order (VO/VI/BE/BK); the consumer is outside this
 * chunk — confirm the index convention at the call site.
 */
static const uint8_t iwm_ac_to_tx_fifo[] = {
	IWM_TX_FIFO_VO,
	IWM_TX_FIFO_VI,
	IWM_TX_FIFO_BE,
	IWM_TX_FIFO_BK,
};
   1855 
/*
 * Activate TX queue 'qid' and bind it to TX FIFO 'fifo'.  The command
 * queue is programmed directly through the scheduler registers; all
 * other queues are configured by sending IWM_SCD_QUEUE_CFG to the
 * firmware.  Returns EBUSY if the NIC lock cannot be taken, or the
 * error from the firmware command.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
		return EBUSY;
	}

	/* Reset the queue's write pointer. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Deactivate the queue before reconfiguring it. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		/* No aggregation on the command queue. */
		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

		/* Clear the queue's scheduler context in SRAM. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);

		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT
		        << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Mark the queue active and bound to its TX FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int err;

		/* Drop the NIC lock; sending a firmware command may block. */
		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
		    &cmd);
		if (err)
			return err;

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): this ORs the queue *number* into IWM_SCD_EN_CTRL;
	 * a per-queue enable mask would be (1 << qid) — confirm against
	 * the iwlwifi reference before changing.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));

	return 0;
}
   1925 
/*
 * Post-firmware-load ("alive") setup: verify the scheduler SRAM base
 * address, reset the ICT table, clear the scheduler context in SRAM,
 * enable the command queue and all FH TX DMA channels, and re-enable
 * L1-Active (non-8000 family).  Returns EBUSY/EINVAL or the first
 * error from SRAM writes.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int err, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* The firmware's idea of the scheduler base must match ours. */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (sc->sched_base != base) {
		DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
		    DEVNAME(sc), sc->sched_base, base));
		err = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	err = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (err)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* iwm_enable_txq() takes and releases the NIC lock itself. */
	iwm_nic_unlock(sc);

	/* enable command channel */
	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return err;
}
   1993 
   1994 static struct iwm_phy_db_entry *
   1995 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
   1996     uint16_t chg_id)
   1997 {
   1998 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   1999 
   2000 	if (type >= IWM_PHY_DB_MAX)
   2001 		return NULL;
   2002 
   2003 	switch (type) {
   2004 	case IWM_PHY_DB_CFG:
   2005 		return &phy_db->cfg;
   2006 	case IWM_PHY_DB_CALIB_NCH:
   2007 		return &phy_db->calib_nch;
   2008 	case IWM_PHY_DB_CALIB_CHG_PAPD:
   2009 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
   2010 			return NULL;
   2011 		return &phy_db->calib_ch_group_papd[chg_id];
   2012 	case IWM_PHY_DB_CALIB_CHG_TXP:
   2013 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
   2014 			return NULL;
   2015 		return &phy_db->calib_ch_group_txp[chg_id];
   2016 	default:
   2017 		return NULL;
   2018 	}
   2019 	return NULL;
   2020 }
   2021 
/*
 * Store a calibration result notification from the firmware into the
 * in-memory PHY DB, replacing any previously stored data for that
 * section.  Uses the interrupt-safe allocator with KM_NOSLEEP, which
 * suggests it may run from interrupt context — hence no sleeping.
 * Returns EINVAL for unknown sections, ENOMEM on allocation failure.
 */
static int
iwm_phy_db_set_section(struct iwm_softc *sc,
    struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
{
	struct iwm_phy_db_entry *entry;
	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
	uint16_t chg_id = 0;

	/*
	 * Channel group sections carry their group id in the first
	 * 16-bit word of the payload.
	 * NOTE(review): assumes ->data is 16-bit aligned — confirm
	 * against the RX packet layout.
	 */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Replace any previously stored copy of this section. */
	if (entry->data)
		kmem_intr_free(entry->data, entry->size);
	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
	    __func__, __LINE__, type, size, entry->data));

	return 0;
}
   2053 
/*
 * Return non-zero if ch_id is a channel number this driver knows
 * about: 2GHz channels up to 14 (note: 0 also passes this bounds
 * check), and the 5GHz sets 36-64 and 100-140 (multiples of 4) plus
 * 145-165 (numbers congruent to 1 mod 4).
 */
static int
iwm_is_valid_channel(uint16_t ch_id)
{
	if (ch_id <= 14)
		return 1;
	if (ch_id >= 36 && ch_id <= 64)
		return (ch_id % 4) == 0;
	if (ch_id >= 100 && ch_id <= 140)
		return (ch_id % 4) == 0;
	if (ch_id >= 145 && ch_id <= 165)
		return (ch_id % 4) == 1;
	return 0;
}
   2064 
/*
 * Map a channel number to its index in the NVM channel list:
 * 1-14 -> 0-13, 36-64 -> 14-21, 100-140 -> 22-32, 145-165 -> 33-38.
 * Returns 0xff for channels iwm_is_valid_channel() rejects.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		return ch_id - 1;
	if (ch_id >= 145)
		return (ch_id - 13) / 4;
	if (ch_id >= 100)
		return (ch_id - 12) / 4;
	return (ch_id + 20) / 4;
}
   2079 
   2080 
/*
 * Map a channel number to its PAPD calibration channel group:
 * group 0 for 2GHz channels 1-14, group 1 for 36-64, group 2 for
 * 100-140, group 3 otherwise (this catches 145-165 and the ch_id==0
 * corner the validity check lets through).  Returns 0xff for invalid
 * channels.
 */
static uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id >= 1 && ch_id <= 14)
		return 0;
	if (ch_id >= 36 && ch_id <= 64)
		return 1;
	if (ch_id >= 100 && ch_id <= 140)
		return 2;
	return 3;
}
   2095 
   2096 static uint16_t
   2097 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
   2098 {
   2099 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   2100 	struct iwm_phy_db_chg_txp *txp_chg;
   2101 	int i;
   2102 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
   2103 
   2104 	if (ch_index == 0xff)
   2105 		return 0xff;
   2106 
   2107 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
   2108 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
   2109 		if (!txp_chg)
   2110 			return 0xff;
   2111 		/*
   2112 		 * Looking for the first channel group the max channel
   2113 		 * of which is higher than the requested channel.
   2114 		 */
   2115 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
   2116 			return i;
   2117 	}
   2118 	return 0xff;
   2119 }
   2120 
   2121 static int
   2122 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
   2123     uint16_t *size, uint16_t ch_id)
   2124 {
   2125 	struct iwm_phy_db_entry *entry;
   2126 	uint16_t ch_group_id = 0;
   2127 
   2128 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
   2129 		ch_group_id = iwm_channel_id_to_papd(ch_id);
   2130 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
   2131 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
   2132 
   2133 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
   2134 	if (!entry)
   2135 		return EINVAL;
   2136 
   2137 	*data = entry->data;
   2138 	*size = entry->size;
   2139 
   2140 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
   2141 		       __func__, __LINE__, type, *size));
   2142 
   2143 	return 0;
   2144 }
   2145 
   2146 static int
   2147 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
   2148     void *data)
   2149 {
   2150 	struct iwm_phy_db_cmd phy_db_cmd;
   2151 	struct iwm_host_cmd cmd = {
   2152 		.id = IWM_PHY_DB_CMD,
   2153 		.flags = IWM_CMD_ASYNC,
   2154 	};
   2155 
   2156 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
   2157 	    type, length));
   2158 
   2159 	phy_db_cmd.type = le16toh(type);
   2160 	phy_db_cmd.length = le16toh(length);
   2161 
   2162 	cmd.data[0] = &phy_db_cmd;
   2163 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
   2164 	cmd.data[1] = data;
   2165 	cmd.len[1] = length;
   2166 
   2167 	return iwm_send_cmd(sc, &cmd);
   2168 }
   2169 
   2170 static int
   2171 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
   2172     enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
   2173 {
   2174 	uint16_t i;
   2175 	int err;
   2176 	struct iwm_phy_db_entry *entry;
   2177 
   2178 	/* Send all the channel-specific groups to operational fw */
   2179 	for (i = 0; i < max_ch_groups; i++) {
   2180 		entry = iwm_phy_db_get_section(sc, type, i);
   2181 		if (!entry)
   2182 			return EINVAL;
   2183 
   2184 		if (!entry->size)
   2185 			continue;
   2186 
   2187 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
   2188 		if (err) {
   2189 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
   2190 			    "err %d\n", DEVNAME(sc), type, i, err));
   2191 			return err;
   2192 		}
   2193 
   2194 		DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
   2195 		    DEVNAME(sc), type, i));
   2196 
   2197 		DELAY(1000);
   2198 	}
   2199 
   2200 	return 0;
   2201 }
   2202 
/*
 * Upload the entire PHY DB to the operational firmware: the config
 * section, the non-channel calibration section, then all PAPD and
 * TX power channel groups.  Returns the first error encountered.
 */
static int
iwm_send_phy_db_data(struct iwm_softc *sc)
{
	uint8_t *data = NULL;
	uint16_t size = 0;
	int err;

	/* Config section. */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
	if (err)
		return err;

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
	if (err)
		return err;

	/* Non-channel-specific calibration. */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
	    &data, &size, 0);
	if (err)
		return err;

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
	if (err)
		return err;

	/* Per-channel-group calibration. */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
	if (err)
		return err;

	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
	if (err)
		return err;

	return 0;
}
   2239 
   2240 /*
   2241  * For the high priority TE use a time event type that has similar priority to
   2242  * the FW's action scan priority.
   2243  */
   2244 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
   2245 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
   2246 
   2247 /* used to convert from time event API v2 to v1 */
   2248 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
   2249 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
/* Extract the notification bits from a little-endian v2 policy word. */
static inline uint16_t
iwm_te_v2_get_notify(uint16_t policy)
{
	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
}
   2255 
/*
 * Extract the dependency-policy bits from a little-endian v2 policy
 * word, shifted down by IWM_TE_V2_PLACEMENT_POS.
 */
static inline uint16_t
iwm_te_v2_get_dep_policy(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
		IWM_TE_V2_PLACEMENT_POS;
}
   2262 
/* Extract the absence bit from a little-endian v2 policy word. */
static inline uint16_t
iwm_te_v2_get_absence(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
}
   2268 
/*
 * Convert a time event command from the v2 layout to the v1 layout
 * for firmware lacking the v2 time event API.  Common fields are
 * copied verbatim (already little-endian); the packed v2 policy word
 * is split into the separate v1 dep_policy/is_present/notify fields.
 */
static void
iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
    struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 and v2 use different encodings for "repeat forever". */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
   2292 
   2293 static int
   2294 iwm_send_time_event_cmd(struct iwm_softc *sc,
   2295     const struct iwm_time_event_cmd_v2 *cmd)
   2296 {
   2297 	struct iwm_time_event_cmd_v1 cmd_v1;
   2298 
   2299 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
   2300 		return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
   2301 		    cmd);
   2302 
   2303 	iwm_te_v2_to_v1(cmd, &cmd_v1);
   2304 	return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
   2305 	    &cmd_v1);
   2306 }
   2307 
/*
 * Ask the firmware for a session-protection time event: a window of
 * 'duration' (starting within 'max_delay') during which the device
 * stays available to complete the association exchange with the AP.
 * NOTE(review): the return value of iwm_send_time_event_cmd() is
 * discarded, so a failed request is silent.
 */
static void
iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Start as soon as possible (see START_IMMEDIATELY below). */
	time_cmd.apply_time = htole32(0);

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END |
		IWM_T2_V2_START_IMMEDIATELY);

	iwm_send_time_event_cmd(sc, &time_cmd);
}
   2336 
   2337 /*
   2338  * NVM read access and content parsing.  We do not support
   2339  * external NVM or writing NVM.
   2340  */
   2341 
/*
 * List of NVM sections we are allowed/need to read.  Both 7000- and
 * 8000-family section types are listed here (TODO confirm the reader
 * tolerates sections absent on a given family).
 */
static const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};
   2353 
   2354 /* Default NVM size to read */
   2355 #define IWM_NVM_DEFAULT_CHUNK_SIZE	(2*1024)
   2356 #define IWM_MAX_NVM_SECTION_SIZE	8192
   2357 
   2358 #define IWM_NVM_WRITE_OPCODE 1
   2359 #define IWM_NVM_READ_OPCODE 0
   2360 
/*
 * Read one chunk of an NVM section via IWM_NVM_ACCESS_CMD.  On
 * success *len holds the number of bytes the firmware actually
 * returned (a short read signals end-of-section).
 *
 * NOTE(review): the caller-provided offset is clobbered with 0
 * below, so every chunk is requested from the start of the section;
 * combined with iwm_nvm_read_section()'s loop this only terminates
 * cleanly when a section fits in a single chunk — confirm intent
 * against the driver's revision history before changing.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
    uint16_t length, uint8_t *data, uint16_t *len)
{
	offset = 0;
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
		.data = { &nvm_access_cmd, },
	};
	int err, offset_read;
	size_t bytes_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	err = iwm_send_cmd(sc, &cmd);
	if (err) {
		DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
		    DEVNAME(sc), err));
		return err;
	}

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		err = EIO;
		goto exit;
	}

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;

	/* A non-zero firmware status is mapped to EINVAL below. */
	err = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (err) {
		err = EINVAL;
		goto exit;
	}

	/* Sanity-check the response against what was requested. */
	if (offset_read != offset) {
		err = EINVAL;
		goto exit;
	}
	if (bytes_read > length) {
		err = EINVAL;
		goto exit;
	}

	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	/* Always release the response buffer taken via IWM_CMD_WANT_SKB. */
	iwm_free_resp(sc, &cmd);
	return err;
}
   2426 
   2427 /*
   2428  * Reads an NVM section completely.
   2429  * NICs prior to 7000 family doesn't have a real NVM, but just read
   2430  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
   2431  * by uCode, we need to manually check in this case that we don't
   2432  * overflow and try to read more than the EEPROM size.
   2433  */
   2434 static int
   2435 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
   2436     uint16_t *len, size_t max_len)
   2437 {
   2438 	uint16_t chunklen, seglen;
   2439 	int err;
   2440 
   2441 	chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
   2442 	*len = 0;
   2443 
   2444 	/* Read NVM chunks until exhausted (reading less than requested) */
   2445 	while (seglen == chunklen && *len < max_len) {
   2446 		err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
   2447 		    &seglen);
   2448 		if (err) {
   2449 			DPRINTF(("%s:Cannot read NVM from section %d "
   2450 			    "offset %d, length %d\n",
   2451 			    DEVNAME(sc), section, *len, chunklen));
   2452 			return err;
   2453 		}
   2454 		*len += seglen;
   2455 	}
   2456 
   2457 	DPRINTFN(4, ("NVM section %d read completed\n", section));
   2458 	return 0;
   2459 }
   2460 
   2461 static uint8_t
   2462 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
   2463 {
   2464 	uint8_t tx_ant;
   2465 
   2466 	tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
   2467 	    >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
   2468 
   2469 	if (sc->sc_nvm.valid_tx_ant)
   2470 		tx_ant &= sc->sc_nvm.valid_tx_ant;
   2471 
   2472 	return tx_ant;
   2473 }
   2474 
   2475 static uint8_t
   2476 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
   2477 {
   2478 	uint8_t rx_ant;
   2479 
   2480 	rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
   2481 	    >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
   2482 
   2483 	if (sc->sc_nvm.valid_rx_ant)
   2484 		rx_ant &= sc->sc_nvm.valid_rx_ant;
   2485 
   2486 	return rx_ant;
   2487 }
   2488 
   2489 static void
   2490 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
   2491     const uint8_t *nvm_channels, size_t nchan)
   2492 {
   2493 	struct ieee80211com *ic = &sc->sc_ic;
   2494 	struct iwm_nvm_data *data = &sc->sc_nvm;
   2495 	int ch_idx;
   2496 	struct ieee80211_channel *channel;
   2497 	uint16_t ch_flags;
   2498 	int is_5ghz;
   2499 	int flags, hw_value;
   2500 
   2501 	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
   2502 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
   2503 
   2504 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
   2505 		    !data->sku_cap_band_52GHz_enable)
   2506 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
   2507 
   2508 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
   2509 			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
   2510 			    iwm_nvm_channels[ch_idx],
   2511 			    ch_flags,
   2512 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
   2513 			    "5.2" : "2.4"));
   2514 			continue;
   2515 		}
   2516 
   2517 		hw_value = nvm_channels[ch_idx];
   2518 		channel = &ic->ic_channels[hw_value];
   2519 
   2520 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
   2521 		if (!is_5ghz) {
   2522 			flags = IEEE80211_CHAN_2GHZ;
   2523 			channel->ic_flags
   2524 			    = IEEE80211_CHAN_CCK
   2525 			    | IEEE80211_CHAN_OFDM
   2526 			    | IEEE80211_CHAN_DYN
   2527 			    | IEEE80211_CHAN_2GHZ;
   2528 		} else {
   2529 			flags = IEEE80211_CHAN_5GHZ;
   2530 			channel->ic_flags =
   2531 			    IEEE80211_CHAN_A;
   2532 		}
   2533 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
   2534 
   2535 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
   2536 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
   2537 
   2538 #ifndef IEEE80211_NO_HT
   2539 		if (data->sku_cap_11n_enable)
   2540 			channel->ic_flags |= IEEE80211_CHAN_HT;
   2541 #endif
   2542 	}
   2543 }
   2544 
   2545 #ifndef IEEE80211_NO_HT
/*
 * Advertise supported HT MCS rates.  Only single-stream MCS 0-7 are
 * enabled; the multi-stream cases remain under "notyet".  (This HT
 * block is compiled out while IEEE80211_NO_HT is defined at the top
 * of this file.)
 */
static void
iwm_setup_ht_rates(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;

	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */

#ifdef notyet
	if (sc->sc_nvm.sku_cap_mimo_disable)
		return;

	if (iwm_fw_valid_rx_ant(sc) > 1)
		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
	if (iwm_fw_valid_rx_ant(sc) > 2)
		ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
#endif
}
   2566 
   2567 #define IWM_MAX_RX_BA_SESSIONS 16
   2568 
/*
 * Start or tear down an RX block ack session for (node, tid) by
 * sending an ADD_STA modify command to the firmware.  Called from
 * iwm_ba_task().  On a successful start the session count is bumped
 * and the ADDBA request accepted; otherwise it is refused.
 */
static void
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_add_sta_cmd_v7 cmd;
	struct iwm_node *in = (struct iwm_node *)ni;
	int err, s;
	uint32_t status;

	/* Refuse new sessions beyond the supported limit. */
	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		return;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.sta_id = IWM_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWM_STA_MODE_MODIFY;

	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		cmd.add_immediate_ba_ssn = ssn;
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
	    IWM_STA_MODIFY_REMOVE_BA_TID;

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);

	/* Update session accounting at splnet to keep it consistent. */
	s = splnet();
	if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
		if (start) {
			sc->sc_rx_ba_sessions++;
			ieee80211_addba_req_accept(ic, ni, tid);
		} else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	} else if (start)
		ieee80211_addba_req_refuse(ic, ni, tid);

	splx(s);
}
   2616 
/*
 * Deferred task: push updated HT protection settings to the firmware
 * by re-issuing the MAC context command for the current BSS.
 */
static void
iwm_htprot_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	int err;

	/* This call updates HT protection based on in->in_ni.ni_htop1. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
	if (err)
		aprint_error_dev(sc->sc_dev,
		    "could not change HT protection: error %d\n", err);
}
   2631 
   2632 /*
   2633  * This function is called by upper layer when HT protection settings in
   2634  * beacons have changed.
   2635  */
   2636 static void
   2637 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
   2638 {
   2639 	struct iwm_softc *sc = ic->ic_softc;
   2640 
   2641 	/* assumes that ni == ic->ic_bss */
   2642 	task_add(systq, &sc->htprot_task);
   2643 }
   2644 
/*
 * Deferred task: apply the pending RX block ack start/stop request
 * recorded in sc->ba_start/ba_tid/ba_ssn by the ampdu callbacks.
 */
static void
iwm_ba_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;

	if (sc->ba_start)
		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
	else
		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
}
   2657 
   2658 /*
   2659  * This function is called by upper layer when an ADDBA request is received
   2660  * from another STA and before the ADDBA response is sent.
   2661  */
   2662 static int
   2663 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
   2664     uint8_t tid)
   2665 {
   2666 	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
   2667 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
   2668 
   2669 	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
   2670 		return ENOSPC;
   2671 
   2672 	sc->ba_start = 1;
   2673 	sc->ba_tid = tid;
   2674 	sc->ba_ssn = htole16(ba->ba_winstart);
   2675 	task_add(systq, &sc->ba_task);
   2676 
   2677 	return EBUSY;
   2678 }
   2679 
   2680 /*
   2681  * This function is called by upper layer on teardown of an HT-immediate
   2682  * Block Ack agreement (eg. upon receipt of a DELBA frame).
   2683  */
   2684 static void
   2685 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
   2686     uint8_t tid)
   2687 {
   2688 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
   2689 
   2690 	sc->ba_start = 0;
   2691 	sc->ba_tid = tid;
   2692 	task_add(systq, &sc->ba_task);
   2693 }
   2694 #endif
   2695 
/*
 * Determine the MAC address of a family-8000 device and store it in
 * data->hw_addr.  Preference order:
 *  1. the MAC-address-override NVM section, if present and valid;
 *  2. the OTP address read from the WFMP PRPH registers;
 *  3. all-zeroes (with an error message) if neither source is usable.
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	/*
	 * Sentinel address used by the NVM to mean "no real override
	 * programmed" — presumably an Intel-chosen locally-administered
	 * placeholder; treat it as absent.
	 */
	static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
	};
	static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const uint8_t *hw_addr;

	if (mac_override) {
		hw_addr = (const uint8_t *)(mac_override +
		    IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 * Valid means: not the sentinel, not broadcast, not all-zero,
		 * and not a multicast address.
		 */
		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
		    (memcmp(etherbroadcastaddr, data->hw_addr,
		    sizeof(etherbroadcastaddr)) != 0) &&
		    (memcmp(etheranyaddr, data->hw_addr,
		    sizeof(etheranyaddr)) != 0) &&
		    !ETHER_IS_MULTICAST(data->hw_addr))
			return;
	}

	if (nvm_hw) {
		/* Read the mac address from WFMP registers. */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Bytes within each register word are stored reversed. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	/* No usable source; leave a zeroed (invalid) address behind. */
	aprint_error_dev(sc->sc_dev, "mac address not found\n");
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
   2754 
/*
 * Decode the raw NVM section contents into sc->sc_nvm: NVM version,
 * radio configuration, SKU capability bits, the device MAC address and
 * the supported channel map.  Field offsets differ between the 7000
 * and 8000 device families.  Always returns 0.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
    const uint16_t *nvm_sw, const uint16_t *nvm_calib,
    const uint16_t *mac_override, const uint16_t *phy_sku,
    const uint16_t *regulatory)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint32_t sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* 7000: radio config and SKU live in the SW section. */
		uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);

		sku = le16_to_cpup(nvm_sw + IWM_SKU);
	} else {
		/* 8000: 32-bit fields in the PHY_SKU section instead. */
		uint32_t radio_cfg = le32_to_cpup(
		    (const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
		data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
		data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
		data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
		data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
		data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
		data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);

		sku = le32_to_cpup(
		    (const uint32_t *)(phy_sku + IWM_SKU_8000));
	}

	/* Capability bits: supported bands, 11n, MIMO availability. */
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
	data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* 7000: MAC in HW section, stored byte-swapped per 16-bit word. */
		memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else
		iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);

	/* Channel list lives in different sections per family. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
		    iwm_nvm_channels, __arraycount(iwm_nvm_channels));
	else
		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
		    iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));

	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
   2822 
   2823 static int
   2824 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
   2825 {
   2826 	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
   2827 	const uint16_t *regulatory = NULL;
   2828 
   2829 	/* Checking for required sections */
   2830 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
   2831 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
   2832 		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
   2833 			return ENOENT;
   2834 		}
   2835 
   2836 		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
   2837 	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
   2838 		/* SW and REGULATORY sections are mandatory */
   2839 		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
   2840 		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
   2841 			return ENOENT;
   2842 		}
   2843 		/* MAC_OVERRIDE or at least HW section must exist */
   2844 		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
   2845 		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
   2846 			return ENOENT;
   2847 		}
   2848 
   2849 		/* PHY_SKU section is mandatory in B0 */
   2850 		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
   2851 			return ENOENT;
   2852 		}
   2853 
   2854 		regulatory = (const uint16_t *)
   2855 		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
   2856 		hw = (const uint16_t *)
   2857 		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
   2858 		mac_override =
   2859 			(const uint16_t *)
   2860 			sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
   2861 		phy_sku = (const uint16_t *)
   2862 		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
   2863 	} else {
   2864 		panic("unknown device family %d\n", sc->sc_device_family);
   2865 	}
   2866 
   2867 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
   2868 	calib = (const uint16_t *)
   2869 	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
   2870 
   2871 	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
   2872 	    phy_sku, regulatory);
   2873 }
   2874 
   2875 static int
   2876 iwm_nvm_init(struct iwm_softc *sc)
   2877 {
   2878 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
   2879 	int i, section, err;
   2880 	uint16_t len;
   2881 	uint8_t *buf;
   2882 	const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
   2883 
   2884 	/* Read From FW NVM */
   2885 	DPRINTF(("Read NVM\n"));
   2886 
   2887 	memset(nvm_sections, 0, sizeof(nvm_sections));
   2888 
   2889 	buf = kmem_alloc(bufsz, KM_SLEEP);
   2890 	if (buf == NULL)
   2891 		return ENOMEM;
   2892 
   2893 	for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
   2894 		section = iwm_nvm_to_read[i];
   2895 		KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);
   2896 
   2897 		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
   2898 		if (err) {
   2899 			err = 0;
   2900 			continue;
   2901 		}
   2902 		nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
   2903 		if (nvm_sections[section].data == NULL) {
   2904 			err = ENOMEM;
   2905 			break;
   2906 		}
   2907 		memcpy(nvm_sections[section].data, buf, len);
   2908 		nvm_sections[section].length = len;
   2909 	}
   2910 	kmem_free(buf, bufsz);
   2911 	if (err == 0)
   2912 		err = iwm_parse_nvm_sections(sc, nvm_sections);
   2913 
   2914 	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
   2915 		if (nvm_sections[i].data != NULL)
   2916 			kmem_free(nvm_sections[i].data, nvm_sections[i].length);
   2917 	}
   2918 
   2919 	return err;
   2920 }
   2921 
   2922 static int
   2923 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
   2924     const uint8_t *section, uint32_t byte_cnt)
   2925 {
   2926 	int err = EINVAL;
   2927 	uint32_t chunk_sz, offset;
   2928 
   2929 	chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
   2930 
   2931 	for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
   2932 		uint32_t addr, len;
   2933 		const uint8_t *data;
   2934 
   2935 		addr = dst_addr + offset;
   2936 		len = MIN(chunk_sz, byte_cnt - offset);
   2937 		data = section + offset;
   2938 
   2939 		err = iwm_firmware_load_chunk(sc, addr, data, len);
   2940 		if (err)
   2941 			break;
   2942 	}
   2943 
   2944 	return err;
   2945 }
   2946 
/*
 * DMA a single firmware chunk into device memory at dst_addr using the
 * flow-handler service channel, then sleep until the "chunk done"
 * interrupt fires.  Addresses in the extended SRAM window additionally
 * require the LMPM extended-address-space chicken bit to be set for the
 * duration of the transfer.  Returns 0 on success or an errno value.
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
    const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	bool is_extended = false;
	int err;

	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
	    BUS_DMASYNC_PREWRITE);

	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END)
		is_extended = true;

	if (is_extended) {
		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
	}

	/* Cleared here, set by the interrupt handler when the DMA ends. */
	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc)) {
		/* Undo the chicken bit before bailing out. */
		if (is_extended)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
		return EBUSY;
	}

	/*
	 * Program the service channel: pause it, point it at the source
	 * DMA buffer and destination SRAM address, then re-enable it to
	 * start the transfer.  Register order follows the reference
	 * iwlwifi implementation.
	 */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* Wait for this segment to load. */
	err = 0;
	while (!sc->sc_fw_chunk_done) {
		err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
		if (err)
			break;
	}
	if (!sc->sc_fw_chunk_done) {
		aprint_error_dev(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
	}

	if (is_extended) {
		/*
		 * Clear the chicken bit regardless of whether the NIC
		 * lock can be taken; only unlock if the lock succeeded.
		 */
		int rv = iwm_nic_lock(sc);
		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
		if (rv == 0)
			iwm_nic_unlock(sc);
	}

	return err;
}
   3021 
   3022 static int
   3023 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3024 {
   3025 	struct iwm_fw_sects *fws;
   3026 	int err, i;
   3027 	void *data;
   3028 	uint32_t dlen;
   3029 	uint32_t offset;
   3030 
   3031 	fws = &sc->sc_fw.fw_sects[ucode_type];
   3032 	for (i = 0; i < fws->fw_count; i++) {
   3033 		data = fws->fw_sect[i].fws_data;
   3034 		dlen = fws->fw_sect[i].fws_len;
   3035 		offset = fws->fw_sect[i].fws_devoff;
   3036 		if (dlen > sc->sc_fwdmasegsz) {
   3037 			err = EFBIG;
   3038 		} else
   3039 			err = iwm_firmware_load_sect(sc, offset, data, dlen);
   3040 		if (err) {
   3041 			aprint_error_dev(sc->sc_dev,
   3042 			    "could not load firmware chunk %u of %u\n",
   3043 			    i, fws->fw_count);
   3044 			return err;
   3045 		}
   3046 	}
   3047 
   3048 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
   3049 
   3050 	return 0;
   3051 }
   3052 
/*
 * Upload the firmware sections belonging to one CPU of a family-8000
 * device.  Sections are consumed from *first_ucode_section up to the
 * next separator marker; after each section the ucode is notified via
 * the FH_UCODE_LOAD_STATUS register, where CPU2's bits live in the
 * upper half-word (shift_param).  On return *first_ucode_section
 * points at the last section examined so the CPU2 pass can resume
 * after CPU1's separator.  Returns 0 on success or an errno value.
 */
static int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, err = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		/* Skip the separator section the CPU1 pass stopped on. */
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		if (dlen > sc->sc_fwdmasegsz) {
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load firmware chunk %d (error %d)\n",
			    i, err);
			return err;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			/* Running ones: 0x1, 0x3, 0x7, ... one per section. */
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Signal "all sections loaded" for this CPU to the ucode. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
   3126 
   3127 static int
   3128 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3129 {
   3130 	struct iwm_fw_sects *fws;
   3131 	int err = 0;
   3132 	int first_ucode_section;
   3133 
   3134 	fws = &sc->sc_fw.fw_sects[ucode_type];
   3135 
   3136 	/* configure the ucode to be ready to get the secured image */
   3137 	/* release CPU reset */
   3138 	iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
   3139 
   3140 	/* load to FW the binary Secured sections of CPU1 */
   3141 	err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
   3142 	if (err)
   3143 		return err;
   3144 
   3145 	/* load to FW the binary sections of CPU2 */
   3146 	return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
   3147 }
   3148 
   3149 static int
   3150 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3151 {
   3152 	int err, w;
   3153 
   3154 	sc->sc_uc.uc_intr = 0;
   3155 
   3156 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   3157 		err = iwm_load_firmware_8000(sc, ucode_type);
   3158 	else
   3159 		err = iwm_load_firmware_7000(sc, ucode_type);
   3160 
   3161 	if (err)
   3162 		return err;
   3163 
   3164 	/* wait for the firmware to load */
   3165 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
   3166 		err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
   3167 	if (err || !sc->sc_uc.uc_ok)
   3168 		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
   3169 
   3170 	return err;
   3171 }
   3172 
/*
 * Initialize the NIC, clear the rfkill handshake bits, enable host
 * interrupts and upload the requested ucode image.  Returns 0 on
 * success or an errno value.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err;

	/* Ack any pending interrupts before re-initializing. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	err = iwm_nic_init(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return err;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	return iwm_load_firmware(sc, ucode_type);
}
   3202 
   3203 static int
   3204 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
   3205 {
   3206 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
   3207 		.valid = htole32(valid_tx_ant),
   3208 	};
   3209 
   3210 	return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
   3211 	    sizeof(tx_ant_cmd), &tx_ant_cmd);
   3212 }
   3213 
   3214 static int
   3215 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
   3216 {
   3217 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
   3218 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
   3219 
   3220 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
   3221 	phy_cfg_cmd.calib_control.event_trigger =
   3222 	    sc->sc_default_calib[ucode_type].event_trigger;
   3223 	phy_cfg_cmd.calib_control.flow_trigger =
   3224 	    sc->sc_default_calib[ucode_type].flow_trigger;
   3225 
   3226 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
   3227 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
   3228 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
   3229 }
   3230 
   3231 static int
   3232 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   3233 {
   3234 	enum iwm_ucode_type old_type = sc->sc_uc_current;
   3235 	int err;
   3236 
   3237 	err = iwm_read_firmware(sc);
   3238 	if (err)
   3239 		return err;
   3240 
   3241 	sc->sc_uc_current = ucode_type;
   3242 	err = iwm_start_fw(sc, ucode_type);
   3243 	if (err) {
   3244 		sc->sc_uc_current = old_type;
   3245 		return err;
   3246 	}
   3247 
   3248 	return iwm_post_alive(sc);
   3249 }
   3250 
/*
 * Boot the INIT ucode and run its initialization sequence.  With
 * justnvm set, only the NVM is read (to obtain the MAC address and
 * channel map) and the function returns; otherwise BT coexistence,
 * smart-FIFO, TX antenna and PHY configuration are sent and we wait
 * for the firmware's init-complete notification.  Returns 0 on
 * success or an errno value.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int err;

	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Cleared here; set by the init-complete notification handler. */
	sc->sc_init_complete = 0;
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to load init firmware\n");
		return err;
	}

	if (justnvm) {
		err = iwm_nvm_init(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return err;
		}

		/* Publish the NVM-derived MAC address to net80211. */
		memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
		    ETHER_ADDR_LEN);
		return 0;
	}

	err = iwm_send_bt_init_conf(sc);
	if (err)
		return err;

	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err)
		return err;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	err = iwm_send_phy_cfg_cmd(sc);
	if (err)
		return err;

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
		err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
		if (err)
			break;
	}

	return err;
}
   3313 
/*
 * Allocate and DMA-map a fresh receive mbuf for RX ring slot idx and
 * point the hardware descriptor at it.  If the slot already holds a
 * buffer it is unloaded first; in that case a subsequent mapping
 * failure is fatal because the old descriptor address is gone.
 * Returns 0 on success, ENOBUFS or a bus_dma error otherwise.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Use a cluster when it fits, else an external buffer. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (data->m != NULL) {
		/* Replacing a live buffer; failure past here is fatal. */
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
   3362 
   3363 #define IWM_RSSI_OFFSET 50
   3364 static int
   3365 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   3366 {
   3367 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
   3368 	uint32_t agc_a, agc_b;
   3369 	uint32_t val;
   3370 
   3371 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
   3372 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
   3373 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
   3374 
   3375 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
   3376 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
   3377 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
   3378 
   3379 	/*
   3380 	 * dBm = rssi dB - agc dB - constant.
   3381 	 * Higher AGC (higher radio gain) means lower signal.
   3382 	 */
   3383 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
   3384 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
   3385 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
   3386 
   3387 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
   3388 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
   3389 
   3390 	return max_rssi_dbm;
   3391 }
   3392 
   3393 /*
   3394  * RSSI values are reported by the FW as positive values - need to negate
   3395  * to obtain their dBM.  Account for missing antennas by replacing 0
   3396  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
   3397  */
   3398 static int
   3399 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   3400 {
   3401 	int energy_a, energy_b, energy_c, max_energy;
   3402 	uint32_t val;
   3403 
   3404 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
   3405 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
   3406 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
   3407 	energy_a = energy_a ? -energy_a : -256;
   3408 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
   3409 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
   3410 	energy_b = energy_b ? -energy_b : -256;
   3411 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
   3412 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
   3413 	energy_c = energy_c ? -energy_c : -256;
   3414 	max_energy = MAX(energy_a, energy_b);
   3415 	max_energy = MAX(max_energy, energy_c);
   3416 
   3417 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
   3418 	    energy_a, energy_b, energy_c, max_energy));
   3419 
   3420 	return max_energy;
   3421 }
   3422 
   3423 static void
   3424 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
   3425     struct iwm_rx_data *data)
   3426 {
   3427 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
   3428 
   3429 	DPRINTFN(20, ("received PHY stats\n"));
   3430 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
   3431 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
   3432 
   3433 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
   3434 }
   3435 
   3436 /*
   3437  * Retrieve the average noise (in dBm) among receivers.
   3438  */
   3439 static int
   3440 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
   3441 {
   3442 	int i, total, nbant, noise;
   3443 
   3444 	total = nbant = noise = 0;
   3445 	for (i = 0; i < 3; i++) {
   3446 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
   3447 		if (noise) {
   3448 			total += noise;
   3449 			nbant++;
   3450 		}
   3451 	}
   3452 
   3453 	/* There should be at least one antenna but check anyway. */
   3454 	return (nbant == 0) ? -127 : (total / nbant) - 107;
   3455 }
   3456 
/*
 * Handle a received-MPDU notification: validate the frame, compute its
 * RSSI (using the PHY info saved by iwm_rx_rx_phy_cmd()), replenish
 * the RX ring slot, optionally tap the frame to radiotap listeners,
 * and hand it to net80211.  Frames with bad CRC/FIFO status or
 * implausible PHY data are silently dropped.
 */
static void
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;
	int s;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* PHY info was delivered in the preceding notification. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The RX status word follows the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
	    sizeof(*rx_res) + len));

	/* Point the ring's mbuf at the frame payload. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports energy directly; older needs AGC math. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_calc_rssi(sc, phy_info);
	}
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(sc, m);

	/*
	 * Give the slot a fresh buffer before passing this one up;
	 * if that fails the frame is dropped (m still owned by ring).
	 */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m_set_rcvif(m, IC2IFP(ic));

	if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
		c = &ic->ic_channels[le32toh(phy_info->channel)];

	s = splnet();

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	/* Radiotap tap for bpf listeners, if any are attached. */
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		if (phy_info->phy_flags &
		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
			uint8_t mcs = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK));
			tap->wr_rate = (0x80 | mcs);
		} else {
			/* Map firmware rate codes to 500kb/s units. */
			uint8_t rate = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_LEGACY_RATE_MSK));
			switch (rate) {
			/* CCK rates. */
			case  10: tap->wr_rate =   2; break;
			case  20: tap->wr_rate =   4; break;
			case  55: tap->wr_rate =  11; break;
			case 110: tap->wr_rate =  22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate =  12; break;
			case 0xf: tap->wr_rate =  18; break;
			case 0x5: tap->wr_rate =  24; break;
			case 0x7: tap->wr_rate =  36; break;
			case 0x9: tap->wr_rate =  48; break;
			case 0xb: tap->wr_rate =  72; break;
			case 0x1: tap->wr_rate =  96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default:  tap->wr_rate =   0;
			}
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
	}
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);

	splx(s);
}
   3574 
   3575 static void
   3576 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
   3577     struct iwm_node *in)
   3578 {
   3579 	struct ieee80211com *ic = &sc->sc_ic;
   3580 	struct ifnet *ifp = IC2IFP(ic);
   3581 	struct iwm_tx_resp *tx_resp = (void *)pkt->data;
   3582 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
   3583 	int failack = tx_resp->failure_frame;
   3584 
   3585 	KASSERT(tx_resp->frame_count == 1);
   3586 
   3587 	/* Update rate control statistics. */
   3588 	in->in_amn.amn_txcnt++;
   3589 	if (failack > 0) {
   3590 		in->in_amn.amn_retrycnt++;
   3591 	}
   3592 
   3593 	if (status != IWM_TX_STATUS_SUCCESS &&
   3594 	    status != IWM_TX_STATUS_DIRECT_DONE)
   3595 		ifp->if_oerrors++;
   3596 	else
   3597 		ifp->if_opackets++;
   3598 }
   3599 
/*
 * Handle a TX completion notification from the firmware: reclaim the
 * mbuf and DMA map of the transmitted frame, update rate control and
 * interface counters, and restart transmission if the ring drains
 * below the low watermark.
 */
static void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* The same completion can be signalled twice; handle it only once. */
	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	/* Make the RX buffer holding the notification visible to the CPU. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	sc->sc_tx_timer = 0;

	iwm_rx_tx_cmd_single(sc, pkt, in);

	/* The frame left the hardware; release its DMA resources. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	/* Drop the node reference taken at TX time. */
	ieee80211_free_node(&in->in_ni);

	/* Unblock the interface once the ring drains below the low mark. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			if_start_lock(ifp);
		}
	}
}
   3648 
   3649 static int
   3650 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
   3651 {
   3652 	struct iwm_binding_cmd cmd;
   3653 	struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
   3654 	int i, err;
   3655 	uint32_t status;
   3656 
   3657 	memset(&cmd, 0, sizeof(cmd));
   3658 
   3659 	cmd.id_and_color
   3660 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3661 	cmd.action = htole32(action);
   3662 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3663 
   3664 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   3665 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
   3666 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
   3667 
   3668 	status = 0;
   3669 	err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
   3670 	    sizeof(cmd), &cmd, &status);
   3671 	if (err == 0 && status != 0)
   3672 		err = EIO;
   3673 
   3674 	return err;
   3675 }
   3676 
   3677 static void
   3678 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
   3679     struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
   3680 {
   3681 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
   3682 
   3683 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
   3684 	    ctxt->color));
   3685 	cmd->action = htole32(action);
   3686 	cmd->apply_time = htole32(apply_time);
   3687 }
   3688 
   3689 static void
   3690 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
   3691     struct ieee80211_channel *chan, uint8_t chains_static,
   3692     uint8_t chains_dynamic)
   3693 {
   3694 	struct ieee80211com *ic = &sc->sc_ic;
   3695 	uint8_t active_cnt, idle_cnt;
   3696 
   3697 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
   3698 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
   3699 
   3700 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
   3701 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
   3702 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
   3703 
   3704 	/* Set rx the chains */
   3705 	idle_cnt = chains_static;
   3706 	active_cnt = chains_dynamic;
   3707 
   3708 	cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
   3709 	    IWM_PHY_RX_CHAIN_VALID_POS);
   3710 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
   3711 	cmd->rxchain_info |= htole32(active_cnt <<
   3712 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
   3713 
   3714 	cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
   3715 }
   3716 
   3717 static int
   3718 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
   3719     uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
   3720     uint32_t apply_time)
   3721 {
   3722 	struct iwm_phy_context_cmd cmd;
   3723 
   3724 	iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
   3725 
   3726 	iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
   3727 	    chains_static, chains_dynamic);
   3728 
   3729 	return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
   3730 	    sizeof(struct iwm_phy_context_cmd), &cmd);
   3731 }
   3732 
/*
 * Send a command to the firmware via the command queue.
 *
 * Payloads that fit in the pre-allocated per-ring command buffer are
 * copied there; larger ones (up to IWM_MAX_CMD_PAYLOAD_SIZE) borrow a
 * temporary mbuf that iwm_cmd_done() frees on completion.  With
 * IWM_CMD_WANT_SKB the caller receives the response in hcmd->resp_pkt
 * and must release it with iwm_free_resp().  Synchronous commands
 * sleep up to one second for the completion interrupt.
 */
static int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int err = 0, i, paylen, off, s;
	int code;
	int async, wantresp;
	int group_id;
	size_t hdrlen, datasz;
	uint8_t *data;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload size across all fragments. */
	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async);
		while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
	}

	/*
	 * Is the hardware still available?  (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		err = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	txdata = &ring->data[ring->cur];

	/* Wide-group commands use the extended header format. */
	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			aprint_error_dev(sc->sc_dev,
			    "firmware command too long (%zd bytes)\n", totlen);
			err = EINVAL;
			goto out;
		}
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			err = ENOMEM;
			goto out;
		}
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not get fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			goto out;
		}
		/* txdata->m marks the slot for cleanup in iwm_cmd_done(). */
		txdata->m = m;
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		cmd = &ring->cmd[ring->cur];
		paddr = txdata->cmd_paddr;
	}

	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = ring->cur;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwm_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = ring->cur;
		data = cmd->data;
	}

	/* Gather all payload fragments into the command buffer. */
	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((hdrlen + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
	    code, hdrlen + paylen, async ? " (async)" : ""));

	/* Flush command body and descriptor to memory before the kick. */
	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof(*desc), BUS_DMASYNC_PREWRITE);

	/* Wake the NIC so it can fetch the command. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
		aprint_error_dev(sc->sc_dev, "acquiring device failed\n");
		err = EBUSY;
		goto out;
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (!async) {
		int generation = sc->sc_generation;
		/* Wait for iwm_cmd_done() to wake us on the descriptor. */
		err = tsleep(desc, PCATCH, "iwmcmd", mstohz(1000));
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
 out:
	/* On failure, release the response buffer we reserved above. */
	if (wantresp && err) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return err;
}
   3912 
   3913 static int
   3914 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
   3915     uint16_t len, const void *data)
   3916 {
   3917 	struct iwm_host_cmd cmd = {
   3918 		.id = id,
   3919 		.len = { len, },
   3920 		.data = { data, },
   3921 		.flags = flags,
   3922 	};
   3923 
   3924 	return iwm_send_cmd(sc, &cmd);
   3925 }
   3926 
   3927 static int
   3928 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
   3929     uint32_t *status)
   3930 {
   3931 	struct iwm_rx_packet *pkt;
   3932 	struct iwm_cmd_response *resp;
   3933 	int err, resp_len;
   3934 
   3935 	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
   3936 	cmd->flags |= IWM_CMD_WANT_SKB;
   3937 
   3938 	err = iwm_send_cmd(sc, cmd);
   3939 	if (err)
   3940 		return err;
   3941 	pkt = cmd->resp_pkt;
   3942 
   3943 	/* Can happen if RFKILL is asserted */
   3944 	if (!pkt) {
   3945 		err = 0;
   3946 		goto out_free_resp;
   3947 	}
   3948 
   3949 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   3950 		err = EIO;
   3951 		goto out_free_resp;
   3952 	}
   3953 
   3954 	resp_len = iwm_rx_packet_payload_len(pkt);
   3955 	if (resp_len != sizeof(*resp)) {
   3956 		err = EIO;
   3957 		goto out_free_resp;
   3958 	}
   3959 
   3960 	resp = (void *)pkt->data;
   3961 	*status = le32toh(resp->status);
   3962  out_free_resp:
   3963 	iwm_free_resp(sc, cmd);
   3964 	return err;
   3965 }
   3966 
   3967 static int
   3968 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
   3969     const void *data, uint32_t *status)
   3970 {
   3971 	struct iwm_host_cmd cmd = {
   3972 		.id = id,
   3973 		.len = { len, },
   3974 		.data = { data, },
   3975 	};
   3976 
   3977 	return iwm_send_cmd_status(sc, &cmd, status);
   3978 }
   3979 
/*
 * Release the single response buffer (sc_cmd_resp) held by a completed
 * IWM_CMD_WANT_SKB command and wake any thread in iwm_send_cmd()
 * waiting to claim it.
 */
static void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
	KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
	wakeup(&sc->sc_wantresp);
}
   3988 
/*
 * Handle completion of a command on the command queue: free the
 * temporary mbuf used by an oversized command (if any) and wake the
 * thread sleeping on the descriptor in iwm_send_cmd().
 */
static void
iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (qid != IWM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[idx];

	/* data->m is only set when the command borrowed an mbuf. */
	if (data->m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	wakeup(&ring->desc[idx]);
}
   4010 
#if 0
/*
 * Update the TX scheduler byte-count table for a queue slot.
 * Necessary only for block ack mode.
 *
 * Fix: the original referenced an undeclared variable 'w' in both
 * bus_dmamap_sync() offset computations, so this (disabled) code could
 * never compile if re-enabled.  Write through an explicit pointer 'w'
 * so the stored entry and the synced offset agree.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	w = &scd_bc_tbl[qid].tfd_offset[idx];
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* Duplicate the entry for low indices (hardware wrap-around). */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		w = &scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx];
		*w = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
   4046 
   4047 /*
   4048  * Fill in various bit for management frames, and leave them
   4049  * unfilled for data frames (firmware takes care of that).
   4050  * Return the selected TX rate.
   4051  */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
    struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags, i;
	int nrates = ni->ni_rates.rs_nrates;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA) {
		/* for non-data, use the lowest supported rate */
		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
#ifndef IEEE80211_NO_HT
	} else if (ic->ic_fixed_mcs != -1) {
		/* Operator pinned an MCS via ifconfig. */
		ridx = sc->sc_fixed_ridx;
#endif
	} else if (ic->ic_fixed_rate != -1) {
		/* Operator pinned a legacy rate. */
		ridx = sc->sc_fixed_ridx;
	} else {
		/* for data frames, use RS table */
		tx->initial_rate_index = 0;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		DPRINTFN(12, ("start with txrate %d\n",
		    tx->initial_rate_index));
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
			ridx = iwm_mcs2ridx[ni->ni_txmcs];
			return &iwm_rates[ridx];
		}
#endif
		/* Map the negotiated TX rate onto a rate-table index. */
		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
		for (i = 0; i < nrates; i++) {
			if (iwm_rates[i].rate == (ni->ni_txrate &
			    IEEE80211_RATE_VAL)) {
				ridx = i;
				break;
			}
		}
		/* rate_n_flags is left for the firmware to fill in. */
		return &iwm_rates[ridx];
	}

	rinfo = &iwm_rates[ridx];
	/* Transmit on the first antenna; flag CCK rates as such. */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
#ifndef IEEE80211_NO_HT
	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
		rate_flags |= IWM_RATE_MCS_HT_MSK;
		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
	} else
#endif
		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
   4117 
/* Size of TB 0: command header plus the start of the TX command. */
#define TB0_SIZE 16
/*
 * Transmit one frame on AC queue 'ac'.  Builds the TX command in the
 * ring slot, DMA-maps the payload (linearizing the mbuf if it has too
 * many segments), fills the TFD and kicks the ring.  The mbuf is
 * consumed on both success and failure.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, err, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	tid = 0;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Build the TX command header in the per-ring command buffer. */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Pass the frame to any attached radiotap (bpf) listener. */
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
#ifndef IEEE80211_NO_HT
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    rinfo->plcp == IWM_RATE_INVM_PLCP) {
			tap->wt_rate = (0x80 | rinfo->ht_plcp);
		} else
#endif
			tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the receiver. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request RTS/CTS protection for long or protected data frames. */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
	     (ic->ic_flags & IEEE80211_F_USEPROT)))
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = IWM_AUX_STA_ID;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* (Re)association frames get a longer PM frame timeout. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err) {
		if (err != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TBs 0 and 1 cover the command header, TX command and the
	 * (padded) 802.11 header; the rest point at the payload. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor before the doorbell. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
	    le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
   4345 
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush the TX queues selected by tfd_msk.
 * With sync != 0 the command is sent synchronously.
 */
static int
iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int err;

	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (err)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    err);
	return err;
}
#endif
   4365 
/* Turn the device LED on. */
static void
iwm_led_enable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}
   4371 
/* Turn the device LED off. */
static void
iwm_led_disable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
   4377 
/* Return non-zero if the LED register currently reads "on". */
static int
iwm_led_is_enabled(struct iwm_softc *sc)
{
	return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
}
   4383 
   4384 static void
   4385 iwm_led_blink_timeout(void *arg)
   4386 {
   4387 	struct iwm_softc *sc = arg;
   4388 
   4389 	if (iwm_led_is_enabled(sc))
   4390 		iwm_led_disable(sc);
   4391 	else
   4392 		iwm_led_enable(sc);
   4393 
   4394 	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
   4395 }
   4396 
/* Arm the blink callout; the handler re-schedules itself every 200ms. */
static void
iwm_led_blink_start(struct iwm_softc *sc)
{
	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
}
   4402 
/* Cancel the blink callout and leave the LED off. */
static void
iwm_led_blink_stop(struct iwm_softc *sc)
{
	callout_stop(&sc->sc_led_blink_to);
	iwm_led_disable(sc);
}
   4409 
   4410 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
   4411 
   4412 static int
   4413 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
   4414     struct iwm_beacon_filter_cmd *cmd)
   4415 {
   4416 	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
   4417 	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
   4418 }
   4419 
/* Propagate the driver's current beacon-abort setting into the command. */
static void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
   4426 
   4427 static int
   4428 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
   4429 {
   4430 	struct iwm_beacon_filter_cmd cmd = {
   4431 		IWM_BF_CMD_CONFIG_DEFAULTS,
   4432 		.bf_enable_beacon_filter = htole32(1),
   4433 		.ba_enable_beacon_abort = htole32(enable),
   4434 	};
   4435 
   4436 	if (!sc->sc_bf.bf_enabled)
   4437 		return 0;
   4438 
   4439 	sc->sc_bf.ba_enabled = enable;
   4440 	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
   4441 	return iwm_beacon_filter_send_cmd(sc, &cmd);
   4442 }
   4443 
   4444 static void
   4445 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
   4446     struct iwm_mac_power_cmd *cmd)
   4447 {
   4448 	struct ieee80211_node *ni = &in->in_ni;
   4449 	int dtim_period, dtim_msec, keep_alive;
   4450 
   4451 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   4452 	    in->in_color));
   4453 	if (ni->ni_dtim_period)
   4454 		dtim_period = ni->ni_dtim_period;
   4455 	else
   4456 		dtim_period = 1;
   4457 
   4458 	/*
   4459 	 * Regardless of power management state the driver must set
   4460 	 * keep alive period. FW will use it for sending keep alive NDPs
   4461 	 * immediately after association. Check that keep alive period
   4462 	 * is at least 3 * DTIM.
   4463 	 */
   4464 	dtim_msec = dtim_period * ni->ni_intval;
   4465 	keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
   4466 	keep_alive = roundup(keep_alive, 1000) / 1000;
   4467 	cmd->keep_alive_seconds = htole16(keep_alive);
   4468 
   4469 #ifdef notyet
   4470 	cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
   4471 	cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
   4472 	cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
   4473 #endif
   4474 }
   4475 
   4476 static int
   4477 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
   4478 {
   4479 	int err;
   4480 	int ba_enable;
   4481 	struct iwm_mac_power_cmd cmd;
   4482 
   4483 	memset(&cmd, 0, sizeof(cmd));
   4484 
   4485 	iwm_power_build_cmd(sc, in, &cmd);
   4486 
   4487 	err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
   4488 	    sizeof(cmd), &cmd);
   4489 	if (err)
   4490 		return err;
   4491 
   4492 	ba_enable = !!(cmd.flags &
   4493 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
   4494 	return iwm_update_beacon_abort(sc, in, ba_enable);
   4495 }
   4496 
   4497 static int
   4498 iwm_power_update_device(struct iwm_softc *sc)
   4499 {
   4500 	struct iwm_device_power_cmd cmd = {
   4501 #ifdef notyet
   4502 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
   4503 #endif
   4504 	};
   4505 
   4506 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
   4507 		return 0;
   4508 
   4509 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
   4510 	DPRINTF(("Sending device power command with flags = 0x%X\n",
   4511 	    cmd.flags));
   4512 
   4513 	return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
   4514 }
   4515 
#ifdef notyet
/*
 * Enable beacon filtering in the firmware and remember the state in
 * sc_bf.bf_enabled on success.
 */
static int
iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
	};
	int err;

	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	err = iwm_beacon_filter_send_cmd(sc, &cmd);

	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
#endif
   4535 
   4536 static int
   4537 iwm_disable_beacon_filter(struct iwm_softc *sc)
   4538 {
   4539 	struct iwm_beacon_filter_cmd cmd;
   4540 	int err;
   4541 
   4542 	memset(&cmd, 0, sizeof(cmd));
   4543 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
   4544 		return 0;
   4545 
   4546 	err = iwm_beacon_filter_send_cmd(sc, &cmd);
   4547 	if (err == 0)
   4548 		sc->sc_bf.bf_enabled = 0;
   4549 
   4550 	return err;
   4551 }
   4552 
   4553 static int
   4554 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
   4555 {
   4556 	struct iwm_add_sta_cmd_v7 add_sta_cmd;
   4557 	int err;
   4558 	uint32_t status;
   4559 
   4560 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
   4561 
   4562 	add_sta_cmd.sta_id = IWM_STATION_ID;
   4563 	add_sta_cmd.mac_id_n_color
   4564 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   4565 	if (!update) {
   4566 		int ac;
   4567 		for (ac = 0; ac < WME_NUM_AC; ac++) {
   4568 			add_sta_cmd.tfd_queue_msk |=
   4569 			    htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
   4570 		}
   4571 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
   4572 	}
   4573 	add_sta_cmd.add_modify = update ? 1 : 0;
   4574 	add_sta_cmd.station_flags_msk
   4575 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
   4576 	add_sta_cmd.tid_disable_tx = htole16(0xffff);
   4577 	if (update)
   4578 		add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
   4579 
   4580 #ifndef IEEE80211_NO_HT
   4581 	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
   4582 		add_sta_cmd.station_flags_msk
   4583 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
   4584 		    IWM_STA_FLG_AGG_MPDU_DENS_MSK);
   4585 
   4586 		add_sta_cmd.station_flags
   4587 		    |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
   4588 		switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
   4589 		case IEEE80211_AMPDU_PARAM_SS_2:
   4590 			add_sta_cmd.station_flags
   4591 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
   4592 			break;
   4593 		case IEEE80211_AMPDU_PARAM_SS_4:
   4594 			add_sta_cmd.station_flags
   4595 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
   4596 			break;
   4597 		case IEEE80211_AMPDU_PARAM_SS_8:
   4598 			add_sta_cmd.station_flags
   4599 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
   4600 			break;
   4601 		case IEEE80211_AMPDU_PARAM_SS_16:
   4602 			add_sta_cmd.station_flags
   4603 			    |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
   4604 			break;
   4605 		default:
   4606 			break;
   4607 		}
   4608 	}
   4609 #endif
   4610 
   4611 	status = IWM_ADD_STA_SUCCESS;
   4612 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
   4613 	    &add_sta_cmd, &status);
   4614 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
   4615 		err = EIO;
   4616 
   4617 	return err;
   4618 }
   4619 
   4620 static int
   4621 iwm_add_aux_sta(struct iwm_softc *sc)
   4622 {
   4623 	struct iwm_add_sta_cmd_v7 cmd;
   4624 	int err;
   4625 	uint32_t status;
   4626 
   4627 	err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
   4628 	if (err)
   4629 		return err;
   4630 
   4631 	memset(&cmd, 0, sizeof(cmd));
   4632 	cmd.sta_id = IWM_AUX_STA_ID;
   4633 	cmd.mac_id_n_color =
   4634 	    htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
   4635 	cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
   4636 	cmd.tid_disable_tx = htole16(0xffff);
   4637 
   4638 	status = IWM_ADD_STA_SUCCESS;
   4639 	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
   4640 	    &status);
   4641 	if (err == 0 && status != IWM_ADD_STA_SUCCESS)
   4642 		err = EIO;
   4643 
   4644 	return err;
   4645 }
   4646 
   4647 #define IWM_PLCP_QUIET_THRESH 1
   4648 #define IWM_ACTIVE_QUIET_TIME 10
   4649 #define LONG_OUT_TIME_PERIOD 600
   4650 #define SHORT_OUT_TIME_PERIOD 200
   4651 #define SUSPEND_TIME_PERIOD 100
   4652 
   4653 static uint16_t
   4654 iwm_scan_rx_chain(struct iwm_softc *sc)
   4655 {
   4656 	uint16_t rx_chain;
   4657 	uint8_t rx_ant;
   4658 
   4659 	rx_ant = iwm_fw_valid_rx_ant(sc);
   4660 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
   4661 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
   4662 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
   4663 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
   4664 	return htole16(rx_chain);
   4665 }
   4666 
   4667 static uint32_t
   4668 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
   4669 {
   4670 	uint32_t tx_ant;
   4671 	int i, ind;
   4672 
   4673 	for (i = 0, ind = sc->sc_scan_last_antenna;
   4674 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
   4675 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
   4676 		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
   4677 			sc->sc_scan_last_antenna = ind;
   4678 			break;
   4679 		}
   4680 	}
   4681 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
   4682 
   4683 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
   4684 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
   4685 				   tx_ant);
   4686 	else
   4687 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
   4688 }
   4689 
   4690 #ifdef notyet
   4691 /*
   4692  * If req->n_ssids > 0, it means we should do an active scan.
   4693  * In case of active scan w/o directed scan, we receive a zero-length SSID
   4694  * just to notify that this scan is active and not passive.
   4695  * In order to notify the FW of the number of SSIDs we wish to scan (including
   4696  * the zero-length one), we need to set the corresponding bits in chan->type,
   4697  * one for each SSID, and set the active bit (first). If the first SSID is
   4698  * already included in the probe template, so we need to set only
   4699  * req->n_ssids - 1 bits in addition to the first bit.
   4700  */
   4701 static uint16_t
   4702 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
   4703 {
   4704 	if (flags & IEEE80211_CHAN_2GHZ)
   4705 		return 30  + 3 * (n_ssids + 1);
   4706 	return 20  + 2 * (n_ssids + 1);
   4707 }
   4708 
   4709 static uint16_t
   4710 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
   4711 {
   4712 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
   4713 }
   4714 #endif
   4715 
/*
 * Fill the LMAC scan command's channel array from the net80211 channel
 * table, skipping unconfigured channels.  Returns the number of channel
 * entries written, bounded by the firmware's advertised scan-channel
 * capacity (sc_capa_n_scan_channels).
 */
static uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	/* Entry 0 of the channel table is unused; start at index 1. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)	/* channel not configured */
			continue;

		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
		chan->iter_count = htole16(1);
		chan->iter_interval = 0;
		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
#if 0 /* makes scanning while associated less useful */
		if (n_ssids != 0)
			chan->flags |= htole32(1 << 1); /* select SSID 0 */
#endif
		chan++;
		nchan++;
	}

	return nchan;
}
   4745 
/*
 * Fill the UMAC scan command's channel array from the net80211 channel
 * table, skipping unconfigured channels.  Unlike the LMAC variant the
 * channel number and iteration count are single-byte fields and stay
 * in host byte order.  Returns the number of entries written, bounded
 * by the firmware's advertised scan-channel capacity.
 */
static uint8_t
iwm_umac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	/* Entry 0 of the channel table is unused; start at index 1. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)	/* channel not configured */
			continue;
		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		chan->iter_count = 1;
		chan->iter_interval = htole16(0);
#if 0 /* makes scanning while associated less useful */
		if (n_ssids != 0)
			chan->flags = htole32(1 << 0); /* select SSID 0 */
#endif
		chan++;
		nchan++;
	}

	return nchan;
}
   4773 
   4774 static int
   4775 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
   4776 {
   4777 	struct ieee80211com *ic = &sc->sc_ic;
   4778 	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
   4779 	struct ieee80211_rateset *rs;
   4780 	size_t remain = sizeof(preq->buf);
   4781 	uint8_t *frm, *pos;
   4782 
   4783 	memset(preq, 0, sizeof(*preq));
   4784 
   4785 	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
   4786 		return ENOBUFS;
   4787 
   4788 	/*
   4789 	 * Build a probe request frame.  Most of the following code is a
   4790 	 * copy & paste of what is done in net80211.
   4791 	 */
   4792 	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
   4793 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
   4794 	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
   4795 	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
   4796 	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
   4797 	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
   4798 	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
   4799 	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */
   4800 
   4801 	frm = (uint8_t *)(wh + 1);
   4802 	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
   4803 
   4804 	/* Tell the firmware where the MAC header is. */
   4805 	preq->mac_header.offset = 0;
   4806 	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
   4807 	remain -= frm - (uint8_t *)wh;
   4808 
   4809 	/* Fill in 2GHz IEs and tell firmware where they are. */
   4810 	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
   4811 	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
   4812 		if (remain < 4 + rs->rs_nrates)
   4813 			return ENOBUFS;
   4814 	} else if (remain < 2 + rs->rs_nrates)
   4815 		return ENOBUFS;
   4816 	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
   4817 	pos = frm;
   4818 	frm = ieee80211_add_rates(frm, rs);
   4819 	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
   4820 		frm = ieee80211_add_xrates(frm, rs);
   4821 	preq->band_data[0].len = htole16(frm - pos);
   4822 	remain -= frm - pos;
   4823 
   4824 	if (isset(sc->sc_enabled_capa,
   4825 	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
   4826 		if (remain < 3)
   4827 			return ENOBUFS;
   4828 		*frm++ = IEEE80211_ELEMID_DSPARMS;
   4829 		*frm++ = 1;
   4830 		*frm++ = 0;
   4831 		remain -= 3;
   4832 	}
   4833 
   4834 	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
   4835 		/* Fill in 5GHz IEs. */
   4836 		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
   4837 		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
   4838 			if (remain < 4 + rs->rs_nrates)
   4839 				return ENOBUFS;
   4840 		} else if (remain < 2 + rs->rs_nrates)
   4841 			return ENOBUFS;
   4842 		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
   4843 		pos = frm;
   4844 		frm = ieee80211_add_rates(frm, rs);
   4845 		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
   4846 			frm = ieee80211_add_xrates(frm, rs);
   4847 		preq->band_data[1].len = htole16(frm - pos);
   4848 		remain -= frm - pos;
   4849 	}
   4850 
   4851 #ifndef IEEE80211_NO_HT
   4852 	/* Send 11n IEs on both 2GHz and 5GHz bands. */
   4853 	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
   4854 	pos = frm;
   4855 	if (ic->ic_flags & IEEE80211_F_HTON) {
   4856 		if (remain < 28)
   4857 			return ENOBUFS;
   4858 		frm = ieee80211_add_htcaps(frm, ic);
   4859 		/* XXX add WME info? */
   4860 	}
   4861 #endif
   4862 
   4863 	preq->common_data.len = htole16(frm - pos);
   4864 
   4865 	return 0;
   4866 }
   4867 
/*
 * Issue a one-shot scan via the legacy LMAC scan API
 * (IWM_SCAN_OFFLOAD_REQUEST_CMD), used on firmware without UMAC scan
 * support.  The command payload is laid out as: fixed request header,
 * then one channel config per supported scan channel, then the probe
 * request template.  Returns 0 on success or an errno.
 */
static int
iwm_lmac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Header + per-channel configs + probe request template. */
	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)	/* defensive; KM_SLEEP does not return NULL */
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	/* Active (directed) scan only when we have a desired SSID. */
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0);

	/* The probe template sits after the channel configs in req->data. */
	err = iwm_fill_probe_req(sc,
	    (struct iwm_scan_probe_req *)(req->data +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	     sc->sc_capa_n_scan_channels)));
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
   4973 
/*
 * Send the one-time UMAC scan configuration (IWM_SCAN_CFG_CMD):
 * antenna masks, legacy rate set, dwell times, our MAC address and the
 * list of scannable channel numbers.  Required before issuing
 * IWM_SCAN_REQ_UMAC requests.  Returns 0 on success or an errno.
 */
static int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
		.flags = 0,
	};
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	/* One byte per channel number follows the fixed config. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
	if (scan_config == NULL)  /* defensive; KM_SLEEP does not fail */
		return ENOMEM;

	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	/* Entry 0 is unused; skip unconfigured channels (ic_flags == 0). */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(scan_config, cmd_size);
	return err;
}
   5048 
/*
 * Issue a one-shot scan via the UMAC scan API (IWM_SCAN_REQ_UMAC).
 * The payload is the fixed request, then one channel config per
 * supported scan channel, then a "tail" holding the direct-scan SSIDs,
 * schedule and probe request template.  Requires a prior successful
 * iwm_config_umac_scan().  Returns 0 on success or an errno.
 */
static int
iwm_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_umac *req;
	struct iwm_scan_req_umac_tail *tail;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Fixed request + per-channel configs + tail. */
	req_len = sizeof(struct iwm_scan_req_umac) +
	    (sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels) +
	    sizeof(struct iwm_scan_req_umac_tail);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)	/* defensive; KM_SLEEP does not return NULL */
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	req->n_channels = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)req->data,
	    ic->ic_des_esslen != 0);

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	/* The tail follows the channel configs in req->data. */
	tail = (struct iwm_scan_req_umac_tail *)(req->data +
		sizeof(struct iwm_scan_channel_cfg_umac) *
			sc->sc_capa_n_scan_channels);

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		tail->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	err = iwm_fill_probe_req(sc, &tail->preq);
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
   5132 
   5133 static uint8_t
   5134 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
   5135 {
   5136 	int i;
   5137 	uint8_t rval;
   5138 
   5139 	for (i = 0; i < rs->rs_nrates; i++) {
   5140 		rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
   5141 		if (rval == iwm_rates[ridx].rate)
   5142 			return rs->rs_rates[i];
   5143 	}
   5144 	return 0;
   5145 }
   5146 
/*
 * Compute the CCK and OFDM ACK/CTS rate bitmaps for the MAC context
 * command from the BSS's basic rate set.  Bit 0 of *cck_rates
 * corresponds to IWM_FIRST_CCK_RATE, bit 0 of *ofdm_rates to
 * IWM_FIRST_OFDM_RATE.  Lower mandatory rates are then folded in as
 * required by 802.11-2007 9.6 (see the comment below).
 */
static void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = 100;	/* sentinel above any rate index */
	int lowest_present_cck = 100;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK basic rates only exist on 2 GHz (or unknown) channels. */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
   5232 
/*
 * Fill the common (station-mode) part of a MAC context command:
 * id/color, our address and the BSSID, ACK/CTS rate bitmaps, preamble
 * and slot-time flags, per-AC EDCA parameters and protection flags.
 * The 'assoc' argument is currently unused in this function.
 */
static void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(IWM_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);

	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Translate net80211 WME parameters into per-FIFO EDCA values. */
	for (i = 0; i < WME_NUM_AC; i++) {
		struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
		int txf = iwm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
		cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
		cmd->ac[txf].aifsn = wmep->wmep_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		/* TXOP limit is stored in units of 32 usec. */
		cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

#ifndef IEEE80211_NO_HT
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
			/*
			 * NOTE(review): no break here, so these cases also
			 * fall into the 20MHZ case below and additionally
			 * set IWM_MAC_PROT_FLG_FAT_PROT.  Possibly a
			 * missing break -- confirm intent.  (Dead code
			 * while IEEE80211_NO_HT is defined above.)
			 */
		case IEEE80211_HTPROT_20MHZ:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
			    IWM_MAC_PROT_FLG_FAT_PROT);
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
#endif

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}
   5307 
   5308 static void
   5309 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
   5310     struct iwm_mac_data_sta *sta, int assoc)
   5311 {
   5312 	struct ieee80211_node *ni = &in->in_ni;
   5313 	uint32_t dtim_off;
   5314 	uint64_t tsf;
   5315 
   5316 	dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
   5317 	tsf = le64toh(ni->ni_tstamp.tsf);
   5318 
   5319 	sta->is_assoc = htole32(assoc);
   5320 	sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
   5321 	sta->dtim_tsf = htole64(tsf + dtim_off);
   5322 	sta->bi = htole32(ni->ni_intval);
   5323 	sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
   5324 	sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
   5325 	sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
   5326 	sta->listen_interval = htole32(10);
   5327 	sta->assoc_id = htole32(ni->ni_associd);
   5328 	sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
   5329 }
   5330 
   5331 static int
   5332 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
   5333     int assoc)
   5334 {
   5335 	struct ieee80211_node *ni = &in->in_ni;
   5336 	struct iwm_mac_ctx_cmd cmd;
   5337 
   5338 	memset(&cmd, 0, sizeof(cmd));
   5339 
   5340 	iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
   5341 
   5342 	/* Allow beacons to pass through as long as we are not associated or we
   5343 	 * do not have dtim period information */
   5344 	if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
   5345 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
   5346 	else
   5347 		iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
   5348 
   5349 	return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
   5350 }
   5351 
   5352 #define IWM_MISSED_BEACONS_THRESHOLD 8
   5353 
/*
 * Handle a firmware "missed beacons" notification.  If too many
 * consecutive beacons have been missed since the last received frame,
 * tell net80211 so it can start its beacon-miss recovery.
 */
static void
iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;

	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
	    le32toh(mb->mac_id),
	    le32toh(mb->consec_missed_beacons),
	    le32toh(mb->consec_missed_beacons_since_last_rx),
	    le32toh(mb->num_recvd_beacons),
	    le32toh(mb->num_expected_beacons)));

	/*
	 * TODO: the threshold should be adjusted based on latency conditions,
	 * and/or in case of a CS flow on one of the other AP vifs.
	 */
	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
	    IWM_MISSED_BEACONS_THRESHOLD)
		ieee80211_beacon_miss(&sc->sc_ic);
}
   5375 
   5376 static int
   5377 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
   5378 {
   5379 	struct iwm_time_quota_cmd cmd;
   5380 	int i, idx, num_active_macs, quota, quota_rem;
   5381 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
   5382 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
   5383 	uint16_t id;
   5384 
   5385 	memset(&cmd, 0, sizeof(cmd));
   5386 
   5387 	/* currently, PHY ID == binding ID */
   5388 	if (in) {
   5389 		id = in->in_phyctxt->id;
   5390 		KASSERT(id < IWM_MAX_BINDINGS);
   5391 		colors[id] = in->in_phyctxt->color;
   5392 
   5393 		if (1)
   5394 			n_ifs[id] = 1;
   5395 	}
   5396 
   5397 	/*
   5398 	 * The FW's scheduling session consists of
   5399 	 * IWM_MAX_QUOTA fragments. Divide these fragments
   5400 	 * equally between all the bindings that require quota
   5401 	 */
   5402 	num_active_macs = 0;
   5403 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
   5404 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
   5405 		num_active_macs += n_ifs[i];
   5406 	}
   5407 
   5408 	quota = 0;
   5409 	quota_rem = 0;
   5410 	if (num_active_macs) {
   5411 		quota = IWM_MAX_QUOTA / num_active_macs;
   5412 		quota_rem = IWM_MAX_QUOTA % num_active_macs;
   5413 	}
   5414 
   5415 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
   5416 		if (colors[i] < 0)
   5417 			continue;
   5418 
   5419 		cmd.quotas[idx].id_and_color =
   5420 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
   5421 
   5422 		if (n_ifs[i] <= 0) {
   5423 			cmd.quotas[idx].quota = htole32(0);
   5424 			cmd.quotas[idx].max_duration = htole32(0);
   5425 		} else {
   5426 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
   5427 			cmd.quotas[idx].max_duration = htole32(0);
   5428 		}
   5429 		idx++;
   5430 	}
   5431 
   5432 	/* Give the remainder of the session to the first binding */
   5433 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
   5434 
   5435 	return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
   5436 }
   5437 
/*
 * Prepare the firmware for authentication with the selected AP:
 * smart-FIFO full-on, multicast filter, PHY context on the AP's
 * channel, MAC context, binding, and station entry.  Finally a time
 * event "protects" the session so the firmware stays on channel
 * during the authentication/association exchange.
 */
static int
iwm_auth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	uint32_t duration;
	int err;

	/* Enter full-on smart-FIFO mode for the association flow. */
	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err)
		return err;

	/* Install the multicast filter for our BSSID. */
	err = iwm_allow_mcast(sc);
	if (err)
		return err;

	/* Retune PHY context 0 to the AP's channel. */
	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err)
		return err;
	in->in_phyctxt = &sc->sc_phyctxt[0];

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add MAC context (error %d)\n", err);
		return err;
	}

	/* Bind the MAC context to the PHY context. */
	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err)
		return err;

	/* Add the AP's station entry (not yet marked associated). */
	err = iwm_add_sta_cmd(sc, in, 0);
	if (err)
		return err;

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
		return err;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 2;
	else
		duration = IEEE80211_DUR_TU;
	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
	DELAY(100);

	return 0;
}
   5495 
   5496 static int
   5497 iwm_assoc(struct iwm_softc *sc)
   5498 {
   5499 	struct ieee80211com *ic = &sc->sc_ic;
   5500 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
   5501 	int err;
   5502 
   5503 	err = iwm_add_sta_cmd(sc, in, 1);
   5504 	if (err)
   5505 		return err;
   5506 
   5507 	return 0;
   5508 }
   5509 
   5510 static struct ieee80211_node *
   5511 iwm_node_alloc(struct ieee80211_node_table *nt)
   5512 {
   5513 	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
   5514 }
   5515 
/*
 * Periodic (500ms) calibration callout: run AMRR rate adaptation for
 * the BSS node when no fixed rate is configured.  When HT is compiled
 * in, a change of TX rate/MCS schedules setrates_task so the firmware
 * LQ table can be updated from process context.
 */
static void
iwm_calib_timeout(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
#ifndef IEEE80211_NO_HT
	struct ieee80211_node *ni = &in->in_ni;
	int otxrate;
#endif
	int s;

	s = splnet();
	/*
	 * NOTE(review): with HT enabled this runs AMRR when EITHER the
	 * legacy rate OR the MCS is unfixed — confirm that '||' rather
	 * than '&&' is the intended condition.
	 */
	if ((ic->ic_fixed_rate == -1
#ifndef IEEE80211_NO_HT
	    || ic->ic_fixed_mcs == -1
#endif
	    ) &&
	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
#ifndef IEEE80211_NO_HT
		/* Remember the current TX rate/MCS to detect a change. */
		if (ni->ni_flags & IEEE80211_NODE_HT)
			otxrate = ni->ni_txmcs;
		else
			otxrate = ni->ni_txrate;
#endif
		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);

#ifndef IEEE80211_NO_HT
		/*
		 * If AMRR has chosen a new TX rate we must update
		 * the firwmare's LQ rate table from process context.
		 */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    otxrate != ni->ni_txmcs)
			softint_schedule(sc->setrates_task);
		else if (otxrate != ni->ni_txrate)
			softint_schedule(sc->setrates_task);
#endif
	}
	splx(s);

	/* Re-arm for the next 500ms calibration interval. */
	callout_schedule(&sc->sc_calib_to, mstohz(500));
}
   5559 
   5560 #ifndef IEEE80211_NO_HT
   5561 static void
   5562 iwm_setrates_task(void *arg)
   5563 {
   5564 	struct iwm_softc *sc = arg;
   5565 	struct ieee80211com *ic = &sc->sc_ic;
   5566 	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
   5567 
   5568 	/* Update rates table based on new TX rate determined by AMRR. */
   5569 	iwm_setrates(in);
   5570 }
   5571 
/*
 * Build and send the firmware link-quality (LQ) command for the given
 * node.  The rate table is filled in descending order starting from
 * the node's current TX rate; unused tail entries repeat the lowest
 * rate found.  Returns the result of the command submission.
 */
static int
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int i, j, ridx, ridx_min, tab = 0;
#ifndef IEEE80211_NO_HT
	int sgi_ok;
#endif
	struct iwm_host_cmd cmd = {
		.id = IWM_LQ_CMD,
		.len = { sizeof(in->in_lq), },
	};

	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/* Use RTS/CTS when net80211 says protection is required. */
	if (ic->ic_flags & IEEE80211_F_USEPROT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

#ifndef IEEE80211_NO_HT
	/* Short guard interval is usable if the peer advertises SGI20. */
	sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
#endif


	/*
	 * Fill the LQ rate selection table with legacy and/or HT rates
	 * in descending order, i.e. with the node's current TX rate first.
	 * In cases where throughput of an HT rate corresponds to a legacy
	 * rate it makes no sense to add both. We rely on the fact that
	 * iwm_rates is laid out such that equivalent HT/legacy rates share
	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
	 */
	j = 0;
	/* No CCK rates on 5GHz channels. */
	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
		if (j >= __arraycount(lq->rs_table))
			break;
		tab = 0;
#ifndef IEEE80211_NO_HT
		/* Prefer an HT MCS entry when one maps to this index. */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
			for (i = ni->ni_txmcs; i >= 0; i--) {
				if (isclr(ni->ni_rxmcs, i))
					continue;
				if (ridx == iwm_mcs2ridx[i]) {
					tab = iwm_rates[ridx].ht_plcp;
					tab |= IWM_RATE_MCS_HT_MSK;
					if (sgi_ok)
						tab |= IWM_RATE_MCS_SGI_MSK;
					break;
				}
			}
		}
#endif
		/* Otherwise fall back to a matching legacy rate. */
		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
			for (i = ni->ni_txrate; i >= 0; i--) {
				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
				    IEEE80211_RATE_VAL)) {
					tab = iwm_rates[ridx].plcp;
					break;
				}
			}
		}

		if (tab == 0)
			continue;

		/* Transmit on antenna A; flag CCK rates for the firmware. */
		tab |= 1 << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[j++] = htole32(tab);
	}

	/* Fill the rest with the lowest possible rate */
	i = j > 0 ? j - 1 : 0;
	while (j < __arraycount(lq->rs_table))
		lq->rs_table[j++] = lq->rs_table[i];

	lq->single_stream_ant_msk = IWM_ANT_A;
	lq->dual_stream_ant_msk = IWM_ANT_AB;

	lq->agg_time_limit = htole16(4000);	/* 4ms */
	lq->agg_disable_start_th = 3;
#ifdef notyet
	lq->agg_frame_cnt_limit = 0x3f;
#else
	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
#endif

	cmd.data[0] = &in->in_lq;
	return iwm_send_cmd(sc, &cmd);
}
   5672 #endif
   5673 
/*
 * ifmedia change handler: translate a user-selected fixed rate (or MCS
 * when HT is compiled in) into a hardware rate index, then restart the
 * interface if it is up so the new setting takes effect.
 */
static int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

#ifndef IEEE80211_NO_HT
	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else
#endif
	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		/*
		 * NOTE(review): if no entry in iwm_rates matches, ridx
		 * ends up as IWM_RIDX_MAX + 1 — confirm callers of
		 * sc_fixed_ridx tolerate an out-of-range index.
		 */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		sc->sc_fixed_ridx = ridx;
	}

	/*
	 * NOTE(review): when the interface is not up and running, err is
	 * still ENETRESET here — verify the ioctl path expects that.
	 */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		err = iwm_init(ifp);
	}
	return err;
}
   5708 
/*
 * Workqueue callback performing the actual net80211 state transition
 * queued by iwm_newstate().  Runs in process context so it may sleep
 * while issuing firmware commands.  A generation check discards stale
 * work queued before the device was reset or stopped.
 */
static void
iwm_newstate_cb(struct work *wk, void *v)
{
	struct iwm_softc *sc = v;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	int err;

	/* All state we need has been copied out; free the work item. */
	kmem_free(iwmns, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ostate, nstate));
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate]));

	/* Stop LED blinking when a scan ends. */
	if (ostate == IEEE80211_S_SCAN && nstate != ostate)
		iwm_led_blink_stop(sc);

	/* Disable beacon filtering when leaving RUN. */
	if (ostate == IEEE80211_S_RUN && nstate != ostate)
		iwm_disable_beacon_filter(sc);

	/* Reset the device if moving out of AUTH, ASSOC, or RUN. */
	/* XXX Is there a way to switch states without a full reset? */
	if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
		iwm_stop_device(sc);
		iwm_init_hw(sc);

		/*
		 * Upon receiving a deauth frame from AP the net80211 stack
		 * puts the driver into AUTH state. This will fail with this
		 * driver so bring the FSM from RUN to SCAN in this case.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			/* Always pass arg as -1 since we can't Tx right now. */
			sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		/* Ignore a request to scan while already scanning. */
		if (ostate == nstate &&
		    ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
			return;
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
			err = iwm_umac_scan(sc);
		else
			err = iwm_lmac_scan(sc);
		if (err) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			return;
		}
		SET(sc->sc_flags, IWM_FLAG_SCANNING);
		ic->ic_state = nstate;
		iwm_led_blink_start(sc);
		/* Scan completion advances the state machine, not us. */
		return;

	case IEEE80211_S_AUTH:
		err = iwm_auth(sc);
		if (err) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), err));
			return;
		}
		break;

	case IEEE80211_S_ASSOC:
		err = iwm_assoc(sc);
		if (err) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    err));
			return;
		}
		break;

	case IEEE80211_S_RUN:
		in = (struct iwm_node *)ic->ic_bss;

		/* We have now been assigned an associd by the AP. */
		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
			return;
		}

		err = iwm_power_update_device(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could send power command (error %d)\n", err);
			return;
		}
#ifdef notyet
		/*
		 * Disabled for now. Default beacon filter settings
		 * prevent net80211 from getting ERP and HT protection
		 * updates from beacons.
		 */
		err = iwm_enable_beacon_filter(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not enable beacon filter\n");
			return;
		}
#endif
		err = iwm_power_mac_update_mode(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update MAC power (error %d)\n", err);
			return;
		}

		err = iwm_update_quotas(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update quotas (error %d)\n", err);
			return;
		}

		ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);

		/* Start at lowest available bit-rate, AMRR will raise. */
		in->in_ni.ni_txrate = 0;
#ifndef IEEE80211_NO_HT
		in->in_ni.ni_txmcs = 0;
		iwm_setrates(in);
#endif

		/* Begin periodic calibration/rate adaptation. */
		callout_schedule(&sc->sc_calib_to, mstohz(500));
		iwm_led_enable(sc);
		break;

	default:
		break;
	}

	sc->sc_newstate(ic, nstate, arg);
}
   5866 
   5867 static int
   5868 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
   5869 {
   5870 	struct iwm_newstate_state *iwmns;
   5871 	struct ifnet *ifp = IC2IFP(ic);
   5872 	struct iwm_softc *sc = ifp->if_softc;
   5873 
   5874 	callout_stop(&sc->sc_calib_to);
   5875 
   5876 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
   5877 	if (!iwmns) {
   5878 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
   5879 		return ENOMEM;
   5880 	}
   5881 
   5882 	iwmns->ns_nstate = nstate;
   5883 	iwmns->ns_arg = arg;
   5884 	iwmns->ns_generation = sc->sc_generation;
   5885 
   5886 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
   5887 
   5888 	return 0;
   5889 }
   5890 
   5891 static void
   5892 iwm_endscan(struct iwm_softc *sc)
   5893 {
   5894 	struct ieee80211com *ic = &sc->sc_ic;
   5895 
   5896 	DPRINTF(("scan ended\n"));
   5897 
   5898 	CLR(sc->sc_flags, IWM_FLAG_SCANNING);
   5899 	ieee80211_end_scan(ic);
   5900 }
   5901 
   5902 /*
   5903  * Aging and idle timeouts for the different possible scenarios
   5904  * in default configuration
   5905  */
static const uint32_t
iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast traffic */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* aggregated unicast traffic */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
	},
	{	/* multicast traffic */
		htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
		htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER_DEF),
		htole32(IWM_SF_BA_IDLE_TIMER_DEF)
	},
	{	/* TX re(try) */
		htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
		htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
	},
};
   5929 
   5930 /*
   5931  * Aging and idle timeouts for the different possible scenarios
   5932  * in single BSS MAC configuration.
   5933  */
static const uint32_t
iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
	{	/* single unicast traffic */
		htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
		htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
	},
	{	/* aggregated unicast traffic */
		htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
		htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
	},
	{	/* multicast traffic */
		htole32(IWM_SF_MCAST_AGING_TIMER),
		htole32(IWM_SF_MCAST_IDLE_TIMER)
	},
	{	/* block ack */
		htole32(IWM_SF_BA_AGING_TIMER),
		htole32(IWM_SF_BA_IDLE_TIMER)
	},
	{	/* TX re(try) */
		htole32(IWM_SF_TX_RE_AGING_TIMER),
		htole32(IWM_SF_TX_RE_IDLE_TIMER)
	},
};
   5957 
/*
 * Fill a smart-FIFO configuration command: choose the full-on
 * watermark from the peer's capabilities (or a default when
 * unassociated, i.e. ni == NULL) and install the aging/idle timeout
 * tables.
 */
static void
iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			/* MIMO watermarks once rx MCS sets are tracked. */
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else
#endif
			watermark = IWM_SF_W_MARK_LEGACY;
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts use the same aging timer for all scenarios. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
					htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts differ between associated and idle states. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
   6005 
   6006 static int
   6007 iwm_sf_config(struct iwm_softc *sc, int new_state)
   6008 {
   6009 	struct ieee80211com *ic = &sc->sc_ic;
   6010 	struct iwm_sf_cfg_cmd sf_cmd = {
   6011 		.state = htole32(IWM_SF_FULL_ON),
   6012 	};
   6013 
   6014 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   6015 		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
   6016 
   6017 	switch (new_state) {
   6018 	case IWM_SF_UNINIT:
   6019 	case IWM_SF_INIT_OFF:
   6020 		iwm_fill_sf_command(sc, &sf_cmd, NULL);
   6021 		break;
   6022 	case IWM_SF_FULL_ON:
   6023 		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
   6024 		break;
   6025 	default:
   6026 		return EINVAL;
   6027 	}
   6028 
   6029 	return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
   6030 	    sizeof(sf_cmd), &sf_cmd);
   6031 }
   6032 
   6033 static int
   6034 iwm_send_bt_init_conf(struct iwm_softc *sc)
   6035 {
   6036 	struct iwm_bt_coex_cmd bt_cmd;
   6037 
   6038 	bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
   6039 	bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
   6040 
   6041 	return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
   6042 }
   6043 
/*
 * Send an MCC (mobile country code) update to the firmware to set the
 * regulatory domain; alpha2 is the two-letter country code ("ZZ" for
 * the world-wide default).  The response is requested but discarded --
 * only completion of the command matters here.
 */
static int
iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
{
	struct iwm_mcc_update_cmd mcc_cmd;
	struct iwm_host_cmd hcmd = {
		.id = IWM_MCC_UPDATE_CMD,
		.flags = IWM_CMD_WANT_SKB,
		.data = { &mcc_cmd },
	};
	/* Newer firmware uses a larger v2 command/response layout. */
	int resp_v2 = isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
	int err;

	memset(&mcc_cmd, 0, sizeof(mcc_cmd));
	mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
	if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
	    isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
		mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
	else
		mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;

	if (resp_v2)
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
	else
		hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);

	err = iwm_send_cmd(sc, &hcmd);
	if (err)
		return err;

	/* Response buffer (requested via IWM_CMD_WANT_SKB) is unused. */
	iwm_free_resp(sc, &hcmd);

	return 0;
}
   6078 
   6079 static void
   6080 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
   6081 {
   6082 	struct iwm_host_cmd cmd = {
   6083 		.id = IWM_REPLY_THERMAL_MNG_BACKOFF,
   6084 		.len = { sizeof(uint32_t), },
   6085 		.data = { &backoff, },
   6086 	};
   6087 
   6088 	iwm_send_cmd(sc, &cmd);
   6089 }
   6090 
   6091 static int
   6092 iwm_init_hw(struct iwm_softc *sc)
   6093 {
   6094 	struct ieee80211com *ic = &sc->sc_ic;
   6095 	int err, i, ac;
   6096 
   6097 	err = iwm_preinit(sc);
   6098 	if (err)
   6099 		return err;
   6100 
   6101 	err = iwm_start_hw(sc);
   6102 	if (err) {
   6103 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6104 		return err;
   6105 	}
   6106 
   6107 	err = iwm_run_init_mvm_ucode(sc, 0);
   6108 	if (err)
   6109 		return err;
   6110 
   6111 	/* Should stop and start HW since INIT image just loaded. */
   6112 	iwm_stop_device(sc);
   6113 	err = iwm_start_hw(sc);
   6114 	if (err) {
   6115 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6116 		return err;
   6117 	}
   6118 
   6119 	/* Restart, this time with the regular firmware */
   6120 	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
   6121 	if (err) {
   6122 		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
   6123 		goto err;
   6124 	}
   6125 
   6126 	err = iwm_send_bt_init_conf(sc);
   6127 	if (err) {
   6128 		aprint_error_dev(sc->sc_dev,
   6129 		    "could not init bt coex (error %d)\n", err);
   6130 		goto err;
   6131 	}
   6132 
   6133 	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
   6134 	if (err) {
   6135 		aprint_error_dev(sc->sc_dev,
   6136 		    "could not init tx ant config (error %d)\n", err);
   6137 		goto err;
   6138 	}
   6139 
   6140 	/* Send phy db control command and then phy db calibration*/
   6141 	err = iwm_send_phy_db_data(sc);
   6142 	if (err) {
   6143 		aprint_error_dev(sc->sc_dev,
   6144 		    "could not init phy db (error %d)\n", err);
   6145 		goto err;
   6146 	}
   6147 
   6148 	err = iwm_send_phy_cfg_cmd(sc);
   6149 	if (err) {
   6150 		aprint_error_dev(sc->sc_dev,
   6151 		    "could not send phy config (error %d)\n", err);
   6152 		goto err;
   6153 	}
   6154 
   6155 	/* Add auxiliary station for scanning */
   6156 	err = iwm_add_aux_sta(sc);
   6157 	if (err) {
   6158 		aprint_error_dev(sc->sc_dev,
   6159 		    "could not add aux station (error %d)\n", err);
   6160 		goto err;
   6161 	}
   6162 
   6163 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
   6164 		/*
   6165 		 * The channel used here isn't relevant as it's
   6166 		 * going to be overwritten in the other flows.
   6167 		 * For now use the first channel we have.
   6168 		 */
   6169 		sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
   6170 		err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
   6171 		    IWM_FW_CTXT_ACTION_ADD, 0);
   6172 		if (err) {
   6173 			aprint_error_dev(sc->sc_dev,
   6174 			    "could not add phy context %d (error %d)\n",
   6175 			    i, err);
   6176 			goto err;
   6177 		}
   6178 	}
   6179 
   6180 	/* Initialize tx backoffs to the minimum. */
   6181 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
   6182 		iwm_tt_tx_backoff(sc, 0);
   6183 
   6184 	err = iwm_power_update_device(sc);
   6185 	if (err) {
   6186 		aprint_error_dev(sc->sc_dev,
   6187 		    "could send power command (error %d)\n", err);
   6188 		goto err;
   6189 	}
   6190 
   6191 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
   6192 		err = iwm_send_update_mcc_cmd(sc, "ZZ");
   6193 		if (err) {
   6194 			aprint_error_dev(sc->sc_dev,
   6195 			    "could not init LAR (error %d)\n", err);
   6196 			goto err;
   6197 		}
   6198 	}
   6199 
   6200 	if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
   6201 		err = iwm_config_umac_scan(sc);
   6202 		if (err) {
   6203 			aprint_error_dev(sc->sc_dev,
   6204 			    "could not configure scan (error %d)\n", err);
   6205 			goto err;
   6206 		}
   6207 	}
   6208 
   6209 	for (ac = 0; ac < WME_NUM_AC; ac++) {
   6210 		err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
   6211 		    iwm_ac_to_tx_fifo[ac]);
   6212 		if (err) {
   6213 			aprint_error_dev(sc->sc_dev,
   6214 			    "could not enable Tx queue %d (error %d)\n",
   6215 			    i, err);
   6216 			goto err;
   6217 		}
   6218 	}
   6219 
   6220 	err = iwm_disable_beacon_filter(sc);
   6221 	if (err) {
   6222 		aprint_error_dev(sc->sc_dev,
   6223 		    "could not disable beacon filter (error %d)\n", err);
   6224 		goto err;
   6225 	}
   6226 
   6227 	return 0;
   6228 
   6229  err:
   6230 	iwm_stop_device(sc);
   6231 	return err;
   6232 }
   6233 
   6234 /* Allow multicast from our BSSID. */
   6235 static int
   6236 iwm_allow_mcast(struct iwm_softc *sc)
   6237 {
   6238 	struct ieee80211com *ic = &sc->sc_ic;
   6239 	struct ieee80211_node *ni = ic->ic_bss;
   6240 	struct iwm_mcast_filter_cmd *cmd;
   6241 	size_t size;
   6242 	int err;
   6243 
   6244 	size = roundup(sizeof(*cmd), 4);
   6245 	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
   6246 	if (cmd == NULL)
   6247 		return ENOMEM;
   6248 	cmd->filter_own = 1;
   6249 	cmd->port_id = 0;
   6250 	cmd->count = 0;
   6251 	cmd->pass_all = 1;
   6252 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
   6253 
   6254 	err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
   6255 	kmem_intr_free(cmd, size);
   6256 	return err;
   6257 }
   6258 
   6259 static int
   6260 iwm_init(struct ifnet *ifp)
   6261 {
   6262 	struct iwm_softc *sc = ifp->if_softc;
   6263 	int err;
   6264 
   6265 	if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
   6266 		return 0;
   6267 
   6268 	sc->sc_generation++;
   6269 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
   6270 
   6271 	err = iwm_init_hw(sc);
   6272 	if (err) {
   6273 		iwm_stop(ifp, 1);
   6274 		return err;
   6275 	}
   6276 
   6277 	ifp->if_flags &= ~IFF_OACTIVE;
   6278 	ifp->if_flags |= IFF_RUNNING;
   6279 
   6280 	ieee80211_begin_scan(&sc->sc_ic, 0);
   6281 	SET(sc->sc_flags, IWM_FLAG_HW_INITED);
   6282 
   6283 	return 0;
   6284 }
   6285 
/*
 * Interface start (transmit) routine: drain management frames from
 * ic_mgtq and, once in RUN state, data frames from the interface send
 * queue, encapsulate them and hand them to iwm_tx().  Backs off by
 * setting IFF_OACTIVE while any Tx ring is full.
 */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* Destination node is stashed in the mbuf context. */
			ni = M_GETCTX(m, struct ieee80211_node *);
			M_CLEARCTX(m);
			ac = WME_AC_BE;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Need a contiguous Ethernet header for classification. */
		if (m->m_len < sizeof (*eh) &&
		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		/* Tap the frame for BPF listeners before 802.11 encap. */
		bpf_mtap(ifp, m);

		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		bpf_mtap3(ic->ic_rawbpf, m);

		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the Tx watchdog for this frame. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}
}
   6371 
/*
 * Interface stop routine: mark the interface down, invalidate pending
 * deferred work via the generation counter, force the 802.11 state
 * machine back to INIT and power the device down.
 */
static void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	/* Invalidate queued newstate work items. */
	sc->sc_generation++;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* The PHY context becomes invalid once the device stops. */
	if (in)
		in->in_phyctxt = NULL;

	if (ic->ic_state != IEEE80211_S_INIT)
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	callout_stop(&sc->sc_calib_to);
	iwm_led_blink_stop(sc);
	ifp->if_timer = sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
   6395 
/*
 * iwm_watchdog: ifnet if_watchdog entry point, invoked once per second
 * while ifp->if_timer is nonzero.  Counts sc_tx_timer down; on expiry
 * it dumps the firmware error log (IWM_DEBUG kernels) and resets the
 * interface.  The net80211 watchdog always gets its tick.
 */
static void
iwm_watchdog(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;

	ifp->if_timer = 0;
	if (sc->sc_tx_timer > 0) {
		if (--sc->sc_tx_timer == 0) {
			aprint_error_dev(sc->sc_dev, "device timeout\n");
#ifdef IWM_DEBUG
			iwm_nic_error(sc);
#endif
			/* Force the interface down; iwm_stop() cleans up. */
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
			ifp->if_oerrors++;
			return;
		}
		/* Still counting down: re-arm for another second. */
		ifp->if_timer = 1;
	}

	ieee80211_watchdog(&sc->sc_ic);
}
   6418 
/*
 * iwm_ioctl: ifnet ioctl entry point.  Runs at splnet().
 * Handles interface up/down transitions, multicast list changes, and
 * forwards everything else to net80211 (or plain ether_ioctl() before
 * the device has fully attached).  An ENETRESET result from any path
 * triggers a stop/init cycle when the interface is up and running.
 */
static int
iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	const struct sockaddr *sa;
	int s, err = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		err = ifioctl_common(ifp, cmd, data);
		if (err)
			break;
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				err = iwm_init(ifp);
				/* Init failed: do not claim to be up. */
				if (err)
					ifp->if_flags &= ~IFF_UP;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop(ifp, 1);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
			err = ENXIO;
			break;
		}
		/*
		 * NOTE(review): SIOCADDMULTI is passed to ifreq_getaddr()
		 * even for SIOCDELMULTI; this appears to extract the same
		 * address either way -- confirm against sys/net/if.h.
		 */
		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
		err = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(sa, &sc->sc_ec) :
		    ether_delmulti(sa, &sc->sc_ec);
		/* The reset is handled by the common path below. */
		if (err == ENETRESET)
			err = 0;
		break;

	default:
		if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
			err = ether_ioctl(ifp, cmd, data);
			break;
		}
		err = ieee80211_ioctl(ic, cmd, data);
		break;
	}

	/* Apply any configuration change that requires a restart. */
	if (err == ENETRESET) {
		err = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwm_stop(ifp, 0);
			err = iwm_init(ifp);
		}
	}

	splx(s);
	return err;
}
   6484 
   6485 /*
   6486  * Note: This structure is read from the device with IO accesses,
   6487  * and the reading already does the endian conversion. As it is
   6488  * read with uint32_t-sized accesses, any members with a different size
   6489  * need to be ordered correctly though!
   6490  */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicates which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the uCode
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
   6538 
   6539 /*
   6540  * UMAC error struct - relevant starting from family 8000 chip.
   6541  * Note: This structure is read from the device with IO accesses,
   6542  * and the reading already does the endian conversion. As it is
   6543  * read with u32-sized accesses, any members with a different size
   6544  * need to be ordered correctly though!
   6545  */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
   6563 
/*
 * Firmware event-log geometry, in bytes (multiples of 32-bit words);
 * used by iwm_nic_error()/iwm_nic_umac_error() when deciding whether
 * to announce a log dump.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
   6566 
   6567 #ifdef IWM_DEBUG
/*
 * Known firmware assert/error identifiers and their symbolic names.
 * The final "ADVANCED_SYSASSERT" entry is the catch-all returned by
 * iwm_desc_lookup() when no other entry matches.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
   6589 
   6590 static const char *
   6591 iwm_desc_lookup(uint32_t num)
   6592 {
   6593 	int i;
   6594 
   6595 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
   6596 		if (advanced_lookup[i].num == num)
   6597 			return advanced_lookup[i].name;
   6598 
   6599 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
   6600 	return advanced_lookup[i].name;
   6601 }
   6602 
   6603 /*
   6604  * Support for dumping the error log seemed like a good idea ...
   6605  * but it's mostly hex junk and the only sensible thing is the
   6606  * hw/ucode revision (which we know anyway).  Since it's here,
   6607  * I'll just leave it in, just in case e.g. the Intel guys want to
   6608  * help us decipher some "ADVANCED_SYSASSERT" later.
   6609  */
/*
 * iwm_nic_error: read the LMAC error event table out of device memory
 * and print it.  The table address was recorded from the firmware's
 * ALIVE response (sc_uc.uc_error_event_table).  Chains to the UMAC
 * table dump when one was advertised.  IWM_DEBUG only.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table t;
	uint32_t base;

	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	/* Sanity-check the pointer reported by the firmware. */
	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes its length in 32-bit words. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!t.valid) {
		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
	    iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
	    t.trm_hw_status0);
	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
	    t.trm_hw_status1);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
	aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
	aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
	    t.fw_rev_type);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
	    t.major);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
	    t.minor);
	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
	aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
	aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
	    t.l2p_addr_match);
	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);

	/* Family 8000+ devices also advertise a UMAC error table. */
	if (sc->sc_uc.uc_umac_error_event_table)
		iwm_nic_umac_error(sc);
}
   6685 
/*
 * iwm_nic_umac_error: read and print the UMAC error event table
 * (family 8000 and later; see struct iwm_umac_error_event_table).
 * Called from iwm_nic_error() when the firmware advertised a table.
 * IWM_DEBUG only.
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table t;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Sanity-check the pointer reported by the firmware. */
	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes its length in 32-bit words. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
		iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    t.ilink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    t.ilink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
	aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
	    t.frame_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
	    t.stack_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
	aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
	    t.nic_isr_pref);
}
   6732 #endif
   6733 
/*
 * Sync the RX DMA region holding the response structure that directly
 * follows the packet header _pkt_, then point _var_ at it.  Relies on
 * "sc" and "data" (the current struct iwm_rx_data) being in scope at
 * the expansion site.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
   6740 
/*
 * Sync _len_ bytes of the RX DMA region directly after the packet
 * header _pkt_, then point _ptr_ at that payload.  Relies on "sc" and
 * "data" (the current struct iwm_rx_data) being in scope at the
 * expansion site.
 *
 * Fix: the previous expansion synced sizeof(len) bytes -- the size of
 * whichever variable named "len" happened to be in scope at the call
 * site -- instead of using the _len_ argument, which went completely
 * unused and typically under-synced the payload.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)
   6747 
   6748 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
   6749 
/*
 * iwm_notif_intr: drain the RX ring and dispatch every pending firmware
 * notification/response packet.  Runs from the soft interrupt handler
 * (iwm_softintr).  The hardware's closed_rb_num tells us how far the
 * firmware has written; we consume packets until we catch up, then
 * hand the (8-aligned) write pointer back to the device.
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	/* Pick up the latest ring status written by the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int orig_qid, qid, idx, code;

		/* Sync just the packet header first; payload syncs are
		 * done per-response via SYNC_RESP_STRUCT/SYNC_RESP_PTR. */
		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 0x80 in qid marks firmware-originated packets. */
		orig_qid = pkt->hdr.qid;
		qid = orig_qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_rx_missed_beacons_notif(sc, pkt, data);
			break;

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		/*
		 * ALIVE: the firmware has booted.  Three payload layouts
		 * exist depending on firmware generation; distinguish
		 * them by payload size and record the error/log table
		 * pointers and scheduler base from whichever matched.
		 */
		case IWM_ALIVE: {
			struct iwm_alive_resp_v1 *resp1;
			struct iwm_alive_resp_v2 *resp2;
			struct iwm_alive_resp_v3 *resp3;

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake whoever is sleeping in firmware load. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);
			uint16_t size = le16toh(phy_db_notif->length);
			/* Sync the variable-length PHY DB data too. */
			bus_dmamap_sync(sc->sc_dmat, data->map,
			    sizeof(*pkt) + sizeof(*phy_db_notif),
			    size, BUS_DMASYNC_POSTREAD);
			iwm_phy_db_set_section(sc, phy_db_notif, size);
			break;
		}

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break;
		}

		/* Copy the full packet to the synchronous-command buffer
		 * if someone is waiting for exactly this qid/idx. */
		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Record the regulatory country code as ASCII. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
			break;

		/* Generic command responses: stash the status word for a
		 * waiting iwm_send_cmd() caller, if any. */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt) + sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);
			break;
		}

		default:
			aprint_error_dev(sc->sc_dev,
			    "unhandled firmware response 0x%x 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWM_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (!(orig_qid & (1 << 7))) {
			iwm_cmd_done(sc, qid, idx);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
   7018 
/*
 * iwm_softintr: soft interrupt (bottom half).  Consumes the interrupt
 * status bits accumulated by iwm_intr() in sc_soft_flags and acts on
 * them: fatal firmware/hardware errors stop the interface, firmware
 * chunk completions wake the loader, rfkill takes the interface down,
 * and RX bits drain the notification ring.  Loops until no new bits
 * have arrived, then unmasks the hardware interrupt.
 */
static void
iwm_softintr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	uint32_t r1;
	int isperiodic = 0;

	/* Atomically take ownership of all pending status bits. */
	r1 = atomic_swap_32(&sc->sc_soft_flags, 0);

 restart:
	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]));
#endif

		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
 fatal:
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		/* Don't restore interrupt mask */
		return;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		goto fatal;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		/* Radio kill switch: take the interface down if it is up. */
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* Disable periodic RX interrupts while no RX is pending. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
	    isperiodic) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
		    !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	/* More bits may have arrived while we were working; handle them
	 * before unmasking the hardware interrupt. */
	r1 = atomic_swap_32(&sc->sc_soft_flags, 0);
	if (r1 != 0)
		goto restart;

	iwm_restore_interrupts(sc);
}
   7105 
/*
 * iwm_intr: hardware interrupt handler.  Masks device interrupts,
 * collects the interrupt status (either from the ICT DMA table or
 * straight from the CSR registers), acknowledges it, and defers all
 * real work to iwm_softintr() via sc_soft_flags.  Interrupts stay
 * masked until the soft handler restores them.
 * Returns 1 if the interrupt was ours, 0 otherwise.
 */
static int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int r1, r2;

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
		    0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
		/*
		 * NOTE(review): htole32() is used where le32toh() might be
		 * expected when reading the device-written ICT entry; the
		 * two are identical on little-endian hosts -- confirm for
		 * big-endian platforms.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* Acknowledge. */
			bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
			    &ict[sc->ict_cur] - ict, sizeof(*ict),
			    BUS_DMASYNC_PREWRITE);
			sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* All-ones / 0xa5a5a5ax means the device is gone. */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the bits we are going to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	atomic_or_32(&sc->sc_soft_flags, r1);
	softint_schedule(sc->sc_soft_ih);
	return 1;

 out_ena:
	/* Nothing for us: unmask and report "not ours". */
	iwm_restore_interrupts(sc);
 out:
	return 0;
}
   7167 
   7168 /*
   7169  * Autoconf glue-sniffing
   7170  */
   7171 
/*
 * PCI product IDs accepted by iwm_match().  The 3165 and 8260 entries
 * are compiled out (#if 0).
 */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
#if 0
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
#endif
};
   7186 
   7187 static int
   7188 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
   7189 {
   7190 	struct pci_attach_args *pa = aux;
   7191 
   7192 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
   7193 		return 0;
   7194 
   7195 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
   7196 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
   7197 			return 1;
   7198 
   7199 	return 0;
   7200 }
   7201 
/*
 * iwm_preinit: one-time deferred device setup (idempotent via
 * IWM_FLAG_ATTACHED).  Starts the hardware, runs the firmware's init
 * ucode once to read calibration/NVM data, then shuts the device down
 * again and finishes 802.11 attachment: rate sets, state-machine hook,
 * media, and radiotap.
 * Returns 0 on success or an errno from the hardware bring-up.
 */
static int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	/* Already done: nothing to do. */
	if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
		return 0;

	err = iwm_start_hw(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return err;
	}

	/* Run the init firmware once to obtain NVM data, then power off. */
	err = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (err)
		return err;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
	    ether_sprintf(sc->sc_nvm.hw_addr));

#ifndef IEEE80211_NO_HT
	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);
#endif

	/* not all hardware can do 5GHz band */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;

	ieee80211_ifattach(ic);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);

	return 0;
}
   7251 
/*
 * iwm_attach_hook: deferred continuation of iwm_attach(); completes
 * device setup via iwm_preinit().  Any preinit failure has already
 * been reported by iwm_preinit() itself.
 */
static void
iwm_attach_hook(device_t dev)
{
	struct iwm_softc *sc = device_private(dev);

	iwm_preinit(sc);
}
   7259 
   7260 static void
   7261 iwm_attach(device_t parent, device_t self, void *aux)
   7262 {
   7263 	struct iwm_softc *sc = device_private(self);
   7264 	struct pci_attach_args *pa = aux;
   7265 	struct ieee80211com *ic = &sc->sc_ic;
   7266 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   7267 	pcireg_t reg, memtype;
   7268 	char intrbuf[PCI_INTRSTR_LEN];
   7269 	const char *intrstr;
   7270 	int err;
   7271 	int txq_i;
   7272 	const struct sysctlnode *node;
   7273 
   7274 	sc->sc_dev = self;
   7275 	sc->sc_pct = pa->pa_pc;
   7276 	sc->sc_pcitag = pa->pa_tag;
   7277 	sc->sc_dmat = pa->pa_dmat;
   7278 	sc->sc_pciid = pa->pa_id;
   7279 
   7280 	pci_aprint_devinfo(pa, NULL);
   7281 
   7282 	if (workqueue_create(&sc->sc_nswq, "iwmns",
   7283 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
   7284 		panic("%s: could not create workqueue: newstate",
   7285 		    device_xname(self));
   7286 	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
   7287 	if (sc->sc_soft_ih == NULL)
   7288 		panic("%s: could not establish softint", device_xname(self));
   7289 
   7290 	/*
   7291 	 * Get the offset of the PCI Express Capability Structure in PCI
   7292 	 * Configuration Space.
   7293 	 */
   7294 	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
   7295 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
   7296 	if (err == 0) {
   7297 		aprint_error_dev(self,
   7298 		    "PCIe capability structure not found!\n");
   7299 		return;
   7300 	}
   7301 
   7302 	/* Clear device-specific "PCI retry timeout" register (41h). */
   7303 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
   7304 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
   7305 
   7306 	/* Enable bus-mastering */
   7307 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   7308 	reg |= PCI_COMMAND_MASTER_ENABLE;
   7309 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
   7310 
   7311 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
   7312 	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
   7313 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
   7314 	if (err) {
   7315 		aprint_error_dev(self, "can't map mem space\n");
   7316 		return;
   7317 	}
   7318 
   7319 	/* Install interrupt handler. */
   7320 	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
   7321 	if (err) {
   7322 		aprint_error_dev(self, "can't allocate interrupt\n");
   7323 		return;
   7324 	}
   7325 	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX) {
   7326 		reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
   7327 		    PCI_COMMAND_STATUS_REG);
   7328 		if (ISSET(reg, PCI_COMMAND_INTERRUPT_DISABLE)) {
   7329 			CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
   7330 			pci_conf_write(sc->sc_pct, sc->sc_pcitag,
   7331 			    PCI_COMMAND_STATUS_REG, reg);
   7332 		}
   7333 	}
   7334 	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
   7335 	    sizeof(intrbuf));
   7336 	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
   7337 	    IPL_NET, iwm_intr, sc, device_xname(self));
   7338 	if (sc->sc_ih == NULL) {
   7339 		aprint_error_dev(self, "can't establish interrupt");
   7340 		if (intrstr != NULL)
   7341 			aprint_error(" at %s", intrstr);
   7342 		aprint_error("\n");
   7343 		return;
   7344 	}
   7345 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
   7346 
   7347 	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
   7348 
   7349 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
   7350 	switch (PCI_PRODUCT(sc->sc_pciid)) {
   7351 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
   7352 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
   7353 		sc->sc_fwname = "iwlwifi-3160-16.ucode";
   7354 		sc->host_interrupt_operation_mode = 1;
   7355 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   7356 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   7357 		break;
   7358 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
   7359 	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
   7360 		sc->sc_fwname = "iwlwifi-7265D-16.ucode";
   7361 		sc->host_interrupt_operation_mode = 0;
   7362 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   7363 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   7364 		break;
   7365 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
   7366 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
   7367 		sc->sc_fwname = "iwlwifi-7260-16.ucode";
   7368 		sc->host_interrupt_operation_mode = 1;
   7369 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   7370 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   7371 		break;
   7372 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
   7373 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
   7374 		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
   7375 		    IWM_CSR_HW_REV_TYPE_7265D ?
   7376 		    "iwlwifi-7265D-16.ucode": "iwlwifi-7265-16.ucode";
   7377 		sc->host_interrupt_operation_mode = 0;
   7378 		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
   7379 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   7380 		break;
   7381 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
   7382 	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
   7383 		sc->sc_fwname = "iwlwifi-8000C-16.ucode";
   7384 		sc->host_interrupt_operation_mode = 0;
   7385 		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
   7386 		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
   7387 		break;
   7388 	default:
   7389 		aprint_error_dev(self, "unknown product %#x",
   7390 		    PCI_PRODUCT(sc->sc_pciid));
   7391 		return;
   7392 	}
   7393 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
   7394 
   7395 	/*
   7396 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
   7397 	 * changed, and now the revision step also includes bit 0-1 (no more
   7398 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
   7399 	 * in the old format.
   7400 	 */
   7401 
   7402 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
   7403 		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
   7404 		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
   7405 
   7406 	if (iwm_prepare_card_hw(sc) != 0) {
   7407 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   7408 		return;
   7409 	}
   7410 
   7411 	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
   7412 		uint32_t hw_step;
   7413 
   7414 		/*
   7415 		 * In order to recognize C step the driver should read the
   7416 		 * chip version id located at the AUX bus MISC address.
   7417 		 */
   7418 		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
   7419 			    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
   7420 		DELAY(2);
   7421 
   7422 		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   7423 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   7424 				   IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   7425 				   25000);
   7426 		if (!err) {
   7427 			aprint_error_dev(sc->sc_dev,
   7428 			    "failed to wake up the nic\n");
   7429 			return;
   7430 		}
   7431 
   7432 		if (iwm_nic_lock(sc)) {
   7433 			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
   7434 			hw_step |= IWM_ENABLE_WFPM;
   7435 			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
   7436 			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
   7437 			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
   7438 			if (hw_step == 0x3)
   7439 				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
   7440 				    (IWM_SILICON_C_STEP << 2);
   7441 			iwm_nic_unlock(sc);
   7442 		} else {
   7443 			aprint_error_dev(sc->sc_dev,
   7444 			    "failed to lock the nic\n");
   7445 			return;
   7446 		}
   7447 	}
   7448 
   7449 	/*
   7450 	 * Allocate DMA memory for firmware transfers.
   7451 	 * Must be aligned on a 16-byte boundary.
   7452 	 */
   7453 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
   7454 	    16);
   7455 	if (err) {
   7456 		aprint_error_dev(sc->sc_dev,
   7457 		    "could not allocate memory for firmware\n");
   7458 		return;
   7459 	}
   7460 
   7461 	/* Allocate "Keep Warm" page, used internally by the card. */
   7462 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
   7463 	if (err) {
   7464 		aprint_error_dev(sc->sc_dev,
   7465 		    "could not allocate keep warm page\n");
   7466 		goto fail1;
   7467 	}
   7468 
   7469 	/* Allocate interrupt cause table (ICT).*/
   7470 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
   7471 	    1 << IWM_ICT_PADDR_SHIFT);
   7472 	if (err) {
   7473 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
   7474 		goto fail2;
   7475 	}
   7476 
   7477 	/* TX scheduler rings must be aligned on a 1KB boundary. */
   7478 	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
   7479 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
   7480 	if (err) {
   7481 		aprint_error_dev(sc->sc_dev,
   7482 		    "could not allocate TX scheduler rings\n");
   7483 		goto fail3;
   7484 	}
   7485 
   7486 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
   7487 		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
   7488 		if (err) {
   7489 			aprint_error_dev(sc->sc_dev,
   7490 			    "could not allocate TX ring %d\n", txq_i);
   7491 			goto fail4;
   7492 		}
   7493 	}
   7494 
   7495 	err = iwm_alloc_rx_ring(sc, &sc->rxq);
   7496 	if (err) {
   7497 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
   7498 		goto fail4;
   7499 	}
   7500 
   7501 	/* Clear pending interrupts. */
   7502 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
   7503 
   7504 	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
   7505 	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
   7506 	    SYSCTL_DESCR("iwm per-controller controls"),
   7507 	    NULL, 0, NULL, 0,
   7508 	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
   7509 	    CTL_EOL)) != 0) {
   7510 		aprint_normal_dev(sc->sc_dev,
   7511 		    "couldn't create iwm per-controller sysctl node\n");
   7512 	}
   7513 	if (err == 0) {
   7514 		int iwm_nodenum = node->sysctl_num;
   7515 
   7516 		/* Reload firmware sysctl node */
   7517 		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
   7518 		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
   7519 		    SYSCTL_DESCR("Reload firmware"),
   7520 		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
   7521 		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
   7522 		    CTL_EOL)) != 0) {
   7523 			aprint_normal_dev(sc->sc_dev,
   7524 			    "couldn't create load_fw sysctl node\n");
   7525 		}
   7526 	}
   7527 
   7528 	/*
   7529 	 * Attach interface
   7530 	 */
   7531 	ic->ic_ifp = ifp;
   7532 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
   7533 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
   7534 	ic->ic_state = IEEE80211_S_INIT;
   7535 
   7536 	/* Set device capabilities. */
   7537 	ic->ic_caps =
   7538 	    IEEE80211_C_WEP |		/* WEP */
   7539 	    IEEE80211_C_WPA |		/* 802.11i */
   7540 #ifdef notyet
   7541 	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
   7542 	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
   7543 #endif
   7544 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
   7545 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
   7546 
   7547 #ifndef IEEE80211_NO_HT
   7548 	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
   7549 	ic->ic_htxcaps = 0;
   7550 	ic->ic_txbfcaps = 0;
   7551 	ic->ic_aselcaps = 0;
   7552 	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
   7553 #endif
   7554 
   7555 	/* all hardware can do 2.4GHz band */
   7556 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
   7557 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
   7558 
   7559 	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
   7560 		sc->sc_phyctxt[i].id = i;
   7561 	}
   7562 
   7563 	sc->sc_amrr.amrr_min_success_threshold =  1;
   7564 	sc->sc_amrr.amrr_max_success_threshold = 15;
   7565 
   7566 	/* IBSS channel undefined for now. */
   7567 	ic->ic_ibss_chan = &ic->ic_channels[1];
   7568 
   7569 #if 0
   7570 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
   7571 #endif
   7572 
   7573 	ifp->if_softc = sc;
   7574 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   7575 	ifp->if_init = iwm_init;
   7576 	ifp->if_stop = iwm_stop;
   7577 	ifp->if_ioctl = iwm_ioctl;
   7578 	ifp->if_start = iwm_start;
   7579 	ifp->if_watchdog = iwm_watchdog;
   7580 	IFQ_SET_READY(&ifp->if_snd);
   7581 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
   7582 
   7583 	if_initialize(ifp);
   7584 #if 0
   7585 	ieee80211_ifattach(ic);
   7586 #else
   7587 	ether_ifattach(ifp, ic->ic_myaddr);	/* XXX */
   7588 #endif
   7589 	/* Use common softint-based if_input */
   7590 	ifp->if_percpuq = if_percpuq_create(ifp);
   7591 	if_deferred_start_init(ifp, NULL);
   7592 	if_register(ifp);
   7593 
   7594 	callout_init(&sc->sc_calib_to, 0);
   7595 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
   7596 	callout_init(&sc->sc_led_blink_to, 0);
   7597 	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
   7598 #ifndef IEEE80211_NO_HT
   7599 	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
   7600 	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
   7601 		panic("%s: could not create workqueue: setrates",
   7602 		    device_xname(self));
   7603 	if (workqueue_create(&sc->sc_bawq, "iwmba",
   7604 	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
   7605 		panic("%s: could not create workqueue: blockack",
   7606 		    device_xname(self));
   7607 	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
   7608 	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
   7609 		panic("%s: could not create workqueue: htprot",
   7610 		    device_xname(self));
   7611 #endif
   7612 
   7613 	if (pmf_device_register(self, NULL, NULL))
   7614 		pmf_class_network_register(self, ifp);
   7615 	else
   7616 		aprint_error_dev(self, "couldn't establish power handler\n");
   7617 
   7618 	/*
   7619 	 * We can't do normal attach before the file system is mounted
   7620 	 * because we cannot read the MAC address without loading the
   7621 	 * firmware from disk.  So we postpone until mountroot is done.
   7622 	 * Notably, this will require a full driver unload/load cycle
   7623 	 * (or reboot) in case the firmware is not present when the
   7624 	 * hook runs.
   7625 	 */
   7626 	config_mountroot(self, iwm_attach_hook);
   7627 
   7628 	return;
   7629 
   7630 fail4:	while (--txq_i >= 0)
   7631 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
   7632 	iwm_free_rx_ring(sc, &sc->rxq);
   7633 	iwm_dma_contig_free(&sc->sched_dma);
   7634 fail3:	if (sc->ict_dma.vaddr != NULL)
   7635 		iwm_dma_contig_free(&sc->ict_dma);
   7636 fail2:	iwm_dma_contig_free(&sc->kw_dma);
   7637 fail1:	iwm_dma_contig_free(&sc->fw_dma);
   7638 }
   7639 
   7640 void
   7641 iwm_radiotap_attach(struct iwm_softc *sc)
   7642 {
   7643 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
   7644 
   7645 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
   7646 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
   7647 	    &sc->sc_drvbpf);
   7648 
   7649 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
   7650 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
   7651 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
   7652 
   7653 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
   7654 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
   7655 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
   7656 }
   7657 
#if 0
/*
 * NOTE(review): this whole region is compiled out.  It carries
 * reinitialization and suspend/resume/deactivate glue (iwm_init_task,
 * iwm_wakeup, iwm_activate) that has not been wired up in this port —
 * presumably kept for a future hookup; confirm before enabling.
 */
static void
iwm_init_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int s;

	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	/* Restart the interface iff it is administratively up. */
	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}

static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		/* Stop traffic but do not tear the driver down. */
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif
   7705 
/*
 * Autoconfiguration glue: register the iwm driver with match/attach
 * entry points; no detach or activate hooks are provided.
 */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, NULL);
   7708 
   7709 static int
   7710 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
   7711 {
   7712 	struct sysctlnode node;
   7713 	struct iwm_softc *sc;
   7714 	int err, t;
   7715 
   7716 	node = *rnode;
   7717 	sc = node.sysctl_data;
   7718 	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
   7719 	node.sysctl_data = &t;
   7720 	err = sysctl_lookup(SYSCTLFN_CALL(&node));
   7721 	if (err || newp == NULL)
   7722 		return err;
   7723 
   7724 	if (t == 0)
   7725 		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
   7726 	return 0;
   7727 }
   7728 
   7729 SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
   7730 {
   7731 	const struct sysctlnode *rnode;
   7732 #ifdef IWM_DEBUG
   7733 	const struct sysctlnode *cnode;
   7734 #endif /* IWM_DEBUG */
   7735 	int rc;
   7736 
   7737 	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
   7738 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
   7739 	    SYSCTL_DESCR("iwm global controls"),
   7740 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   7741 		goto err;
   7742 
   7743 	iwm_sysctl_root_num = rnode->sysctl_num;
   7744 
   7745 #ifdef IWM_DEBUG
   7746 	/* control debugging printfs */
   7747 	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
   7748 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
   7749 	    "debug", SYSCTL_DESCR("Enable debugging output"),
   7750 	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
   7751 		goto err;
   7752 #endif /* IWM_DEBUG */
   7753 
   7754 	return;
   7755 
   7756  err:
   7757 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
   7758 }
   7759